diff --git a/.github/workflows/cli-release.yaml b/.github/workflows/cli-release.yaml
index c852510b..6a765c1d 100644
--- a/.github/workflows/cli-release.yaml
+++ b/.github/workflows/cli-release.yaml
@@ -24,7 +24,7 @@ on:
- cli/*
env:
- GO_VERSION: 1.24.6
+ GO_VERSION: 1.25.5
jobs:
release:
diff --git a/.github/workflows/operator-ci.yaml b/.github/workflows/operator-ci.yaml
index a5a23f0a..213a8ee9 100644
--- a/.github/workflows/operator-ci.yaml
+++ b/.github/workflows/operator-ci.yaml
@@ -43,7 +43,7 @@ on:
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
- GO_VERSION: 1.24.6
+ GO_VERSION: 1.25.5
PLATFORMS: linux/amd64,linux/arm64
jobs:
@@ -53,16 +53,16 @@ jobs:
strategy:
matrix:
# Standard E2E tests on all supported K8s versions
- k8s-version: ["1.31.12", "1.32.8", "1.33.4", "1.34.0"]
+ k8s-version: ["1.31.14", "1.32.11", "1.33.7", "1.34.3", "1.35.0"]
test-suite: ["e2e"]
include:
- # Deployment policy tests on 15-node cluster (K8s 1.34 only)
+ # Deployment policy tests on 15-node cluster (K8s 1.35 only)
- - k8s-version: "1.34.0"
+ - k8s-version: "1.35.0"
test-suite: deployment-policy
kind-config: k8s-tests/chainsaw/deployment-policy/kind-config.yaml
make-target: deployment-policy-tests
- # CLI e2e tests on K8s 1.34 only
+ # CLI e2e tests on K8s 1.35 only
- - k8s-version: "1.34.0"
+ - k8s-version: "1.35.0"
test-suite: cli-e2e
make-target: cli-e2e-tests
fail-fast: false # Continue testing other versions if one fails
@@ -130,7 +130,7 @@ jobs:
fi
# Upload coverage to Coveralls using goveralls (Go-specific tool)
- name: Upload coverage to Coveralls
- if: matrix.test-suite == 'e2e' && matrix.k8s-version == '1.34.0'
+ if: matrix.test-suite == 'e2e' && matrix.k8s-version == '1.35.0'
uses: coverallsapp/github-action@v2
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/docs/kubernetes-support.md b/docs/kubernetes-support.md
index 6aa3f40b..e534dce3 100644
--- a/docs/kubernetes-support.md
+++ b/docs/kubernetes-support.md
@@ -6,7 +6,8 @@ This document outlines Skyhook's approach to supporting different Kubernetes ver
| Kubernetes Version | Skyhook Version | Status | Notes |
|--------------------|-----------------|---------|-------|
-| 1.34, 1.33, 1.32, 1.31 | v0.9.0+ | ✅ Fully Supported | Current stable versions |
+| 1.35, 1.34, 1.33, 1.32, 1.31 | v0.11.0+ | ✅ Fully Supported | Current stable versions |
+| 1.34, 1.33, 1.32, 1.31 | v0.9.0 - v0.10.0 | ✅ Supported | Earlier Skyhook releases; no K8s 1.35 support |
| 1.30 | v0.8.x | ⚠️ Use older Skyhook | K8s 1.30 EOL: June 28, 2025 |
| 1.29 and older | v0.8.x or older | ⚠️ Use older Skyhook | No longer maintained |
diff --git a/operator/Makefile b/operator/Makefile
index 080d89e3..b228dd3e 100644
--- a/operator/Makefile
+++ b/operator/Makefile
@@ -16,7 +16,7 @@ include deps.mk
## Version of the operator
VERSION ?= $(GIT_TAG_LAST)
-GO_VERSION ?= 1.24.6
+GO_VERSION ?= 1.25.5
# Image URL to use all building/pushing image
## TODO: update this to the correct image location
@@ -24,7 +24,7 @@ IMG_REPO ?= ghcr.io/nvidia/skyhook
IMG ?= $(IMG_REPO)/operator
## default version of kind to use
-KIND_VERSION?=1.34.0
+KIND_VERSION?=1.35.0
PLATFORM := $(shell uname -s 2>/dev/null || echo unknown)
SKYHOOK_NAMESPACE ?= skyhook
diff --git a/operator/api/v1alpha1/webhook_suite_test.go b/operator/api/v1alpha1/webhook_suite_test.go
index af8f4477..61a1cc66 100644
--- a/operator/api/v1alpha1/webhook_suite_test.go
+++ b/operator/api/v1alpha1/webhook_suite_test.go
@@ -75,7 +75,7 @@ var _ = BeforeSuite(func() {
// Note that you must have the required binaries setup under the bin directory to perform
// the tests directly. When we run make test it will be setup and used automatically.
BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
- fmt.Sprintf("1.34.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
+ fmt.Sprintf("1.35.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
WebhookInstallOptions: envtest.WebhookInstallOptions{
Paths: []string{filepath.Join("..", "..", "config", "webhook")},
diff --git a/operator/api/v1alpha1/zz_generated.deepcopy.go b/operator/api/v1alpha1/zz_generated.deepcopy.go
index a19c6fbe..1dc1be66 100644
--- a/operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -1,5 +1,3 @@
-//go:build !ignore_autogenerated
-
/*
* SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
@@ -18,6 +16,8 @@
* limitations under the License.
*/
+//go:build !ignore_autogenerated
+
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
diff --git a/operator/cmd/cli/app/node/node_ignore_test.go b/operator/cmd/cli/app/node/node_ignore_test.go
index 13fb3488..3fc7529d 100644
--- a/operator/cmd/cli/app/node/node_ignore_test.go
+++ b/operator/cmd/cli/app/node/node_ignore_test.go
@@ -94,7 +94,7 @@ var _ = Describe("Node Ignore Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- mockKube = fake.NewSimpleClientset()
+ mockKube = fake.NewClientset()
kubeClient = client.NewWithClientsAndConfig(mockKube, nil, nil)
cliCtx = context.NewCLIContext(context.NewCLIConfig(context.WithOutputWriter(output)))
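For context on the repeated test change above and in the files below: client-go has deprecated `fake.NewSimpleClientset` in favor of `fake.NewClientset`, which additionally tracks managed fields in the fake object tracker. A minimal sketch of the replacement constructor (package name and seeded objects are illustrative, not taken from the repo):

```go
package node_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestFakeClientsetSketch shows the drop-in replacement for the deprecated
// fake.NewSimpleClientset: same variadic seeding with runtime.Objects, but the
// returned clientset also tracks managed fields.
func TestFakeClientsetSketch(t *testing.T) {
	kube := fake.NewClientset(&corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
	})

	nodes, err := kube.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(nodes.Items) != 1 {
		t.Fatalf("expected 1 node, got %d", len(nodes.Items))
	}
}
```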
diff --git a/operator/cmd/cli/app/node/node_list_test.go b/operator/cmd/cli/app/node/node_list_test.go
index 08f25843..fff160c1 100644
--- a/operator/cmd/cli/app/node/node_list_test.go
+++ b/operator/cmd/cli/app/node/node_list_test.go
@@ -121,7 +121,7 @@ var _ = Describe("Node List Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- mockKube = fake.NewSimpleClientset()
+ mockKube = fake.NewClientset()
kubeClient = client.NewWithClientsAndConfig(mockKube, nil, nil)
cliCtx = context.NewCLIContext(context.NewCLIConfig(context.WithOutputWriter(output)))
})
diff --git a/operator/cmd/cli/app/node/node_reset_test.go b/operator/cmd/cli/app/node/node_reset_test.go
index ac684da2..6d4c1fcc 100644
--- a/operator/cmd/cli/app/node/node_reset_test.go
+++ b/operator/cmd/cli/app/node/node_reset_test.go
@@ -95,7 +95,7 @@ var _ = Describe("Node Reset Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- mockKube = fake.NewSimpleClientset()
+ mockKube = fake.NewClientset()
kubeClient = client.NewWithClientsAndConfig(mockKube, nil, nil)
cliCtx = context.NewCLIContext(context.NewCLIConfig(context.WithOutputWriter(output)))
diff --git a/operator/cmd/cli/app/node/node_status_test.go b/operator/cmd/cli/app/node/node_status_test.go
index ddbef196..702d799b 100644
--- a/operator/cmd/cli/app/node/node_status_test.go
+++ b/operator/cmd/cli/app/node/node_status_test.go
@@ -150,7 +150,7 @@ var _ = Describe("Node Status Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- mockKube = fake.NewSimpleClientset()
+ mockKube = fake.NewClientset()
kubeClient = client.NewWithClientsAndConfig(mockKube, nil, nil)
cliCtx = context.NewCLIContext(context.NewCLIConfig(context.WithOutputWriter(output)))
})
diff --git a/operator/cmd/cli/app/package/package_logs_test.go b/operator/cmd/cli/app/package/package_logs_test.go
index ce5232ca..ab86821d 100644
--- a/operator/cmd/cli/app/package/package_logs_test.go
+++ b/operator/cmd/cli/app/package/package_logs_test.go
@@ -271,7 +271,7 @@ var _ = Describe("Package Logs Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- fakeKube = fake.NewSimpleClientset()
+ fakeKube = fake.NewClientset()
kubeClient = client.NewWithClientsAndConfig(fakeKube, nil, nil)
})
diff --git a/operator/cmd/cli/app/package/package_rerun_test.go b/operator/cmd/cli/app/package/package_rerun_test.go
index b7204e81..54e398a1 100644
--- a/operator/cmd/cli/app/package/package_rerun_test.go
+++ b/operator/cmd/cli/app/package/package_rerun_test.go
@@ -263,7 +263,7 @@ var _ = Describe("Package Rerun Command", func() {
)
BeforeEach(func() {
- fakeKube = fake.NewSimpleClientset()
+ fakeKube = fake.NewClientset()
mockDynamic = &mockdynamic.Interface{}
mockNSRes = &mockdynamic.NamespaceableResourceInterface{}
kubeClient = client.NewWithClientsAndConfig(fakeKube, mockDynamic, nil)
diff --git a/operator/cmd/cli/app/package/package_status_test.go b/operator/cmd/cli/app/package/package_status_test.go
index 83d0744e..9aad0f77 100644
--- a/operator/cmd/cli/app/package/package_status_test.go
+++ b/operator/cmd/cli/app/package/package_status_test.go
@@ -171,7 +171,7 @@ var _ = Describe("Package Status Command", func() {
BeforeEach(func() {
output = &bytes.Buffer{}
- fakeKube = fake.NewSimpleClientset()
+ fakeKube = fake.NewClientset()
mockDynamic = &mockdynamic.Interface{}
mockNSRes = &mockdynamic.NamespaceableResourceInterface{}
kubeClient = client.NewWithClientsAndConfig(fakeKube, mockDynamic, nil)
diff --git a/operator/cmd/cli/app/version_test.go b/operator/cmd/cli/app/version_test.go
index 738d2597..6bc49e31 100644
--- a/operator/cmd/cli/app/version_test.go
+++ b/operator/cmd/cli/app/version_test.go
@@ -176,7 +176,7 @@ func TestDiscoverOperatorVersion(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- clientset := fake.NewSimpleClientset()
+ clientset := fake.NewClientset()
if tt.deployment != nil {
_, err := clientset.AppsV1().Deployments("skyhook").Create(
diff --git a/operator/config/crd/bases/skyhook.nvidia.com_skyhooks.yaml b/operator/config/crd/bases/skyhook.nvidia.com_skyhooks.yaml
index d273abfd..bb13a401 100644
--- a/operator/config/crd/bases/skyhook.nvidia.com_skyhooks.yaml
+++ b/operator/config/crd/bases/skyhook.nvidia.com_skyhooks.yaml
@@ -91,9 +91,10 @@ spec:
operator:
description: |-
Operator represents a key's relationship to the value.
- Valid operators are Exists and Equal. Defaults to Equal.
+ Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal.
Exists is equivalent to wildcard for value, so that a pod can
tolerate all taints of a particular category.
+ Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).
type: string
tolerationSeconds:
description: |-
diff --git a/operator/deps.mk b/operator/deps.mk
index a6e1e162..420647ad 100644
--- a/operator/deps.mk
+++ b/operator/deps.mk
@@ -36,12 +36,12 @@ ifndef ARCH
endif
## versions
-GOLANGCI_LINT_VERSION ?= v2.2.1
+GOLANGCI_LINT_VERSION ?= v2.7.2
KUSTOMIZE_VERSION ?= v5.4.1
CONTROLLER_TOOLS_VERSION ?= v0.18.0
-ENVTEST_K8S_VERSION ?= 1.34.1
-GOCOVER_VERSION ?= v1.3.0
-GINKGO_VERSION ?= v2.22.2
+ENVTEST_K8S_VERSION ?= 1.35.0
+GOCOVER_VERSION ?= v1.4.0
+GINKGO_VERSION ?= v2.27.2
MOCKERY_VERSION ?= v3.5.0
CHAINSAW_VERSION ?= v0.2.10
HELM_VERSION ?= v3.18.5
@@ -93,7 +93,7 @@ $(CONTROLLER_GEN): $(LOCALBIN)
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
- test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.20 ## latest requires golang 1.24
+ test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.22
$(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN)
.PHONY: gocover-cobertura
diff --git a/operator/go.mod b/operator/go.mod
index 77650471..5112726f 100644
--- a/operator/go.mod
+++ b/operator/go.mod
@@ -1,27 +1,26 @@
module github.com/NVIDIA/skyhook/operator
-go 1.24.0
-
-toolchain go1.24.6
+go 1.25.5
require (
- github.com/go-logr/logr v1.4.2
- github.com/onsi/ginkgo/v2 v2.22.2
- github.com/onsi/gomega v1.36.2
+ github.com/go-logr/logr v1.4.3
+ github.com/onsi/ginkgo/v2 v2.27.2
+ github.com/onsi/gomega v1.38.2
github.com/sethvargo/go-envconfig v1.0.0
github.com/spf13/cobra v1.10.1
- github.com/stretchr/testify v1.10.0
+ github.com/stretchr/testify v1.11.1
go.uber.org/zap v1.27.0
- k8s.io/api v0.34.1
- k8s.io/apimachinery v0.34.1
- k8s.io/cli-runtime v0.34.1
- k8s.io/client-go v0.34.1
- k8s.io/kubernetes v1.34.2
- sigs.k8s.io/controller-runtime v0.21.0
+ k8s.io/api v0.35.0
+ k8s.io/apimachinery v0.35.0
+ k8s.io/cli-runtime v0.35.0
+ k8s.io/client-go v0.35.0
+ k8s.io/kubernetes v1.35.0
+ sigs.k8s.io/controller-runtime v0.22.4
)
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
@@ -36,10 +35,10 @@ require (
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
- go.yaml.in/yaml/v2 v2.4.2 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/sync v0.12.0 // indirect
- gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+ golang.org/x/sync v0.18.0 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
@@ -61,7 +60,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
- github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+ github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -69,30 +68,29 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus/client_golang v1.22.0
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.62.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus/client_golang v1.23.2
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/common v0.66.1 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
github.com/spf13/pflag v1.0.9
github.com/stretchr/objx v0.5.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/mod v0.22.0
- golang.org/x/net v0.38.0 // indirect
- golang.org/x/oauth2 v0.27.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- golang.org/x/term v0.30.0 // indirect
- golang.org/x/text v0.23.0 // indirect
+ golang.org/x/mod v0.29.0
+ golang.org/x/net v0.47.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sys v0.38.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.9.0 // indirect
- golang.org/x/tools v0.28.0 // indirect
+ golang.org/x/tools v0.38.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/protobuf v1.36.5 // indirect
+ google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/apiextensions-apiserver v0.34.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
- k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
- sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/yaml v1.6.0
)
diff --git a/operator/go.sum b/operator/go.sum
index 6e8c4330..159b11af 100644
--- a/operator/go.sum
+++ b/operator/go.sum
@@ -1,5 +1,7 @@
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
@@ -24,10 +26,16 @@ github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
+github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
+github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
+github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk=
+github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE=
+github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -40,6 +48,8 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
@@ -51,8 +61,8 @@ github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
@@ -61,6 +71,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
+github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -80,6 +92,10 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
+github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
+github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
+github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -92,10 +108,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
-github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
-github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
-github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -103,16 +119,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -132,8 +148,16 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
+github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
@@ -146,8 +170,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
-go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -155,81 +179,81 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
-golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
-golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
-golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
-gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
-k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
-k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
-k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
-k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M=
-k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE=
-k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
-k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/cli-runtime v0.35.0 h1:PEJtYS/Zr4p20PfZSLCbY6YvaoLrfByd6THQzPworUE=
+k8s.io/cli-runtime v0.35.0/go.mod h1:VBRvHzosVAoVdP3XwUQn1Oqkvaa8facnokNkD7jOTMY=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
-k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/kubernetes v1.34.2 h1:WQdDvYJazkmkwSncgNwGvVtaCt4TYXIU3wSMRgvp3MI=
-k8s.io/kubernetes v1.34.2/go.mod h1:m6pZk6a179pRo2wsTiCPORJ86iOEQmfIzUvtyEF8BwA=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
-k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
-sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
-sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/kubernetes v1.35.0 h1:PUOojD8c8E3csMP5NX+nLLne6SGqZjrYCscptyBfWMY=
+k8s.io/kubernetes v1.35.0/go.mod h1:Tzk9Y9W/XUFFFgTUVg+BAowoFe+Pc7koGLuaiLHdcFg=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
+sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
diff --git a/operator/internal/controller/cluster_state_v2.go b/operator/internal/controller/cluster_state_v2.go
index 9332061a..640d631b 100644
--- a/operator/internal/controller/cluster_state_v2.go
+++ b/operator/internal/controller/cluster_state_v2.go
@@ -455,12 +455,14 @@ func (s *skyhookNodes) UpdateCondition() bool { // TODO: might make sense to mak
}
type NodePicker struct {
+ logger logr.Logger
priorityNodes map[string]time.Time
runtimeRequiredToleration corev1.Toleration
}
-func NewNodePicker(runtimeRequiredToleration corev1.Toleration) *NodePicker {
+func NewNodePicker(logger logr.Logger, runtimeRequiredToleration corev1.Toleration) *NodePicker {
return &NodePicker{
+ logger: logger,
priorityNodes: make(map[string]time.Time),
runtimeRequiredToleration: runtimeRequiredToleration,
}
@@ -507,13 +509,13 @@ func (s *NodePicker) upsertPick(name string, skyhook *wrapper.Skyhook) {
skyhook.Updated = true
}
-func CheckTaintToleration(tolerations []corev1.Toleration, taints []corev1.Taint) bool {
+func CheckTaintToleration(logger logr.Logger, tolerations []corev1.Toleration, taints []corev1.Taint) bool {
// Must tolerate all taints.
all_tolerated := true
for _, taint := range taints {
tolerated := false
for _, toleration := range tolerations {
- if toleration.ToleratesTaint(&taint) {
+ if toleration.ToleratesTaint(logger.WithName("CheckTaintToleration"), &taint, false) {
tolerated = true
break
}
@@ -564,7 +566,7 @@ func (np *NodePicker) selectNodesWithCompartments(s SkyhookNodes, compartments m
// This ensures the conditions reflect the true state even when no batch is being processed
for _, compartment := range compartments {
for _, node := range compartment.GetNodes() {
- if !CheckTaintToleration(tolerations, node.GetNode().Spec.Taints) {
+ if !CheckTaintToleration(np.logger, tolerations, node.GetNode().Spec.Taints) {
nodesWithTaintTolerationIssue = append(nodesWithTaintTolerationIssue, node.GetNode().Name)
}
if CheckNodeIgnoreLabel(node) {
@@ -584,7 +586,7 @@ func (np *NodePicker) selectNodesWithCompartments(s SkyhookNodes, compartments m
continue
}
// Check taint toleration
- if CheckTaintToleration(tolerations, node.GetNode().Spec.Taints) {
+ if CheckTaintToleration(np.logger, tolerations, node.GetNode().Spec.Taints) {
selectedNodes = append(selectedNodes, node)
np.upsertPick(node.GetNode().GetName(), s.GetSkyhook())
} else {
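A minimal sketch of the new call shape introduced above: the helpers now take a `logr.Logger` because `corev1.Toleration.ToleratesTaint` in the updated k8s.io/api expects a logger plus a feature-gate flag. The taint/toleration values and the runtime-required key below are illustrative only, and `logr.Discard()` stands in for the reconciler's logger:

```go
package controller

import (
	"fmt"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
)

// exampleTaintTolerationCheck sketches how callers pass the logger through the
// updated helpers; the values are made up for illustration.
func exampleTaintTolerationCheck() {
	logger := logr.Discard()

	taints := []corev1.Taint{
		{Key: "dedicated", Value: "gpu", Effect: corev1.TaintEffectNoSchedule},
	}
	tolerations := []corev1.Toleration{
		{Key: "dedicated", Operator: corev1.TolerationOpEqual, Value: "gpu", Effect: corev1.TaintEffectNoSchedule},
	}

	// CheckTaintToleration forwards the logger to ToleratesTaint, which now
	// takes it plus a feature-gate flag (passed as false above).
	fmt.Println(CheckTaintToleration(logger, tolerations, taints)) // true

	// NodePicker construction also requires the logger now.
	picker := NewNodePicker(logger, corev1.Toleration{
		Key:      "example.com/runtime-required", // illustrative key, not the operator's real default
		Operator: corev1.TolerationOpExists,
		Effect:   corev1.TaintEffectNoSchedule,
	})
	_ = picker
}
```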
diff --git a/operator/internal/controller/cluster_state_v2_test.go b/operator/internal/controller/cluster_state_v2_test.go
index 21fbdcb3..a8f5a95c 100644
--- a/operator/internal/controller/cluster_state_v2_test.go
+++ b/operator/internal/controller/cluster_state_v2_test.go
@@ -23,6 +23,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/NVIDIA/skyhook/operator/api/v1alpha1"
skyhookNodesMock "github.com/NVIDIA/skyhook/operator/internal/controller/mock"
@@ -38,6 +39,8 @@ const (
var _ = Describe("cluster state v2 tests", func() {
+ logger := log.FromContext(ctx)
+
It("should check taint toleration", func() {
taints := []corev1.Taint{
{
@@ -67,7 +70,7 @@ var _ = Describe("cluster state v2 tests", func() {
},
}
- Expect(CheckTaintToleration(tolerations, taints)).To(BeTrue())
+ Expect(CheckTaintToleration(logger, tolerations, taints)).To(BeTrue())
})
It("Must tolerate all taints", func() {
@@ -91,7 +94,7 @@ var _ = Describe("cluster state v2 tests", func() {
},
}
- Expect(CheckTaintToleration(tolerations, taints)).To(BeFalse())
+ Expect(CheckTaintToleration(logger, tolerations, taints)).To(BeFalse())
})
It("When no taints it is tolerated", func() {
@@ -104,7 +107,7 @@ var _ = Describe("cluster state v2 tests", func() {
},
}
- Expect(CheckTaintToleration(tolerations, taints)).To(BeTrue())
+ Expect(CheckTaintToleration(logger, tolerations, taints)).To(BeTrue())
})
It("When no taints and no tolerations it is tolerated", func() {
@@ -112,7 +115,7 @@ var _ = Describe("cluster state v2 tests", func() {
tolerations := make([]corev1.Toleration, 0)
- Expect(CheckTaintToleration(tolerations, taints)).To(BeTrue())
+ Expect(CheckTaintToleration(logger, tolerations, taints)).To(BeTrue())
})
It("When node has ignore label it is blocked", func() {
diff --git a/operator/internal/controller/skyhook_controller.go b/operator/internal/controller/skyhook_controller.go
index eb41a00d..c8eaef22 100644
--- a/operator/internal/controller/skyhook_controller.go
+++ b/operator/internal/controller/skyhook_controller.go
@@ -303,7 +303,7 @@ func (r *SkyhookReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
- // node picker is for selecting nodes to do work, tries maintain a prior of nodes between SCRs
+ // node picker is for selecting nodes to do work; tries to maintain a prioritized set of nodes between SCRs
- nodePicker := NewNodePicker(r.opts.GetRuntimeRequiredToleration())
+ nodePicker := NewNodePicker(logger, r.opts.GetRuntimeRequiredToleration())
errs := make([]error, 0)
var result *ctrl.Result
diff --git a/operator/internal/controller/skyhook_controller_test.go b/operator/internal/controller/skyhook_controller_test.go
index 4b2aac83..84402c46 100644
--- a/operator/internal/controller/skyhook_controller_test.go
+++ b/operator/internal/controller/skyhook_controller_test.go
@@ -34,11 +34,14 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var _ = Describe("skyhook controller tests", func() {
+ var logger = log.FromContext(ctx)
+
It("should map only pods we created", func() {
pod := &corev1.Pod{
@@ -113,7 +116,7 @@ var _ = Describe("skyhook controller tests", func() {
Expect(err).ToNot(HaveOccurred())
for _, skyhook := range clusterState.skyhooks {
- picker := NewNodePicker(opts.GetRuntimeRequiredToleration())
+ picker := NewNodePicker(logger, opts.GetRuntimeRequiredToleration())
pick := picker.SelectNodes(skyhook)
Expect(pick).To(HaveLen(expected))
}
@@ -173,7 +176,7 @@ var _ = Describe("skyhook controller tests", func() {
Expect(err).ToNot(HaveOccurred())
for _, skyhook := range clusterState.skyhooks {
- picker := NewNodePicker(opts.GetRuntimeRequiredToleration())
+ picker := NewNodePicker(logger, opts.GetRuntimeRequiredToleration())
pick := picker.SelectNodes(skyhook)
Expect(pick).To(HaveLen(expected))
}
@@ -810,7 +813,7 @@ var _ = Describe("skyhook controller tests", func() {
Effect: "NoSchedule",
}
toleration := opts.GetRuntimeRequiredToleration()
- Expect(toleration.ToleratesTaint(&taint)).To(BeTrue())
+ Expect(toleration.ToleratesTaint(logger, &taint, false)).To(BeTrue())
})
It("Pods should always tolerate runtime required taint", func() {
diff --git a/operator/internal/controller/suite_test.go b/operator/internal/controller/suite_test.go
index bad12155..11b8483e 100644
--- a/operator/internal/controller/suite_test.go
+++ b/operator/internal/controller/suite_test.go
@@ -80,7 +80,7 @@ var _ = BeforeSuite(func() {
// the tests directly. When we run make test it will be setup and used automatically.
// NOTE: Also, this version needs to match what is installed in the make file
BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
- fmt.Sprintf("1.34.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
+ fmt.Sprintf("1.35.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
// AttachControlPlaneOutput: true,
// UseExistingCluster: ptr[bool](true),
}
diff --git a/operator/internal/mocks/client/Client.go b/operator/internal/mocks/client/Client.go
index 14753ba1..f8cd6a70 100644
--- a/operator/internal/mocks/client/Client.go
+++ b/operator/internal/mocks/client/Client.go
@@ -59,6 +59,82 @@ func (_m *Client) EXPECT() *Client_Expecter {
return &Client_Expecter{mock: &_m.Mock}
}
+// Apply provides a mock function for the type Client
+func (_mock *Client) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error {
+ // client.ApplyOption
+ _va := make([]interface{}, len(opts))
+ for _i := range opts {
+ _va[_i] = opts[_i]
+ }
+ var _ca []interface{}
+ _ca = append(_ca, ctx, obj)
+ _ca = append(_ca, _va...)
+ ret := _mock.Called(_ca...)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Apply")
+ }
+
+ var r0 error
+ if returnFunc, ok := ret.Get(0).(func(context.Context, runtime.ApplyConfiguration, ...client.ApplyOption) error); ok {
+ r0 = returnFunc(ctx, obj, opts...)
+ } else {
+ r0 = ret.Error(0)
+ }
+ return r0
+}
+
+// Client_Apply_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Apply'
+type Client_Apply_Call struct {
+ *mock.Call
+}
+
+// Apply is a helper method to define mock.On call
+// - ctx context.Context
+// - obj runtime.ApplyConfiguration
+// - opts ...client.ApplyOption
+func (_e *Client_Expecter) Apply(ctx interface{}, obj interface{}, opts ...interface{}) *Client_Apply_Call {
+ return &Client_Apply_Call{Call: _e.mock.On("Apply",
+ append([]interface{}{ctx, obj}, opts...)...)}
+}
+
+func (_c *Client_Apply_Call) Run(run func(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption)) *Client_Apply_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ var arg0 context.Context
+ if args[0] != nil {
+ arg0 = args[0].(context.Context)
+ }
+ var arg1 runtime.ApplyConfiguration
+ if args[1] != nil {
+ arg1 = args[1].(runtime.ApplyConfiguration)
+ }
+ var arg2 []client.ApplyOption
+ variadicArgs := make([]client.ApplyOption, len(args)-2)
+ for i, a := range args[2:] {
+ if a != nil {
+ variadicArgs[i] = a.(client.ApplyOption)
+ }
+ }
+ arg2 = variadicArgs
+ run(
+ arg0,
+ arg1,
+ arg2...,
+ )
+ })
+ return _c
+}
+
+func (_c *Client_Apply_Call) Return(err error) *Client_Apply_Call {
+ _c.Call.Return(err)
+ return _c
+}
+
+func (_c *Client_Apply_Call) RunAndReturn(run func(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error) *Client_Apply_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// Create provides a mock function for the type Client
func (_mock *Client) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
// client.CreateOption
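The regenerated mock picks up `Apply` because the `client.Client` interface in controller-runtime v0.22 adds a server-side apply method. A minimal sketch of stubbing it in a test; the `mockclient` import alias is an assumption, `runtime` is assumed to be `k8s.io/apimachinery/pkg/runtime` as in the generated code, and the nil apply configuration is only a placeholder:

```go
package controller_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"k8s.io/apimachinery/pkg/runtime"

	mockclient "github.com/NVIDIA/skyhook/operator/internal/mocks/client"
)

func TestClientApplyStub(t *testing.T) {
	c := &mockclient.Client{}
	defer c.AssertExpectations(t)

	// Match any context and any apply configuration; return no error.
	c.EXPECT().Apply(mock.Anything, mock.Anything).Return(nil)

	// Placeholder: a real test would pass a generated apply configuration
	// that satisfies runtime.ApplyConfiguration.
	var obj runtime.ApplyConfiguration

	if err := c.Apply(context.Background(), obj); err != nil {
		t.Fatal(err)
	}
}
```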
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/.gitignore b/operator/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 00000000..6b061e61
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/operator/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 00000000..fbc63325
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,27 @@
+run:
+ deadline: 2m
+
+linters:
+ disable-all: true
+ enable:
+ - misspell
+ - govet
+ - staticcheck
+ - errcheck
+ - unparam
+ - ineffassign
+ - nakedret
+ - gocyclo
+ - dupl
+ - goimports
+ - revive
+ - gosec
+ - gosimple
+ - typecheck
+ - unused
+
+linters-settings:
+ gofmt:
+ simplify: true
+ dupl:
+ threshold: 600
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/operator/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 00000000..fabe5e43
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,268 @@
+# Changelog
+
+## 3.4.0 (2025-06-27)
+
+### Added
+
+- #268: Added property to Constraints to include prereleases for Check and Validate
+
+### Changed
+
+- #263: Updated Go testing for 1.24, 1.23, and 1.22
+- #269: Updated the error message handling for message case and wrapping errors
+- #266: Restore the ability to have leading 0's when parsing with NewVersion.
+ Opt-out of this by setting CoerceNewVersion to false.
+
+### Fixed
+
+- #257: Fixed the CodeQL link (thanks @dmitris)
+- #262: Restored detailed errors when failed to parse with NewVersion. Opt-out
+ of this by setting DetailedNewVersionErrors to false for faster performance.
+- #267: Handle pre-releases for an "and" group if one constraint includes them
+
+## 3.3.1 (2024-11-19)
+
+### Fixed
+
+- #253: Fix for allowing some version that were invalid
+
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause problem
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+ error if the version passed in is not a strict semantic version. For example,
+ 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+ speaking semantic versions. This function is faster, performs fewer operations,
+ and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+ The Makefile contains the operations used. For more information on you can start
+ on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+ to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+ version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
+ rules have changed. The minor version is treated as the stable version unless
+ a patch is specified and then it is equivalent to =. One difference from npm/js
+ is that prereleases there are only to a specific version (e.g. 1.2.3).
+ Prereleases here look over multiple versions and follow semantic version
+ ordering rules. This pattern now follows along with the expected and requested
+ handling of this packaged by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console appliaction
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+ properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+ might not satisfy the intended compatibility. The change here ignores pre-releases
+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+ constraint. For example, `^1.2.3` will ignore pre-releases while
+ `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a version failed a
+ constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/operator/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
new file mode 100644
index 00000000..9ff7da9c
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/Makefile b/operator/vendor/github.com/Masterminds/semver/v3/Makefile
new file mode 100644
index 00000000..9ca87a2c
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -0,0 +1,31 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+ @echo "==> Linting codebase"
+ @$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+ @echo "==> Running tests"
+ GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+ @echo "==> Running Tests with coverage"
+ GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz:
+ @echo "==> Running Fuzz Tests"
+ go env GOCACHE
+ go test -fuzz=FuzzNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
+ go test -fuzz=FuzzNewConstraint -fuzztime=15s .
+
+$(GOLANGCI_LINT):
+ # Install golangci-lint. The configuration for it is in the .golangci.yml
+ # file in the root of the repository
+ echo ${GOPATH}
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/README.md b/operator/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 00000000..2f56c676
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,274 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[](https://masterminds.github.io/stability/active.html)
+[](https://github.com/Masterminds/semver/actions)
+[](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+## Package Versions
+
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the stable and active version. This version is focused on constraint
+ compatibility for range handling in other tools from other languages. It has
+ a similar API to the v1 releases. The development of this version is on the master
+ branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+ no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+ There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the original release. It is no longer maintained. You should use the
+ v3 release instead. You can read the documentation for the 1.x.x release
+ [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
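+
+As a brief sketch (hypothetical values) contrasting the two parse functions and
+the `Original()` accessor:
+
+```go
+v, _ := semver.NewVersion("v1.2") // coerced into 1.2.0
+fmt.Println(v.String())           // "1.2.0"
+fmt.Println(v.Original())         // "v1.2"
+
+_, err := semver.StrictNewVersion("v1.2")
+fmt.Println(err != nil) // true, "v1.2" is not a strict semantic version
+```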
+
+There are package level variables that affect how `NewVersion` handles parsing.
+
+- `CoerceNewVersion` is `true` by default. When set to `true` it coerces non-compliant
+ versions into SemVer. For example, allowing a leading 0 in a major, minor, or patch
+ part. This enables the use of CalVer in versions even when not compliant with SemVer.
+  When set to `false`, less coercion is performed.
+- `DetailedNewVersionErrors` provides more detailed errors. It only has an effect when
+  `CoerceNewVersion` is set to `false`. When `DetailedNewVersionErrors` is set to `true`
+  it can provide some more insight into why a version is invalid. Setting
+  `DetailedNewVersionErrors` to `false` is faster but provides less detailed
+  error messages if a version fails to parse.
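+
+A minimal sketch of how these variables might be adjusted (error handling and
+restoring the defaults are left to the caller):
+
+```go
+// Parse strictly: disable coercion but keep detailed errors.
+semver.CoerceNewVersion = false
+semver.DetailedNewVersionErrors = true
+
+if _, err := semver.NewVersion("01.2.3"); err != nil {
+	// The leading zero makes the version invalid; err explains why.
+	fmt.Println(err)
+}
+```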
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include pre-releases
+ within the comparison. It will provide an answer that is valid with the
+ comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
+   range does not include one. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+ // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The variable a will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
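+
+For example, the compound range above can be checked directly (a short sketch
+with error handling elided):
+
+```go
+c, _ := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3")
+
+fmt.Println(c.Check(semver.MustParse("2.5.0"))) // true, satisfies the first range
+fmt.Println(c.Check(semver.MustParse("3.1.0"))) // false, satisfies neither range
+fmt.Println(c.Check(semver.MustParse("4.2.3"))) // true, satisfies the second range
+```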
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification, pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+The `Constraints` instance returned from `semver.NewConstraint()` has a property
+`IncludePrerelease` that, when set to true, causes prerelease versions to be
+included when `Check()` and `Validate()` are called.
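+
+A short sketch of that behavior (error handling elided):
+
+```go
+c, _ := semver.NewConstraint(">= 1.2.3")
+pre := semver.MustParse("1.3.0-beta.1")
+
+fmt.Println(c.Check(pre)) // false, prereleases are skipped by default
+
+c.IncludePrerelease = true
+fmt.Println(c.Check(pre)) // true, prereleases are now considered
+```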
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
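+
+A quick sketch showing why the whitespace matters (error handling elided):
+
+```go
+withSpaces, _ := semver.NewConstraint("1.2 - 1.4.5") // >= 1.2, <= 1.4.5
+noSpaces, _ := semver.NewConstraint("1.2-1.4.5")     // 1.2.0 with prerelease 1.4.5
+
+v := semver.MustParse("1.3.0")
+fmt.Println(withSpaces.Check(v)) // true
+fmt.Println(noSpaces.Check(v))   // false
+```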
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions because a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
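+
+A brief sketch of the pre-1.0.0 behavior described above:
+
+```go
+c, _ := semver.NewConstraint("^0.2.3")
+
+fmt.Println(c.Check(semver.MustParse("0.2.9"))) // true, a patch-level change
+fmt.Println(c.Check(semver.MustParse("0.3.0"))) // false, a minor bump is breaking below 1.0.0
+```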
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+ // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
+
+## Security
+
+Security is an important consideration for this project. The project currently
+uses the following tools to help discover security issues:
+
+* [CodeQL](https://codeql.github.com)
+* [gosec](https://github.com/securego/gosec)
+* Daily Fuzz testing
+
+If you believe you have found a security vulnerability you can privately disclose
+it through the [GitHub security page](https://github.com/Masterminds/semver/security).
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/SECURITY.md b/operator/vendor/github.com/Masterminds/semver/v3/SECURITY.md
new file mode 100644
index 00000000..a30a66b1
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of semver are currently supported:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 3.x | :white_check_mark: |
+| 2.x | :x: |
+| 1.x | :x: |
+
+Fixes are only released for the latest minor version in the form of a patch release.
+
+## Reporting a Vulnerability
+
+You can privately disclose a vulnerability through GitHub's
+[private vulnerability reporting](https://github.com/Masterminds/semver/security/advisories)
+mechanism.
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/collection.go b/operator/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 00000000..a7823589
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+ return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+ return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/constraints.go b/operator/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 00000000..8b7a10f8
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,601 @@
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+ constraints [][]*constraint
+ containsPre []bool
+
+ // IncludePrerelease specifies if pre-releases should be included in
+	// the results. Note, if a constraint range has a prerelease then
+ // prereleases will be included for that AND group even if this is
+ // set to false.
+ IncludePrerelease bool
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+ // Rewrite - ranges into a comparison operation.
+ c = rewriteRange(c)
+
+ ors := strings.Split(c, "||")
+ lenors := len(ors)
+ or := make([][]*constraint, lenors)
+ hasPre := make([]bool, lenors)
+ for k, v := range ors {
+ // Validate the segment
+ if !validConstraintRegex.MatchString(v) {
+ return nil, fmt.Errorf("improper constraint: %s", v)
+ }
+
+ cs := findConstraintRegex.FindAllString(v, -1)
+ if cs == nil {
+ cs = append(cs, v)
+ }
+ result := make([]*constraint, len(cs))
+ for i, s := range cs {
+ pc, err := parseConstraint(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // If one of the constraints has a prerelease record this.
+ // This information is used when checking all in an "and"
+ // group to ensure they all check for prereleases.
+ if pc.con.pre != "" {
+ hasPre[k] = true
+ }
+
+ result[i] = pc
+ }
+ or[k] = result
+ }
+
+ o := &Constraints{
+ constraints: or,
+ containsPre: hasPre,
+ }
+ return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ // TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+ // functions as the underlying functions make that possible now.
+ // loop over the ORs and check the inner ANDs
+ for i, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ if check, _ := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); !check {
+ joy = false
+ break
+ }
+ }
+
+ if joy {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+ // loop over the ORs and check the inner ANDs
+ var e []error
+
+	// Capture the prerelease message only once. When it happens the first time
+	// this var is marked.
+	var prerelease bool
+ for i, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+			// Before running the check handle the case where the version is
+ // a prerelease and the check is not searching for prereleases.
+ if !(cs.IncludePrerelease || cs.containsPre[i]) && v.pre != "" {
+				if !prerelease {
+ em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ e = append(e, em)
+					prerelease = true
+ }
+ joy = false
+
+ } else {
+
+ if _, err := c.check(v, (cs.IncludePrerelease || cs.containsPre[i])); err != nil {
+ e = append(e, err)
+ joy = false
+ }
+ }
+ }
+
+ if joy {
+ return true, []error{}
+ }
+ }
+
+ return false, e
+}
+
+func (cs Constraints) String() string {
+ buf := make([]string, len(cs.constraints))
+ var tmp bytes.Buffer
+
+ for k, v := range cs.constraints {
+ tmp.Reset()
+ vlen := len(v)
+ for kk, c := range v {
+ tmp.WriteString(c.string())
+
+ // Space separate the AND conditions
+ if vlen > 1 && kk < vlen-1 {
+ tmp.WriteString(" ")
+ }
+ }
+ buf[k] = tmp.String()
+ }
+
+ return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+ temp, err := NewConstraint(string(text))
+ if err != nil {
+ return err
+ }
+
+ *cs = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+ return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate that a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+ constraintOps = map[string]cfunc{
+ "": constraintTildeOrEqual,
+ "=": constraintTildeOrEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "=>": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "=<": constraintLessThanEqual,
+ "~": constraintTilde,
+ "~>": constraintTilde,
+ "^": constraintCaret,
+ }
+
+ ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+ constraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ ops,
+ cvRegex))
+
+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+ `\s*(%s)\s+-\s+(%s)\s*`,
+ cvRegex, cvRegex))
+
+ findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `(%s)\s*(%s)`,
+ ops,
+ cvRegex))
+
+ // The first time a constraint shows up will look slightly different from
+ // future times it shows up due to a leading space or comma in a given
+ // string.
+ validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+ ops,
+ cvRegex,
+ ops,
+ cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+ // The version used in the constraint check. For example, if a constraint
+	// is '<= 2.0.0' the con is a version instance representing 2.0.0.
+ con *Version
+
+ // The original parsed version (e.g., 4.x from != 4.x)
+ orig string
+
+ // The original operator for the constraint
+ origfunc string
+
+ // When an x is used as part of the version (e.g., 1.x)
+ minorDirty bool
+ dirty bool
+ patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version, includePre bool) (bool, error) {
+ return constraintOps[c.origfunc](v, c, includePre)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+ return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint, includePre bool) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+ if len(c) > 0 {
+ m := constraintRegex.FindStringSubmatch(c)
+ if m == nil {
+ return nil, fmt.Errorf("improper constraint: %s", c)
+ }
+
+ cs := &constraint{
+ orig: m[2],
+ origfunc: m[1],
+ }
+
+ ver := m[2]
+ minorDirty := false
+ patchDirty := false
+ dirty := false
+ if isX(m[3]) || m[3] == "" {
+ ver = fmt.Sprintf("0.0.0%s", m[6])
+ dirty = true
+ } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+ minorDirty = true
+ dirty = true
+ ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+ } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+ dirty = true
+ patchDirty = true
+ ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+ }
+
+ con, err := NewVersion(ver)
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint parser error")
+ }
+
+ cs.con = con
+ cs.minorDirty = minorDirty
+ cs.patchDirty = patchDirty
+ cs.dirty = dirty
+
+ return cs, nil
+ }
+
+ // The rest is the special case where an empty string was passed in which
+ // is equivalent to * or >=0.0.0
+ con, err := StrictNewVersion("0.0.0")
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint parser error")
+ }
+
+ cs := &constraint{
+ con: con,
+ orig: c,
+ origfunc: "",
+ minorDirty: false,
+ patchDirty: false,
+ dirty: true,
+ }
+ return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.dirty {
+ if c.con.Major() != v.Major() {
+ return true, nil
+ }
+ if c.con.Minor() != v.Minor() && !c.minorDirty {
+ return true, nil
+ } else if c.minorDirty {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ } else if c.con.Patch() != v.Patch() && !c.patchDirty {
+ return true, nil
+ } else if c.patchDirty {
+ // Need to handle prereleases if present
+ if v.Prerelease() != "" || c.con.Prerelease() != "" {
+ eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint, includePre bool) (bool, error) {
+
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return true, nil
+ } else if v.Major() < c.con.Major() {
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.minorDirty {
+ // This is a range case such as >11. When the version is something like
+		// 11.1.0 it is not > 11. For that we would need 12 or higher
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.patchDirty {
+ // This is for ranges such as >11.1. A version of 11.1.1 is not greater
+		// while one of 11.2.1 is.
+ eq = v.Minor() > c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+	// If we have gotten here we are not comparing pre-releases and can use the
+ // Compare function to accomplish that.
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) < 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) >= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) <= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+	// ~0.0.0 is a special case where all versions are accepted. It's
+ // equivalent to >= 0.0.0.
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+ !c.minorDirty && !c.patchDirty {
+ return true, nil
+ }
+
+ if v.Major() != c.con.Major() {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ if v.Minor() != c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.dirty {
+ return constraintTilde(v, c, includePre)
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return true, nil
+ }
+
+ return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^* --> (any)
+// ^1.2.3 --> >=1.2.3 <2.0.0
+// ^1.2 --> >=1.2.0 <2.0.0
+// ^1 --> >=1.0.0 <2.0.0
+// ^0.2.3 --> >=0.2.3 <0.3.0
+// ^0.2 --> >=0.2.0 <0.3.0
+// ^0.0.3 --> >=0.0.3 <0.0.4
+// ^0.0 --> >=0.0.0 <0.1.0
+// ^0 --> >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint, includePre bool) (bool, error) {
+ // The existence of prereleases is checked at the group level and passed in.
+ // Exit early if the version has a prerelease but those are to be ignored.
+ if v.Prerelease() != "" && !includePre {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ // This less than handles prereleases
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+ var eq bool
+
+ // ^ when the major > 0 is >=x.y.z < x+1
+ if c.con.Major() > 0 || c.minorDirty {
+
+ // ^ has to be within a major range for > 0. Everything less than was
+ // filtered out with the LessThan call above. This filters out those
+		// that are greater but not within the same major range.
+ eq = v.Major() == c.con.Major()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+ if c.con.Major() == 0 && v.Major() > 0 {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+ // If the con Minor is > 0 it is not dirty
+ if c.con.Minor() > 0 || c.patchDirty {
+ eq = v.Minor() == c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+ }
+	// ^ when the major and constraint minor are 0, the version minor must also be 0 (=0.0.z)
+ if c.con.Minor() == 0 && v.Minor() > 0 {
+ return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+ }
+
+ // At this point the major is 0 and the minor is 0 and not dirty. The patch
+	// is not dirty so we need to check if they are equal. If they are not equal an error is returned.
+ eq = c.con.Patch() == v.Patch()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+ switch x {
+ case "x", "*", "X":
+ return true
+ default:
+ return false
+ }
+}
+
+func rewriteRange(i string) string {
+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+ if m == nil {
+ return i
+ }
+ o := i
+ for _, v := range m {
+ t := fmt.Sprintf(">= %s, <= %s ", v[1], v[11])
+ o = strings.Replace(o, v[0], t, 1)
+ }
+
+ return o
+}
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/doc.go b/operator/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 00000000..74f97caa
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ - Parse semantic versions
+ - Sort semantic versions
+ - Check if a semantic version fits within a set of constraints
+ - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue
+parsing the version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses Constraints. There are some important
+differences to note between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include prereleases
+ within the comparison. It will provide an answer valid with the comparison
+ spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering prereleases to be invalid if the
+    range does not include one. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthand use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parsable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parsable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+ - `=`: equal (aliased to no operator)
+ - `!=`: not equal
+ - `>`: greater than
+ - `<`: less than
+ - `>=`: greater than or equal to
+ - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `>= 1.2.x` is equivalent to `>= 1.2.0`
+ - `<= 2.x` is equivalent to `< 3`
+ - `*` is equivalent to `>= 0.0.0`
+
+# Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+ - `~1` is equivalent to `>= 1, < 2`
+ - `~2.3` is equivalent to `>= 2.3 < 2.4`
+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `~1.x` is equivalent to `>= 1 < 2`
+
+# Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparing API versions because a
+major change is API breaking. For example,
+
+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ - `^2.3` is equivalent to `>= 2.3, < 3`
+ - `^2.x` is equivalent to `>= 2.0.0, < 3`
+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+ - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+	v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+*/
+package semver
diff --git a/operator/vendor/github.com/Masterminds/semver/v3/version.go b/operator/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 00000000..7a3ba738
--- /dev/null
+++ b/operator/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,788 @@
+package semver
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var looseVersionRegex *regexp.Regexp
+
+// CoerceNewVersion sets if leading 0's are allowed in the version part. Leading 0's are
+// not allowed in a valid semantic version. When set to true, NewVersion will coerce
+// leading 0's into a valid version.
+var CoerceNewVersion = true
+
+// DetailedNewVersionErrors specifies if detailed errors are returned from the NewVersion
+// function. This is used when CoerceNewVersion is set to false. If set to false
+// ErrInvalidSemVer is returned for an invalid version. This does not apply to
+// StrictNewVersion. Setting this variable to false returns errors more quickly.
+var DetailedNewVersionErrors = true
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// during parsing.
+ ErrInvalidSemVer = errors.New("invalid semantic version")
+
+ // ErrEmptyString is returned when an empty string is passed in for parsing.
+ ErrEmptyString = errors.New("version string empty")
+
+ // ErrInvalidCharacters is returned when invalid characters are found as
+ // part of a version
+ ErrInvalidCharacters = errors.New("invalid characters in version")
+
+ // ErrSegmentStartsZero is returned when a version segment starts with 0.
+ // This is invalid in SemVer.
+ ErrSegmentStartsZero = errors.New("version segment starts with 0")
+
+ // ErrInvalidMetadata is returned when the metadata is an invalid format
+ ErrInvalidMetadata = errors.New("invalid metadata string")
+
+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format
+ ErrInvalidPrerelease = errors.New("invalid prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+// This is not the official regex from the semver spec. It has been modified to allow for loose handling
+// where versions like 2.1 are detected.
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` +
+ `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` +
+ `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?`
+
+// looseSemVerRegex is a regular expression that lets invalid semver expressions through
+// with enough detail that certain errors can be checked for.
+const looseSemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+ major, minor, patch uint64
+ pre string
+ metadata string
+ original string
+}
+
+func init() {
+ versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+ looseVersionRegex = regexp.MustCompile("^" + looseSemVerRegex + "$")
+}
+
+const (
+ num string = "0123456789"
+ allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+ // Parsing here does not use RegEx in order to increase performance and reduce
+ // allocations.
+
+ if len(v) == 0 {
+ return nil, ErrEmptyString
+ }
+
+ // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+ parts := strings.SplitN(v, ".", 3)
+ if len(parts) != 3 {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ original: v,
+ }
+
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ // Validate the number segments are valid. This includes only having positive
+ // numbers and no leading 0's.
+ for _, p := range parts {
+ if !containsOnly(p, num) {
+ return nil, ErrInvalidCharacters
+ }
+
+ if len(p) > 1 && p[0] == '0' {
+ return nil, ErrSegmentStartsZero
+ }
+ }
+
+ // Extract major, minor, and patch
+ var err error
+ sv.major, err = strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return sv, nil
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. If the version is SemVer-ish it
+// attempts to convert it to SemVer. If you want to validate it was a strict
+// semantic version at parse time see StrictNewVersion().
+func NewVersion(v string) (*Version, error) {
+ if CoerceNewVersion {
+ return coerceNewVersion(v)
+ }
+ m := versionRegex.FindStringSubmatch(v)
+ if m == nil {
+
+ // Disabling detailed errors is first so that it is in the fast path.
+ if !DetailedNewVersionErrors {
+ return nil, ErrInvalidSemVer
+ }
+
+ // Check for specific errors with the semver string and return a more detailed
+ // error.
+ m = looseVersionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+ err := validateVersion(m)
+ if err != nil {
+ return nil, err
+ }
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[5],
+ pre: m[4],
+ original: v,
+ }
+
+ var err error
+ sv.major, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+
+ if m[2] != "" {
+ sv.minor, err = strconv.ParseUint(m[2], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ sv.patch, err = strconv.ParseUint(m[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.patch = 0
+ }
+
+ // Perform some basic due diligence on the extra parts to ensure they are
+ // valid.
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+func coerceNewVersion(v string) (*Version, error) {
+ m := looseVersionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[8],
+ pre: m[5],
+ original: v,
+ }
+
+ var err error
+ sv.major, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+
+ if m[2] != "" {
+ sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing version segment: %w", err)
+ }
+ } else {
+ sv.patch = 0
+ }
+
+ // Perform some basic due diligence on the extra parts to ensure they are
+ // valid.
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+// New creates a new instance of Version with each of the parts passed in as
+// arguments instead of parsing a version string.
+func New(major, minor, patch uint64, pre, metadata string) *Version {
+ v := Version{
+ major: major,
+ minor: minor,
+ patch: patch,
+ pre: pre,
+ metadata: metadata,
+ original: "",
+ }
+
+ v.original = v.String()
+
+ return &v
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+ sv, err := NewVersion(v)
+ if err != nil {
+ panic(err)
+ }
+ return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v Version) String() string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// Major returns the major version.
+func (v Version) Major() uint64 {
+ return v.major
+}
+
+// Minor returns the minor version.
+func (v Version) Minor() uint64 {
+ return v.minor
+}
+
+// Patch returns the patch version.
+func (v Version) Patch() uint64 {
+ return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v Version) Prerelease() string {
+ return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v Version) Metadata() string {
+ return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v Version) originalVPrefix() string {
+ // Note, only lowercase v is supported as a prefix by the parser.
+ if v.original != "" && v.original[:1] == "v" {
+ return v.original[:1]
+ }
+ return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values, increments patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps current patch value
+func (v Version) IncPatch() Version {
+ vNext := v
+ // according to http://semver.org/#spec-item-9
+ // Pre-release versions have a lower precedence than the associated normal version.
+ // according to http://semver.org/#spec-item-10
+ // Build metadata SHOULD be ignored when determining version precedence.
+ if v.pre != "" {
+ vNext.metadata = ""
+ vNext.pre = ""
+ } else {
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = v.patch + 1
+ }
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = v.minor + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = 0
+ vNext.major = v.major + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+ vNext := v
+ if len(prerelease) > 0 {
+ if err := validatePrerelease(prerelease); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.pre = prerelease
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+ vNext := v
+ if len(metadata) > 0 {
+ if err := validateMetadata(metadata); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.metadata = metadata
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
+ return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+ // Compare the major, minor, and patch version for differences. If a
+ // difference is found return the comparison.
+ if d := compareSegment(v.Major(), o.Major()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+ return d
+ }
+
+ // At this point the major, minor, and patch versions are the same.
+ ps := v.pre
+ po := o.Prerelease()
+
+ if ps == "" && po == "" {
+ return 0
+ }
+ if ps == "" {
+ return 1
+ }
+ if po == "" {
+ return -1
+ }
+
+ return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+ temp, err := NewVersion(string(text))
+ if err != nil {
+ return err
+ }
+
+ *v = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+ var s string
+ s, _ = value.(string)
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+ if v < o {
+ return -1
+ }
+ if v > o {
+ return 1
+ }
+
+ return 0
+}
+
+func comparePrerelease(v, o string) int {
+	// split the prerelease versions by their parts. The separator, per the spec,
+	// is a .
+ sparts := strings.Split(v, ".")
+ oparts := strings.Split(o, ".")
+
+ // Find the longer length of the parts to know how many loop iterations to
+ // go through.
+ slen := len(sparts)
+ olen := len(oparts)
+
+ l := slen
+ if olen > slen {
+ l = olen
+ }
+
+ // Iterate over each part of the prereleases to compare the differences.
+ for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+ stemp := ""
+ if i < slen {
+ stemp = sparts[i]
+ }
+
+ otemp := ""
+ if i < olen {
+ otemp = oparts[i]
+ }
+
+ d := comparePrePart(stemp, otemp)
+ if d != 0 {
+ return d
+ }
+ }
+
+	// Reaching here means the prerelease strings compare as equal, so the two
+	// versions have the same precedence. They may still differ in build metadata
+	// (the part following a +), which is ignored by the comparison.
+ return 0
+}
+
+func comparePrePart(s, o string) int {
+ // Fastpath if they are equal
+ if s == o {
+ return 0
+ }
+
+ // When s or o are empty we can use the other in an attempt to determine
+ // the response.
+ if s == "" {
+ if o != "" {
+ return -1
+ }
+ return 1
+ }
+
+ if o == "" {
+ if s != "" {
+ return 1
+ }
+ return -1
+ }
+
+	// When comparing strings, "99" is greater than "103". To handle cases like
+	// this we need to detect numbers and compare them numerically. According to
+	// the semver spec, numbers are always positive; a part with a leading - such
+	// as -99 is evaluated as an alphanumeric identifier. Numeric identifiers
+	// always have lower precedence than alphanumeric ones. Parsing as Uints
+	// because negative numbers are ignored.
+
+ oi, n1 := strconv.ParseUint(o, 10, 64)
+ si, n2 := strconv.ParseUint(s, 10, 64)
+
+	// When both parts fail to parse as numbers, compare them as strings
+ if n1 != nil && n2 != nil {
+ if s > o {
+ return 1
+ }
+ return -1
+ } else if n1 != nil {
+ // o is a string and s is a number
+ return -1
+ } else if n2 != nil {
+ // s is a string and o is a number
+ return 1
+ }
+ // Both are numbers
+ if si > oi {
+ return 1
+ }
+ return -1
+}
+
+// containsOnly is like strings.ContainsAny but checks that s contains only runes from comp.
+func containsOnly(s string, comp string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(comp, r)
+ }) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+ eparts := strings.Split(p, ".")
+ for _, p := range eparts {
+ if p == "" {
+ return ErrInvalidPrerelease
+ } else if containsOnly(p, num) {
+ if len(p) > 1 && p[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ } else if !containsOnly(p, allowed) {
+ return ErrInvalidPrerelease
+ }
+ }
+
+ return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+ eparts := strings.Split(m, ".")
+ for _, p := range eparts {
+ if p == "" {
+ return ErrInvalidMetadata
+ } else if !containsOnly(p, allowed) {
+ return ErrInvalidMetadata
+ }
+ }
+ return nil
+}
+
+// validateVersion checks for common validation issues but may not catch all errors
+func validateVersion(m []string) error {
+ var err error
+ var v string
+ if m[1] != "" {
+ if len(m[1]) > 1 && m[1][0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[2] != "" {
+ v = strings.TrimPrefix(m[2], ".")
+ if len(v) > 1 && v[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[3] != "" {
+ v = strings.TrimPrefix(m[3], ".")
+ if len(v) > 1 && v[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ _, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing version segment: %w", err)
+ }
+ }
+
+ if m[5] != "" {
+ if err = validatePrerelease(m[5]); err != nil {
+ return err
+ }
+ }
+
+ if m[8] != "" {
+ if err = validateMetadata(m[8]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
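
The `Version` methods above implement semver precedence: a prerelease sorts below its release, numeric prerelease identifiers compare numerically, and build metadata is ignored. A minimal sketch of that behavior, assuming the vendored package is `github.com/Masterminds/semver/v3` (the example values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	release := semver.MustParse("1.2.3")
	rc := semver.MustParse("1.2.3-rc.2")

	// A prerelease is lower than the same version without one.
	fmt.Println(rc.LessThan(release)) // true

	// Numeric prerelease identifiers compare numerically, not lexically.
	fmt.Println(semver.MustParse("1.2.3-rc.10").GreaterThan(rc)) // true

	// Build metadata is ignored by Compare/Equal.
	fmt.Println(release.Equal(semver.MustParse("1.2.3+build.7"))) // true

	// IncMinor bumps the minor version and drops patch, prerelease, and metadata.
	fmt.Println(rc.IncMinor()) // 1.3.0

	// SetPrerelease takes the value without the leading hyphen.
	next, _ := release.SetPrerelease("beta.1")
	fmt.Println(next.String()) // 1.2.3-beta.1
}
```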
diff --git a/operator/vendor/github.com/go-logr/logr/.golangci.yaml b/operator/vendor/github.com/go-logr/logr/.golangci.yaml
index 0cffafa7..0ed62c1a 100644
--- a/operator/vendor/github.com/go-logr/logr/.golangci.yaml
+++ b/operator/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -1,26 +1,28 @@
+version: "2"
+
run:
timeout: 1m
tests: true
linters:
- disable-all: true
- enable:
+ default: none
+ enable: # please keep this alphabetized
+ - asasalint
- asciicheck
+ - copyloopvar
+ - dupl
- errcheck
- forcetypeassert
+ - goconst
- gocritic
- - gofmt
- - goimports
- - gosimple
- govet
- ineffassign
- misspell
+ - musttag
- revive
- staticcheck
- - typecheck
- unused
issues:
- exclude-use-default: false
max-issues-per-linter: 0
max-same-issues: 10
diff --git a/operator/vendor/github.com/go-logr/logr/funcr/funcr.go b/operator/vendor/github.com/go-logr/logr/funcr/funcr.go
index 30568e76..b22c57d7 100644
--- a/operator/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/operator/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -77,7 +77,7 @@ func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
- l.Formatter.AddCallDepth(1)
+ l.AddCallDepth(1) // via Formatter
return l
}
@@ -164,17 +164,17 @@ type fnlogger struct {
}
func (l fnlogger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
+ l.AddName(name) // via Formatter
return &l
}
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
- l.Formatter.AddValues(kvList)
+ l.AddValues(kvList) // via Formatter
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
+ l.AddCallDepth(depth) // via Formatter
return &l
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/operator/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
index f9d52e58..09217941 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -1,3 +1,177 @@
+## 2.27.2
+
+### Fixes
+- inline automaxprocs to simplify dependencies; this will be removed when Go 1.26 comes out [a69113a]
+
+### Maintenance
+- Fix syntax errors and typo [a99c6e0]
+- Fix paragraph position error [f993df5]
+
+## 2.27.1
+
+### Fixes
+- Fix Ginkgo Reporter slice-bounds panic [606c1cb]
+- Bug Fix: Add GinkoTBWrapper.Attr() and GinkoTBWrapper.Output() [a6463b3]
+
+## 2.27.0
+
+### Features
+
+#### Transforming Nodes during Tree Construction
+
+This release adds support for `NodeArgsTransformer`s that can be registered with `AddTreeConstructionNodeArgsTransformer`.
+
+These are called during the tree construction phase as nodes are constructed and can modify the node strings and decorators. This enables frameworks built on top of Ginkgo to modify Ginkgo nodes and enforce conventions.
+
+Learn more [here](https://onsi.github.io/ginkgo/#advanced-transforming-node-arguments-during-tree-construction).
+
+#### Spec Prioritization
+
+A new `SpecPriority(int)` decorator has been added. Ginkgo will honor priority when ordering specs, ensuring that higher-priority specs start running before lower-priority specs.
+
+Learn more [here](https://onsi.github.io/ginkgo/#prioritizing-specs).
+
+### Maintenance
+- Bump rexml from 3.4.0 to 3.4.2 in /docs (#1595) [1333dae]
+- Bump github.com/gkampitakis/go-snaps from 0.5.14 to 0.5.15 (#1600) [17ae63e]
+
+## 2.26.0
+
+### Features
+
+Ginkgo can now generate json-formatted reports that are compatible with the `go test` json format. Use `ginkgo --gojson-report=report.go.json`. This is not intended to be a replacement for Ginkgo's native json format which is more information rich and better models Ginkgo's test structure semantics.
+
+## 2.25.3
+
+### Fixes
+
+- emit --github-output group only for progress report itself [f01aed1]
+
+## 2.25.2
+
+### Fixes
+Add github output group for progress report content
+
+### Maintenance
+Bump Gomega
+
+## 2.25.1
+
+### Fixes
+- fix(types): ignore nameless nodes on FullText() [10866d3]
+- chore: fix some CodeQL warnings [2e42cff]
+
+## 2.25.0
+
+### `AroundNode`
+
+This release introduces a new decorator to support more complex spec setup usecases.
+
+`AroundNode` registers a function that runs before each individual node. This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples.
+
+Allowed signatures:
+
+- `AroundNode(func())` - `func` will be called before the node is run.
+- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed in context and return a new one which will be passed on to the node.
+- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node. This gives you complete control over what runs before and after the node.
+
+Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread).
+
+Since `AroundNode` allows you to modify the context you can also use `AroundNode` to implement shared setup that attaches values to the context.
+
+If applied to a container, `AroundNode` will run before every node in the container, including setup nodes like `BeforeEach` and `DeferCleanup`.
+
+`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite. This opens up new mechanisms for instrumenting individual nodes across an entire suite.
+
+## 2.24.0
+
+### Features
+
+Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version. Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)! Thanks to @Icarus9913 for the PR.
+
+### Fixes
+
+- remove -o from run command [3f5d379]. fixes [#1582](https://github.com/onsi/ginkgo/issues/1582)
+
+### Maintenance
+
+Numerous dependency bumps and documentation fixes
+
+## 2.23.4
+
+Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container. Thanks to @emirot for the fix!
+
+### Features
+- Add automaxprocs for using CPUQuota [2b9c428]
+
+### Fixes
+- clarify gotchas about -vet flag [1f59d07]
+
+### Maintenance
+- bump dependencies [2d134d5]
+
+## 2.23.3
+
+### Fixes
+
+- allow `-` as a standalone argument [cfcc1a5]
+- Bug Fix: Add GinkoTBWrapper.Chdir() and GinkoTBWrapper.Context() [feaf292]
+- ignore exit code for symbol test on linux [88e2282]
+
+## 2.23.2
+
+🎉🎉🎉
+
+At long last, some long-standing performance gaps between `ginkgo` and `go test` have been resolved!
+
+Ginkgo operates by running `go test -c` to generate test binaries, and then running those binaries. It turns out that the compilation step of `go test -c` is slower than `go test`'s compilation step because `go test` strips out debug symbols (`ldflags=-w`) whereas `go test -c` does not.
+
+Ginkgo now passes the appropriate `ldflags` to `go test -c` when running specs to strip out symbols. This is only done when it is safe to do so; symbols are preserved when profiling is enabled and when `ginkgo build` is called explicitly.
+
+This, coupled with the [instructions for disabling XProtect on MacOS](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos), yields a much better performance experience with Ginkgo.
+
+## 2.23.1
+
+## 🚨 For users on MacOS 🚨
+
+A long-standing Ginkgo performance issue on MacOS seems to be due to mac's antimalware XProtect. You can follow the instructions [here](https://onsi.github.io/ginkgo/#if-you-are-running-on-macos) to disable it in your terminal. Doing so sped up Ginkgo's own test suite from 1m8s to 47s.
+
+### Fixes
+
+Ginkgo's CLI is now a bit clearer if you pass flags in incorrectly:
+
+- make it clearer that you need to pass a filename to the various profile flags, not an absolute directory [a0e52ff]
+- emit an error and exit if the ginkgo invocation includes flags after positional arguments [b799d8d]
+
+This might cause existing CI builds to fail. If so then it's likely that your CI build was misconfigured and should be corrected. Open an issue if you need help.
+
+## 2.23.0
+
+Ginkgo 2.23.0 adds a handful of methods to `GinkgoT()` to make it compatible with the `testing.TB` interface in Go 1.24. `GinkgoT().Context()`, in particular, is a useful shorthand for generating a new context that will clean itself up in a `DeferCleanup()`. This has subtle behavior differences from the golang implementation but should make sense in a Ginkgo... um... context.
+
+### Features
+- bump to go 1.24.0 - support new testing.TB methods and add a test to cover testing.TB regressions [37a511b]
+
+### Fixes
+- fix edge case where build -o is pointing at an explicit file, not a directory [7556a86]
+- Fix binary paths when precompiling multiple suites. [4df06c6]
+
+### Maintenance
+- Fix: Correct Markdown list rendering in MIGRATING_TO_V2.md [cbcf39a]
+- docs: fix test workflow badge (#1512) [9b261ff]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1516) [00f19c8]
+- Bump golang.org/x/tools from 0.28.0 to 0.30.0 (#1515) [e98a4df]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#1504) [60cc4e2]
+- Bump github-pages from 231 to 232 in /docs (#1447) [fea6f2d]
+- Bump rexml from 3.2.8 to 3.3.9 in /docs (#1497) [31d7813]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#1501) [fc3bbd6]
+- Code linting (#1500) [aee0d56]
+- change interface{} to any (#1502) [809a710]
+
## 2.22.2
### Maintenance
@@ -630,7 +804,7 @@ Ginkgo also uses this progress reporting infrastructure under the hood when hand
### Features
- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
- As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...any`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
### Maintenance
- Modernize the invocation of Ginkgo in github actions [0ffde58]
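
As a rough sketch of the three `AroundNode` signatures listed in the 2.25.0 notes above (the spec text, `traceKey` type, and the lock/unlock usage are illustrative, not part of Ginkgo):

```go
package example_test

import (
	"context"
	"runtime"

	. "github.com/onsi/ginkgo/v2"
)

type traceKey struct{}

var _ = Describe("a wrapper around a native library", func() {
	It("talks to the library", func(ctx SpecContext) {
		// spec body ...
	},
		// func(): runs before the node.
		AroundNode(func() { /* e.g. reset shared state */ }),

		// func(ctx) ctx: wrap the node's context with extra values.
		AroundNode(func(ctx context.Context) context.Context {
			return context.WithValue(ctx, traceKey{}, "spec-trace")
		}),

		// func(ctx, body): full control; body must be called to run the node.
		AroundNode(func(ctx context.Context, body func(ctx context.Context)) {
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()
			body(ctx)
		}),
	)
})
```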
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/README.md b/operator/vendor/github.com/onsi/ginkgo/v2/README.md
index cb23ffdf..7b7ab9e3 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/README.md
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -1,6 +1,6 @@

-[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+[](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
---
@@ -113,3 +113,13 @@ Ginkgo is MIT-Licensed
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md)
+
+## Sponsors
+
+Sponsors commit to a [sponsorship](https://github.com/sponsors/onsi) for a year. If you're an organization that makes use of Ginkgo please consider becoming a sponsor!
+
+
Browser testing via
+
+
+
+
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
index a3e8237e..7e165e47 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -83,9 +83,9 @@ func exitIfErrors(errors []error) {
type GinkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
TeeTo(writer io.Writer)
ClearTeeWriters()
@@ -186,6 +186,20 @@ func GinkgoLabelFilter() string {
return suiteConfig.LabelFilter
}
+/*
+GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`.
+
+You can use this to manually check if a set of semantic version constraints would satisfy the filter via:
+
+ if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) {
+ //...
+ }
+*/
+func GinkgoSemVerFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.SemVerFilter
+}
+
/*
PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -243,7 +257,7 @@ for more on how specs are parallelized in Ginkgo.
You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
*/
-func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+func RunSpecs(t GinkgoTestingT, description string, args ...any) bool {
if suiteDidRun {
exitIfErr(types.GinkgoErrors.RerunningSuite())
}
@@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
}
defer global.PopClone()
- suiteLabels := extractSuiteConfiguration(args)
+ suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
var reporter reporters.Reporter
if suiteConfig.ParallelTotal == 1 {
@@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
- passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
outputInterceptor.Shutdown()
flagSet.ValidateDeprecations(deprecationTracker)
@@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
return passed
}
-func extractSuiteConfiguration(args []interface{}) Labels {
+func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) {
suiteLabels := Labels{}
+ suiteSemVerConstraints := SemVerConstraints{}
+ aroundNodes := types.AroundNodes{}
configErrors := []error{}
for _, arg := range args {
switch arg := arg.(type) {
@@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []interface{}) Labels {
reporterConfig = arg
case Labels:
suiteLabels = append(suiteLabels, arg...)
+ case SemVerConstraints:
+ suiteSemVerConstraints = append(suiteSemVerConstraints, arg...)
+ case types.AroundNodeDecorator:
+ aroundNodes = append(aroundNodes, arg)
default:
configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
}
@@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []interface{}) Labels {
os.Exit(1)
}
- return suiteLabels
+ return suiteLabels, suiteSemVerConstraints, aroundNodes
}
func getwd() (string, error) {
@@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report {
}
defer global.PopClone()
- suiteLabels := extractSuiteConfiguration(args)
+ suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args)
priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
defer func() {
@@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report {
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
- global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
return global.Suite.GetPreviewReport()
}
@@ -481,6 +501,38 @@ func pushNode(node internal.Node, errors []error) bool {
return true
}
+// NodeArgsTransformer is a hook which is called by the test construction DSL methods
+// before creating the new node. If it returns any error, the test suite
+// prints those errors and exits. The text and arguments can be modified,
+// which includes directly changing the args slice that is passed in.
+// Arguments have been flattened already, i.e. none of the entries in args is another []any.
+// The result may be nested.
+//
+// The node type is provided for information and remains the same.
+//
+// The offset is valid for calling NewLocation directly in the
+// implementation of TransformNodeArgs to find the location where
+// the Ginkgo DSL function is called. An additional offset supplied
+// by the caller via args is already included.
+//
+// A NodeArgsTransformer can be registered with AddTreeConstructionNodeArgsTransformer.
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+// AddTreeConstructionNodeArgsTransformer registers a NodeArgsTransformer.
+// Only nodes which get created after registering a NodeArgsTransformer
+// are transformed by it. The returned function can be called to
+// unregister the transformer.
+//
+// Both may only be called during the construction phase.
+//
+// If there is more than one registered transformer, then the most
+// recently added ones get called first.
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+ // This conversion could be avoided with a type alias, but type aliases make
+ // developer documentation less useful.
+ return internal.AddTreeConstructionNodeArgsTransformer(internal.NodeArgsTransformer(transformer))
+}
+
/*
Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
@@ -491,24 +543,24 @@ to Describe the behavior of an object or function and, within that Describe, out
You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func Describe(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+func Describe(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
FDescribe focuses specs within the Describe block.
*/
-func FDescribe(text string, args ...interface{}) bool {
+func FDescribe(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
PDescribe marks specs within the Describe block as pending.
*/
-func PDescribe(text string, args ...interface{}) bool {
+func PDescribe(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, text, args...)))
}
/*
@@ -521,21 +573,21 @@ var XDescribe = PDescribe
/* Context is an alias for Describe - it generates the exact same kind of Container node */
var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
-/* When is an alias for Describe - it generates the exact same kind of Container node */
-func When(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
+func When(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
-/* When is an alias for Describe - it generates the exact same kind of Container node */
-func FWhen(text string, args ...interface{}) bool {
+/* When is an alias for Describe - it generates the exact same kind of Container node with "when " as prefix for the text. */
+func FWhen(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
/* When is an alias for Describe - it generates the exact same kind of Container node */
-func PWhen(text string, args ...interface{}) bool {
+func PWhen(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, "when "+text, args...)))
}
var XWhen = PWhen
@@ -550,24 +602,24 @@ You can pass It nodes bare functions (func() {}) or functions that receive a Spe
You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
*/
-func It(text string, args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+func It(text string, args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
FIt allows you to focus an individual It.
*/
-func FIt(text string, args ...interface{}) bool {
+func FIt(text string, args ...any) bool {
args = append(args, internal.Focus)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
PIt allows you to mark an individual It as pending.
*/
-func PIt(text string, args ...interface{}) bool {
+func PIt(text string, args ...any) bool {
args = append(args, internal.Pending)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeIt, text, args...)))
}
/*
@@ -611,10 +663,10 @@ BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(c
You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func BeforeSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func BeforeSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)))
}
/*
@@ -630,10 +682,10 @@ AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
*/
-func AfterSuite(body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func AfterSuite(body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)))
}
/*
@@ -667,11 +719,11 @@ If either function receives a context.Context/SpecContext it is considered inter
You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{process1Body, allProcessBody}
+func SynchronizedBeforeSuite(process1Body any, allProcessBody any, args ...any) bool {
+ combinedArgs := []any{process1Body, allProcessBody}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)))
}
/*
@@ -687,11 +739,11 @@ Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accompli
You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure.
You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
*/
-func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
- combinedArgs := []interface{}{allProcessBody, process1Body}
+func SynchronizedAfterSuite(allProcessBody any, process1Body any, args ...any) bool {
+ combinedArgs := []any{allProcessBody, process1Body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)))
}
/*
@@ -703,8 +755,8 @@ BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(co
You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
*/
-func BeforeEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+func BeforeEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeEach, "", args...)))
}
/*
@@ -716,8 +768,8 @@ JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/fun
You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
*/
-func JustBeforeEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+func JustBeforeEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustBeforeEach, "", args...)))
}
/*
@@ -731,8 +783,8 @@ AfterEach can take a func() body, or an interruptible func(SpecContext)/func(con
You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
*/
-func AfterEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+func AfterEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterEach, "", args...)))
}
/*
@@ -743,8 +795,8 @@ JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func
You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
*/
-func JustAfterEach(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+func JustAfterEach(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeJustAfterEach, "", args...)))
}
/*
@@ -758,8 +810,8 @@ You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func BeforeAll(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+func BeforeAll(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeBeforeAll, "", args...)))
}
/*
@@ -775,8 +827,8 @@ You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
*/
-func AfterAll(args ...interface{}) bool {
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+func AfterAll(args ...any) bool {
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeAfterAll, "", args...)))
}
/*
@@ -818,7 +870,7 @@ When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite,
Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
*/
-func DeferCleanup(args ...interface{}) {
+func DeferCleanup(args ...any) {
fail := func(message string, cl types.CodeLocation) {
global.Failer.Fail(message, cl)
}
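
A hedged sketch of the `NodeArgsTransformer` hook described above: a transformer that enforces a naming convention on container nodes. The `[conv]` prefix and registration site are illustrative; per the doc comment, the hook must be registered before the nodes it should transform are constructed.

```go
package example_test

import (
	"strings"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
)

// Register during tree construction; the returned func unregisters the hook.
var unregister = AddTreeConstructionNodeArgsTransformer(
	func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error) {
		// Only touch container nodes (Describe/Context/When); leave args alone.
		if nodeType.Is(types.NodeTypeContainer) && !strings.HasPrefix(text, "[conv] ") {
			text = "[conv] " + text
		}
		// Returning no errors lets tree construction continue.
		return text, args, nil
	})
```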
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
index c65af4ce..e331d7cf 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -2,6 +2,7 @@ package ginkgo
import (
"github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
)
/*
@@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
*/
type Labels = internal.Labels
+/*
+SemVerConstraint decorates specs with SemVerConstraints. Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules.
+SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node, and a spec's semantic version constraints are the union of all semantic version constraints in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func SemVerConstraint(semVerConstraints ...string) SemVerConstraints {
+ return SemVerConstraints(semVerConstraints)
+}
+
+/*
+SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering
+*/
+type SemVerConstraints = internal.SemVerConstraints
+
/*
PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
@@ -136,8 +154,40 @@ Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will pro
*/
type GracePeriod = internal.GracePeriod
+/*
+SpecPriority allows you to assign a priority to a spec or container.
+
+Specs with higher priority will be scheduled to run before specs with lower priority. The default priority is 0 and negative priorities are allowed.
+*/
+type SpecPriority = internal.SpecPriority
+
/*
SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly
if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
*/
const SuppressProgressReporting = internal.SuppressProgressReporting
+
+/*
+AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator.
+
+Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information.
+
+Allowed signatures:
+
+- AroundNode(func()) - func will be called before the node is run.
+- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node.
+- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node.
+
+Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied.
+
+Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread).
+
+Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context.
+
+If applied to a container, AroundNode will run before every node in the container, including setup nodes like BeforeEach and DeferCleanup.
+
+AroundNode can also be applied to RunSpecs to run before every node in the suite.
+*/
+func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator {
+ return types.AroundNode(f, types.NewCodeLocation(1))
+}
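
A short sketch of the decorators documented above; the constraint strings, priority value, and spec names are illustrative:

```go
package example_test

import (
	. "github.com/onsi/ginkgo/v2"
)

var _ = Describe("feature gates", SemVerConstraint(">= 2.1.0"), func() {
	// Runs only when a filter such as `ginkgo --sem-ver-filter="2.1.3"`
	// satisfies every constraint in this node's hierarchy.
	It("supports the new API", SemVerConstraint("< 3.0.0"), func() {})

	// Higher-priority specs are scheduled ahead of lower-priority ones.
	It("starts the slow soak test early", SpecPriority(10), func() {})
})
```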
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
index f912bbec..fd45b8be 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -118,9 +118,9 @@ Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+ Time(name string, body func(), info ...any) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...any)
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...any)
}
/*
@@ -129,7 +129,7 @@ Deprecated: Measure() has been removed from Ginkgo 2.0
Use Gomega's gmeasure package instead.
You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
*/
-func Measure(_ ...interface{}) bool {
+func Measure(_ ...any) bool {
deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
return true
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/operator/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
index 4d574911..f61356db 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -24,15 +24,15 @@ const (
var SingletonFormatter = New(ColorModeTerminal)
-func F(format string, args ...interface{}) string {
+func F(format string, args ...any) string {
return SingletonFormatter.F(format, args...)
}
-func Fi(indentation uint, format string, args ...interface{}) string {
+func Fi(indentation uint, format string, args ...any) string {
return SingletonFormatter.Fi(indentation, format, args...)
}
-func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}
@@ -115,15 +115,15 @@ func New(colorMode ColorMode) Formatter {
return f
}
-func (f Formatter) F(format string, args ...interface{}) string {
+func (f Formatter) F(format string, args ...any) string {
return f.Fi(0, format, args...)
}
-func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+func (f Formatter) Fi(indentation uint, format string, args ...any) string {
return f.Fiw(indentation, 0, format, args...)
}
-func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
out := f.style(format)
if len(args) > 0 {
out = fmt.Sprintf(out, args...)
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
new file mode 100644
index 00000000..ee6ac7b5
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs.go
@@ -0,0 +1,8 @@
+//go:build !go1.25
+// +build !go1.25
+
+package main
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo/automaxprocs"
+)
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
new file mode 100644
index 00000000..e249ebe8
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/README.md
@@ -0,0 +1,3 @@
+This entire directory is a lightly modified clone of https://github.com/uber-go/automaxprocs
+
+It will be removed when Go 1.26 ships and we no longer need to support Go 1.24 (which does not correctly autodetect maxprocs in containers).
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
new file mode 100644
index 00000000..8a762b51
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/automaxprocs.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package automaxprocs lets Go programs easily configure runtime.GOMAXPROCS to
+// match the configured Linux CPU quota. This vendored copy applies the
+// adjustment automatically from an init function.
+package automaxprocs
+
+import (
+ "os"
+ "runtime"
+)
+
+func init() {
+ Set()
+}
+
+const _maxProcsKey = "GOMAXPROCS"
+
+type config struct {
+ procs func(int, func(v float64) int) (int, CPUQuotaStatus, error)
+ minGOMAXPROCS int
+ roundQuotaFunc func(v float64) int
+}
+
+// Set adjusts GOMAXPROCS to match the Linux container CPU quota (if any),
+// returning any error encountered.
+//
+// Set is a no-op on non-Linux systems and in Linux environments without a
+// configured CPU quota.
+func Set() error {
+ cfg := &config{
+ procs: CPUQuotaToGOMAXPROCS,
+ roundQuotaFunc: DefaultRoundFunc,
+ minGOMAXPROCS: 1,
+ }
+
+	// Honor the GOMAXPROCS environment variable if present. Otherwise, amend
+	// `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
+	// Linux, and guarantee a minimum value of 1.
+ if _, exists := os.LookupEnv(_maxProcsKey); exists {
+ return nil
+ }
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
+ if err != nil {
+ return err
+ }
+ if status == CPUQuotaUndefined {
+ return nil
+ }
+ runtime.GOMAXPROCS(maxProcs)
+ return nil
+}
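
The comments above describe the adjustment Set performs: an explicit GOMAXPROCS wins, otherwise the quota is rounded down with a guaranteed minimum of 1. A minimal standalone sketch of that logic, with `quotaFn` standing in for the platform-specific quota lookup (`CPUQuotaToGOMAXPROCS` in the vendored code), and assuming `DefaultRoundFunc` rounds down:

```go
package main

import (
	"fmt"
	"os"
	"runtime"
)

// adjust mirrors the behavior described above; quotaFn is a stand-in.
func adjust(quotaFn func() (quota float64, defined bool)) {
	// An explicit GOMAXPROCS environment variable always wins.
	if _, ok := os.LookupEnv("GOMAXPROCS"); ok {
		return
	}
	quota, defined := quotaFn()
	if !defined {
		return // no CPU quota configured; keep the runtime default
	}
	procs := int(quota) // assumption: DefaultRoundFunc rounds the quota down
	if procs < 1 {
		procs = 1 // guaranteed minimum of 1
	}
	runtime.GOMAXPROCS(procs)
}

func main() {
	// Pretend the cgroup reports a quota of 2.5 CPUs.
	adjust(func() (float64, bool) { return 2.5, true })
	fmt.Println(runtime.GOMAXPROCS(0)) // 2
}
```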
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
new file mode 100644
index 00000000..a4676933
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroup.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+// CGroup represents the data structure for a Linux control group.
+type CGroup struct {
+ path string
+}
+
+// NewCGroup returns a new *CGroup from a given path.
+func NewCGroup(path string) *CGroup {
+ return &CGroup{path: path}
+}
+
+// Path returns the path of the CGroup.
+func (cg *CGroup) Path() string {
+ return cg.path
+}
+
+// ParamPath returns the path of the given cgroup param under itself.
+func (cg *CGroup) ParamPath(param string) string {
+ return filepath.Join(cg.path, param)
+}
+
+// readFirstLine reads the first line from a cgroup param file.
+func (cg *CGroup) readFirstLine(param string) (string, error) {
+ paramFile, err := os.Open(cg.ParamPath(param))
+ if err != nil {
+ return "", err
+ }
+ defer paramFile.Close()
+
+ scanner := bufio.NewScanner(paramFile)
+ if scanner.Scan() {
+ return scanner.Text(), nil
+ }
+ if err := scanner.Err(); err != nil {
+ return "", err
+ }
+ return "", io.ErrUnexpectedEOF
+}
+
+// readInt parses the first line from a cgroup param file as int.
+func (cg *CGroup) readInt(param string) (int, error) {
+ text, err := cg.readFirstLine(param)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.Atoi(text)
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
new file mode 100644
index 00000000..ed384891
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups.go
@@ -0,0 +1,118 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+const (
+ // _cgroupFSType is the Linux CGroup file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupFSType = "cgroup"
+ // _cgroupSubsysCPU is the CPU CGroup subsystem.
+ _cgroupSubsysCPU = "cpu"
+ // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
+ _cgroupSubsysCPUAcct = "cpuacct"
+ // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
+ _cgroupSubsysCPUSet = "cpuset"
+ // _cgroupSubsysMemory is the Memory CGroup subsystem.
+ _cgroupSubsysMemory = "memory"
+
+ // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
+ // parameter.
+ _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
+ // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
+ // parameter.
+ _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
+)
+
+const (
+ _procPathCGroup = "/proc/self/cgroup"
+ _procPathMountInfo = "/proc/self/mountinfo"
+)
+
+// CGroups is a map that associates each CGroup with its subsystem name.
+type CGroups map[string]*CGroup
+
+// NewCGroups returns a new *CGroups from the given `mountinfo` and `cgroup`
+// files of a process under the `/proc` file system (see also proc(5) for more
+// information).
+func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
+ cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ cgroups := make(CGroups)
+ newMountPoint := func(mp *MountPoint) error {
+ if mp.FSType != _cgroupFSType {
+ return nil
+ }
+
+ for _, opt := range mp.SuperOptions {
+ subsys, exists := cgroupSubsystems[opt]
+ if !exists {
+ continue
+ }
+
+ cgroupPath, err := mp.Translate(subsys.Name)
+ if err != nil {
+ return err
+ }
+ cgroups[opt] = NewCGroup(cgroupPath)
+ }
+
+ return nil
+ }
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return nil, err
+ }
+ return cgroups, nil
+}
+
+// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
+// process.
+func NewCGroupsForCurrentProcess() (CGroups, error) {
+ return NewCGroups(_procPathMountInfo, _procPathCGroup)
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
+// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
+// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, false, nil)`.
+func (cg CGroups) CPUQuota() (float64, bool, error) {
+ cpuCGroup, exists := cg[_cgroupSubsysCPU]
+ if !exists {
+ return -1, false, nil
+ }
+
+ cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
+ if defined := cfsQuotaUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
+ if defined := cfsPeriodUs > 0; err != nil || !defined {
+ return -1, defined, err
+ }
+
+ return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
+}
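
The cgroup v1 quota above is simply `cpu.cfs_quota_us / cpu.cfs_period_us`, with a non-positive quota meaning "no limit". A tiny standalone sketch of that arithmetic (the microsecond values are examples):

```go
package main

import "fmt"

// cpuQuota mirrors the cgroup v1 calculation above.
func cpuQuota(cfsQuotaUs, cfsPeriodUs int64) (float64, bool) {
	if cfsQuotaUs <= 0 || cfsPeriodUs <= 0 {
		return -1, false // quota not defined
	}
	return float64(cfsQuotaUs) / float64(cfsPeriodUs), true
}

func main() {
	// A container limited to 1.5 CPUs: 150000us quota per 100000us period.
	fmt.Println(cpuQuota(150000, 100000)) // 1.5 true

	// cpu.cfs_quota_us of -1 means unlimited.
	fmt.Println(cpuQuota(-1, 100000)) // -1 false
}
```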
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
new file mode 100644
index 00000000..69a0be6b
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cgroups2.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+)
+
+const (
+ // _cgroupv2CPUMax is the file name for the CGroup-V2 CPU max and period
+ // parameter.
+ _cgroupv2CPUMax = "cpu.max"
+ // _cgroupv2FSType is the Linux CGroup-V2 file system type used in
+ // `/proc/$PID/mountinfo`.
+ _cgroupv2FSType = "cgroup2"
+
+ _cgroupv2MountPoint = "/sys/fs/cgroup"
+
+ _cgroupV2CPUMaxDefaultPeriod = 100000
+ _cgroupV2CPUMaxQuotaMax = "max"
+)
+
+const (
+ _cgroupv2CPUMaxQuotaIndex = iota
+ _cgroupv2CPUMaxPeriodIndex
+)
+
+// ErrNotV2 indicates that the system is not using cgroups2.
+var ErrNotV2 = errors.New("not using cgroups2")
+
+// CGroups2 provides access to cgroups data for systems using cgroups2.
+type CGroups2 struct {
+ mountPoint string
+ groupPath string
+ cpuMaxFile string
+}
+
+// NewCGroups2ForCurrentProcess builds a CGroups2 for the current process.
+//
+// This returns ErrNotV2 if the system is not using cgroups2.
+func NewCGroups2ForCurrentProcess() (*CGroups2, error) {
+ return newCGroups2From(_procPathMountInfo, _procPathCGroup)
+}
+
+func newCGroups2From(mountInfoPath, procPathCGroup string) (*CGroups2, error) {
+ isV2, err := isCGroupV2(mountInfoPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isV2 {
+ return nil, ErrNotV2
+ }
+
+ subsystems, err := parseCGroupSubsystems(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+
+ // Find v2 subsystem by looking for the `0` id
+ var v2subsys *CGroupSubsys
+ for _, subsys := range subsystems {
+ if subsys.ID == 0 {
+ v2subsys = subsys
+ break
+ }
+ }
+
+ if v2subsys == nil {
+ return nil, ErrNotV2
+ }
+
+ return &CGroups2{
+ mountPoint: _cgroupv2MountPoint,
+ groupPath: v2subsys.Name,
+ cpuMaxFile: _cgroupv2CPUMax,
+ }, nil
+}
+
+func isCGroupV2(procPathMountInfo string) (bool, error) {
+ var (
+ isV2 bool
+ newMountPoint = func(mp *MountPoint) error {
+ isV2 = isV2 || (mp.FSType == _cgroupv2FSType && mp.MountPoint == _cgroupv2MountPoint)
+ return nil
+ }
+ )
+
+ if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
+ return false, err
+ }
+
+ return isV2, nil
+}
+
+// CPUQuota returns the CPU quota applied with the CPU cgroup2 controller.
+// It is the result of reading the CPU quota and period from the cpu.max file
+// and returns `quota / period`. If the quota is set to "max", it returns
+// (-1, false, nil).
+func (cg *CGroups2) CPUQuota() (float64, bool, error) {
+ cpuMaxParams, err := os.Open(path.Join(cg.mountPoint, cg.groupPath, cg.cpuMaxFile))
+ if err != nil {
+ if os.IsNotExist(err) {
+ return -1, false, nil
+ }
+ return -1, false, err
+ }
+ defer cpuMaxParams.Close()
+
+ scanner := bufio.NewScanner(cpuMaxParams)
+ if scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 || len(fields) > 2 {
+ return -1, false, fmt.Errorf("invalid format")
+ }
+
+ if fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
+ return -1, false, nil
+ }
+
+ max, err := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ var period int
+ if len(fields) == 1 {
+ period = _cgroupV2CPUMaxDefaultPeriod
+ } else {
+ period, err = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
+ if err != nil {
+ return -1, false, err
+ }
+
+ if period == 0 {
+ return -1, false, errors.New("zero value for period is not allowed")
+ }
+ }
+
+ return float64(max) / float64(period), true, nil
+ }
+
+ if err := scanner.Err(); err != nil {
+ return -1, false, err
+ }
+
+ return 0, false, io.ErrUnexpectedEOF
+}
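
A worked sketch of the cpu.max format that CPUQuota handles above; the parser below is a simplified stand-in written as if it lived in the same package (error handling elided, sample contents are assumptions):

// cpu.max holds "<quota> <period>", "max <period>", or a bare quota:
//
//	"200000 100000" -> 2.0 CPUs
//	"max 100000"    -> no limit (CPUQuota above returns (-1, false, nil))
//	"150000"        -> period defaults to 100000us, so 1.5 CPUs
func exampleParseCPUMax(contents string) (float64, bool) {
	fields := strings.Fields(contents)
	if len(fields) == 0 || fields[_cgroupv2CPUMaxQuotaIndex] == _cgroupV2CPUMaxQuotaMax {
		return -1, false
	}
	quota, _ := strconv.Atoi(fields[_cgroupv2CPUMaxQuotaIndex])
	period := _cgroupV2CPUMaxDefaultPeriod
	if len(fields) > 1 {
		period, _ = strconv.Atoi(fields[_cgroupv2CPUMaxPeriodIndex])
	}
	return float64(quota) / float64(period), true
}
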
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
new file mode 100644
index 00000000..2d83343b
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_linux.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "errors"
+)
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
+// If round == nil, DefaultRoundFunc is used.
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
+ if round == nil {
+ round = DefaultRoundFunc
+ }
+ cgroups, err := _newQueryer()
+ if err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ quota, defined, err := cgroups.CPUQuota()
+ if !defined || err != nil {
+ return -1, CPUQuotaUndefined, err
+ }
+
+ maxProcs := round(quota)
+ if minValue > 0 && maxProcs < minValue {
+ return minValue, CPUQuotaMinUsed, nil
+ }
+ return maxProcs, CPUQuotaUsed, nil
+}
+
+type queryer interface {
+ CPUQuota() (float64, bool, error)
+}
+
+var (
+ _newCgroups2 = NewCGroups2ForCurrentProcess
+ _newCgroups = NewCGroupsForCurrentProcess
+ _newQueryer = newQueryer
+)
+
+func newQueryer() (queryer, error) {
+ cgroups, err := _newCgroups2()
+ if err == nil {
+ return cgroups, nil
+ }
+ if errors.Is(err, ErrNotV2) {
+ return _newCgroups()
+ }
+ return nil, err
+}
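
A minimal sketch of how a caller could feed the detected quota into the Go runtime. It assumes it sits in the same package with `math` and `runtime` imported (this file itself only imports `errors`):

func exampleApplyQuota() {
	// Round up rather than using DefaultRoundFunc's floor, and never drop below 1.
	procs, status, err := CPUQuotaToGOMAXPROCS(1, func(v float64) int { return int(math.Ceil(v)) })
	if err != nil || status == CPUQuotaUndefined {
		return // no container CPU limit detected; leave GOMAXPROCS alone
	}
	runtime.GOMAXPROCS(procs)
}
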
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
new file mode 100644
index 00000000..d2d61e89
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/cpu_quota_unsupported.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !linux
+// +build !linux
+
+package automaxprocs
+
+// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
+// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
+// current OS.
+func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
+ return -1, CPUQuotaUndefined, nil
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
new file mode 100644
index 00000000..2e235d7d
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/errors.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import "fmt"
+
+type cgroupSubsysFormatInvalidError struct {
+ line string
+}
+
+type mountPointFormatInvalidError struct {
+ line string
+}
+
+type pathNotExposedFromMountPointError struct {
+ mountPoint string
+ root string
+ path string
+}
+
+func (err cgroupSubsysFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
+}
+
+func (err mountPointFormatInvalidError) Error() string {
+ return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
+}
+
+func (err pathNotExposedFromMountPointError) Error() string {
+ return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
new file mode 100644
index 00000000..7c3fa306
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/mountpoint.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ _mountInfoSep = " "
+ _mountInfoOptsSep = ","
+ _mountInfoOptionalFieldsSep = "-"
+)
+
+const (
+ _miFieldIDMountID = iota
+ _miFieldIDParentID
+ _miFieldIDDeviceID
+ _miFieldIDRoot
+ _miFieldIDMountPoint
+ _miFieldIDOptions
+ _miFieldIDOptionalFields
+
+ _miFieldCountFirstHalf
+)
+
+const (
+ _miFieldOffsetFSType = iota
+ _miFieldOffsetMountSource
+ _miFieldOffsetSuperOptions
+
+ _miFieldCountSecondHalf
+)
+
+const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
+
+// MountPoint is the data structure for the mount points in
+// `/proc/$PID/mountinfo`. See also proc(5) for more information.
+type MountPoint struct {
+ MountID int
+ ParentID int
+ DeviceID string
+ Root string
+ MountPoint string
+ Options []string
+ OptionalFields []string
+ FSType string
+ MountSource string
+ SuperOptions []string
+}
+
+// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
+// returns a new *MountPoint.
+func NewMountPointFromLine(line string) (*MountPoint, error) {
+ fields := strings.Split(line, _mountInfoSep)
+
+ if len(fields) < _miFieldCountMin {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
+ if err != nil {
+ return nil, err
+ }
+
+ parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
+ if err != nil {
+ return nil, err
+ }
+
+ for i, field := range fields[_miFieldIDOptionalFields:] {
+ if field == _mountInfoOptionalFieldsSep {
+ // End of optional fields.
+ fsTypeStart := _miFieldIDOptionalFields + i + 1
+
+ // Now we know where the optional fields end, split the line again with a
+ // limit to avoid issues with spaces in super options as present on WSL.
+ fields = strings.SplitN(line, _mountInfoSep, fsTypeStart+_miFieldCountSecondHalf)
+ if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
+ return nil, mountPointFormatInvalidError{line}
+ }
+
+ miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
+ miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
+ miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
+
+ return &MountPoint{
+ MountID: mountID,
+ ParentID: parentID,
+ DeviceID: fields[_miFieldIDDeviceID],
+ Root: fields[_miFieldIDRoot],
+ MountPoint: fields[_miFieldIDMountPoint],
+ Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
+ OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
+ FSType: fields[miFieldIDFSType],
+ MountSource: fields[miFieldIDMountSource],
+ SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
+ }, nil
+ }
+ }
+
+ return nil, mountPointFormatInvalidError{line}
+}
+
+// Translate converts an absolute path inside the *MountPoint's file system to
+// the host file system path in the mount namespace the *MountPoint belongs to.
+func (mp *MountPoint) Translate(absPath string) (string, error) {
+ relPath, err := filepath.Rel(mp.Root, absPath)
+
+ if err != nil {
+ return "", err
+ }
+ if relPath == ".." || strings.HasPrefix(relPath, "../") {
+ return "", pathNotExposedFromMountPointError{
+ mountPoint: mp.MountPoint,
+ root: mp.Root,
+ path: absPath,
+ }
+ }
+
+ return filepath.Join(mp.MountPoint, relPath), nil
+}
+
+// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
+// and passes each parsed *MountPoint to the newMountPoint callback.
+func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
+ mountInfoFile, err := os.Open(procPathMountInfo)
+ if err != nil {
+ return err
+ }
+ defer mountInfoFile.Close()
+
+ scanner := bufio.NewScanner(mountInfoFile)
+
+ for scanner.Scan() {
+ mountPoint, err := NewMountPointFromLine(scanner.Text())
+ if err != nil {
+ return err
+ }
+ if err := newMountPoint(mountPoint); err != nil {
+ return err
+ }
+ }
+
+ return scanner.Err()
+}
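
A worked example of NewMountPointFromLine and Translate on an assumed mountinfo line (mount IDs and options are illustrative):

func exampleMountPoint() {
	line := "26 20 0:22 / /sys/fs/cgroup/cpu rw,nosuid - cgroup cgroup rw,cpu,cpuacct"
	mp, err := NewMountPointFromLine(line)
	if err != nil {
		return
	}
	// mp.MountID == 26, mp.Root == "/", mp.MountPoint == "/sys/fs/cgroup/cpu",
	// mp.FSType == "cgroup", mp.SuperOptions == []string{"rw", "cpu", "cpuacct"}
	hostPath, _ := mp.Translate("/docker/abc")
	_ = hostPath // "/sys/fs/cgroup/cpu/docker/abc"
}
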
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
new file mode 100644
index 00000000..b8ec7e50
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/runtime.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package automaxprocs
+
+import "math"
+
+// CPUQuotaStatus presents the status of how CPU quota is used
+type CPUQuotaStatus int
+
+const (
+ // CPUQuotaUndefined is returned when CPU quota is undefined
+ CPUQuotaUndefined CPUQuotaStatus = iota
+ // CPUQuotaUsed is returned when a valid CPU quota can be used
+ CPUQuotaUsed
+ // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
+ CPUQuotaMinUsed
+)
+
+// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
+func DefaultRoundFunc(v float64) int {
+ return int(math.Floor(v))
+}
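
A brief worked example of the flooring behaviour (quota values are illustrative):

func exampleRounding() {
	// A 2.5-CPU limit (e.g. cpu.max "250000 100000") floors to 2, so GOMAXPROCS
	// never exceeds what the quota can sustain.
	_ = DefaultRoundFunc(2.5) // == 2
	// Sub-core limits floor to 0; callers rely on the minValue argument of
	// CPUQuotaToGOMAXPROCS to clamp this back up.
	_ = DefaultRoundFunc(0.5) // == 0
}
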
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
new file mode 100644
index 00000000..881ebd59
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/automaxprocs/subsys.go
@@ -0,0 +1,103 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build linux
+// +build linux
+
+package automaxprocs
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ _cgroupSep = ":"
+ _cgroupSubsysSep = ","
+)
+
+const (
+ _csFieldIDID = iota
+ _csFieldIDSubsystems
+ _csFieldIDName
+ _csFieldCount
+)
+
+// CGroupSubsys represents the data structure for entities in
+// `/proc/$PID/cgroup`. See also proc(5) for more information.
+type CGroupSubsys struct {
+ ID int
+ Subsystems []string
+ Name string
+}
+
+// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
+// the format of `/proc/$PID/cgroup`
+func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
+ fields := strings.SplitN(line, _cgroupSep, _csFieldCount)
+
+ if len(fields) != _csFieldCount {
+ return nil, cgroupSubsysFormatInvalidError{line}
+ }
+
+ id, err := strconv.Atoi(fields[_csFieldIDID])
+ if err != nil {
+ return nil, err
+ }
+
+ cgroup := &CGroupSubsys{
+ ID: id,
+ Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
+ Name: fields[_csFieldIDName],
+ }
+
+ return cgroup, nil
+}
+
+// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
+// and returns a new map[string]*CGroupSubsys.
+func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
+ cgroupFile, err := os.Open(procPathCGroup)
+ if err != nil {
+ return nil, err
+ }
+ defer cgroupFile.Close()
+
+ scanner := bufio.NewScanner(cgroupFile)
+ subsystems := make(map[string]*CGroupSubsys)
+
+ for scanner.Scan() {
+ cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
+ if err != nil {
+ return nil, err
+ }
+ for _, subsys := range cgroup.Subsystems {
+ subsystems[subsys] = cgroup
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ return subsystems, nil
+}
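
A worked example of the two /proc/self/cgroup line formats parsed above (sample lines are assumptions):

func exampleCGroupLines() {
	// cgroups v1 entry: hierarchy ID, comma-separated controllers, group path.
	v1, _ := NewCGroupSubsysFromLine("4:cpu,cpuacct:/docker/abc")
	_ = v1 // v1.ID == 4, v1.Subsystems == []string{"cpu", "cpuacct"}, v1.Name == "/docker/abc"

	// The unified (v2) hierarchy always has ID 0 and an empty controller list;
	// that ID-0 entry is what newCGroups2From keys on to detect cgroups v2.
	v2, _ := NewCGroupSubsysFromLine("0::/")
	_ = v2 // v2.ID == 0, v2.Subsystems == []string{""}, v2.Name == "/"
}
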
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
index fd172608..3021dfec 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command {
var errors []error
cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
-
buildSpecs(args, cliConfig, goFlagsConfig)
},
}
@@ -44,7 +43,7 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
internal.VerifyCLIAndFrameworkVersion(suites)
opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, goFlagsConfig)
+ opc.StartCompiling(suites, goFlagsConfig, true)
for {
suiteIdx, suite := opc.Next()
@@ -55,18 +54,22 @@ func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.Go
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
} else {
- if len(goFlagsConfig.O) == 0 {
- goFlagsConfig.O = path.Join(suite.Path, suite.PackageName+".test")
- } else {
+ var testBinPath string
+ if len(goFlagsConfig.O) != 0 {
stat, err := os.Stat(goFlagsConfig.O)
if err != nil {
panic(err)
}
if stat.IsDir() {
- goFlagsConfig.O += "/" + suite.PackageName + ".test"
+ testBinPath = goFlagsConfig.O + "/" + suite.PackageName + ".test"
+ } else {
+ testBinPath = goFlagsConfig.O
}
}
- fmt.Printf("Compiled %s\n", goFlagsConfig.O)
+ if len(testBinPath) == 0 {
+ testBinPath = path.Join(suite.Path, suite.PackageName+".test")
+ }
+ fmt.Printf("Compiled %s\n", testBinPath)
}
}
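
The hunk above reworks how the compiled binary path is reported when -o is set. A standalone sketch of the resulting resolution order (package and function names are illustrative, not part of the change):

package example

import (
	"os"
	"path"
)

// resolveTestBinPath mirrors the branch above: no -o means <suite dir>/<package>.test,
// an -o that names a directory places the binary inside it, and any other -o value
// is taken as the binary path itself.
func resolveTestBinPath(oFlag, suitePath, packageName string) (string, error) {
	if oFlag == "" {
		return path.Join(suitePath, packageName+".test"), nil
	}
	stat, err := os.Stat(oFlag)
	if err != nil {
		return "", err
	}
	if stat.IsDir() {
		return path.Join(oFlag, packageName+".test"), nil
	}
	return oFlag, nil
}
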
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
index 2efd2860..f0e7331f 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -12,7 +12,7 @@ func Abort(details AbortDetails) {
panic(details)
}
-func AbortGracefullyWith(format string, args ...interface{}) {
+func AbortGracefullyWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 0,
Error: fmt.Errorf(format, args...),
@@ -20,7 +20,7 @@ func AbortGracefullyWith(format string, args ...interface{}) {
})
}
-func AbortWith(format string, args ...interface{}) {
+func AbortWith(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
@@ -28,7 +28,7 @@ func AbortWith(format string, args ...interface{}) {
})
}
-func AbortWithUsage(format string, args ...interface{}) {
+func AbortWithUsage(format string, args ...any) {
Abort(AbortDetails{
ExitCode: 1,
Error: fmt.Errorf(format, args...),
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
index 12e0e565..79b83a3a 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -24,7 +24,11 @@ func (c Command) Run(args []string, additionalArgs []string) {
if err != nil {
AbortWithUsage(err.Error())
}
-
+ for _, arg := range args {
+ if len(arg) > 1 && strings.HasPrefix(arg, "-") {
+ AbortWith(types.GinkgoErrors.FlagAfterPositionalParameter().Error())
+ }
+ }
c.Command(args, additionalArgs)
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
index 88dd8d6b..c3f6d3a1 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -68,7 +68,6 @@ func (p Program) RunAndExit(osArgs []string) {
fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
}
p.Exiter(exitCode)
- return
}()
args, additionalArgs := []string{}, []string{}
@@ -157,7 +156,6 @@ func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
p.EmitUsage(writer)
Abort(AbortDetails{ExitCode: 1})
}
- return
}
func (p Program) EmitUsage(writer io.Writer) {
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
index 48827cc5..7bbe6be0 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -11,7 +11,7 @@ import (
"github.com/onsi/ginkgo/v2/types"
)
-func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) TestSuite {
if suite.PathToCompiledTest != "" {
return suite
}
@@ -46,7 +46,7 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite
suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
return suite
}
- args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath)
+ args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, "./", pathToInvocationPath, preserveSymbols)
if err != nil {
suite.State = TestSuiteStateFailedToCompile
suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
@@ -120,7 +120,7 @@ func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
}
}
-func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig, preserveSymbols bool) {
opc.stopped = false
opc.idx = 0
opc.numSuites = len(suites)
@@ -135,7 +135,7 @@ func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsCon
stopped := opc.stopped
opc.mutex.Unlock()
if !stopped {
- suite = CompileSuite(suite, goFlagsConfig)
+ suite = CompileSuite(suite, goFlagsConfig, preserveSymbols)
}
c <- suite
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
index 3c5079ff..87cfa111 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -89,7 +89,7 @@ func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int)
}
i := 0
- if sortFunc(i) != true {
+ if !sortFunc(i) {
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
index 8e16d2bb..f3439a3f 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -90,6 +90,9 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
if reporterConfig.JSONReport != "" {
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
}
+ if reporterConfig.GoJSONReport != "" {
+ reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.GoJSONReport, GenerateFunc: reporters.GenerateGoTestJSONReport, MergeFunc: reporters.MergeAndCleanupGoTestJSONReports})
+ }
if reporterConfig.JUnitReport != "" {
reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
index 41052ea1..30d8096c 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -107,6 +107,9 @@ func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig t
if reporterConfig.JSONReport != "" {
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
}
+ if reporterConfig.GoJSONReport != "" {
+ reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+ }
if reporterConfig.JUnitReport != "" {
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
}
@@ -179,6 +182,9 @@ func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig
if reporterConfig.JSONReport != "" {
reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
}
+ if reporterConfig.GoJSONReport != "" {
+ reporterConfig.GoJSONReport = AbsPathForGeneratedAsset(reporterConfig.GoJSONReport, suite, cliConfig, 0)
+ }
if reporterConfig.JUnitReport != "" {
reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
index e9abb27d..419589b4 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -3,7 +3,6 @@ package main
import (
"fmt"
"os"
-
"github.com/onsi/ginkgo/v2/ginkgo/build"
"github.com/onsi/ginkgo/v2/ginkgo/command"
"github.com/onsi/ginkgo/v2/ginkgo/generators"
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
index aaed4d57..03875b97 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -107,7 +107,7 @@ OUTER_LOOP:
}
opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
- opc.StartCompiling(suites, r.goFlagsConfig)
+ opc.StartCompiling(suites, r.goFlagsConfig, false)
SUITE_LOOP:
for {
@@ -142,7 +142,7 @@ OUTER_LOOP:
}
if !endTime.IsZero() {
- r.suiteConfig.Timeout = endTime.Sub(time.Now())
+ r.suiteConfig.Timeout = time.Until(endTime)
if r.suiteConfig.Timeout <= 0 {
suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
opc.StopAndDrain()
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
index a34d9435..75cbdb49 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -2,12 +2,9 @@ package watch
import (
"go/build"
- "regexp"
+ "strings"
)
-var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
-var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
-
type Dependencies struct {
deps map[string]int
}
@@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) {
if err != nil {
continue
}
- if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+ if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) {
d.addDepIfNotPresent(pkg.Dir, depth)
}
}
@@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
d.deps[dep] = depth
}
}
+
+func matchesGinkgoOrGomega(s string) bool {
+ return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega")
+}
+
+func matchesGinkgoIntegration(s string) bool {
+ return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
index bde4193c..fe1ca305 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -153,7 +153,7 @@ func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
}
func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
- suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ suite = internal.CompileSuite(suite, w.goFlagsConfig, false)
if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
fmt.Println(suite.CompilationError.Error())
return suite
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
index 02c6739e..40d1e1ab 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -1,6 +1,8 @@
package ginkgo
import (
+ "context"
+ "io"
"testing"
"github.com/onsi/ginkgo/v2/internal/testingtproxy"
@@ -48,6 +50,8 @@ The portion of the interface returned by GinkgoT() that maps onto methods in the
*/
type GinkgoTInterface interface {
Cleanup(func())
+ Chdir(dir string)
+ Context() context.Context
Setenv(kev, value string)
Error(args ...any)
Errorf(format string, args ...any)
@@ -66,6 +70,8 @@ type GinkgoTInterface interface {
Skipf(format string, args ...any)
Skipped() bool
TempDir() string
+ Attr(key, value string)
+ Output() io.Writer
}
/*
@@ -127,6 +133,12 @@ type GinkgoTBWrapper struct {
func (g *GinkgoTBWrapper) Cleanup(f func()) {
g.GinkgoT.Cleanup(f)
}
+func (g *GinkgoTBWrapper) Chdir(dir string) {
+ g.GinkgoT.Chdir(dir)
+}
+func (g *GinkgoTBWrapper) Context() context.Context {
+ return g.GinkgoT.Context()
+}
func (g *GinkgoTBWrapper) Error(args ...any) {
g.GinkgoT.Error(args...)
}
@@ -178,3 +190,9 @@ func (g *GinkgoTBWrapper) Skipped() bool {
func (g *GinkgoTBWrapper) TempDir() string {
return g.GinkgoT.TempDir()
}
+func (g *GinkgoTBWrapper) Attr(key, value string) {
+ g.GinkgoT.Attr(key, value)
+}
+func (g *GinkgoTBWrapper) Output() io.Writer {
+ return g.GinkgoT.Output()
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
new file mode 100644
index 00000000..c9657102
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go
@@ -0,0 +1,34 @@
+package internal
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func ComputeAroundNodes(specs Specs) Specs {
+ out := Specs{}
+ for _, spec := range specs {
+ nodes := Nodes{}
+ currentNestingLevel := 0
+ aroundNodes := types.AroundNodes{}
+ nestingLevelIndices := []int{}
+ for _, node := range spec.Nodes {
+ switch node.NodeType {
+ case types.NodeTypeContainer:
+ currentNestingLevel = node.NestingLevel + 1
+ nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes))
+ aroundNodes = aroundNodes.Append(node.AroundNodes...)
+ nodes = append(nodes, node)
+ default:
+ if currentNestingLevel > node.NestingLevel {
+ currentNestingLevel = node.NestingLevel
+ aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]]
+ }
+ node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...)
+ nodes = append(nodes, node)
+ }
+ }
+ spec.Nodes = nodes
+ out = append(out, spec)
+ }
+ return out
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/failer.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
index e9bd9565..8c5de9c1 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -32,7 +32,7 @@ func (f *Failer) GetFailure() types.Failure {
return f.failure
}
-func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
+func (f *Failer) Panic(location types.CodeLocation, forwardedPanic any) {
f.lock.Lock()
defer f.lock.Unlock()
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
index e3da7d14..a39daf5a 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic
*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
-func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) {
focusString := strings.Join(suiteConfig.FocusStrings, "|")
skipString := strings.Join(suiteConfig.SkipStrings, "|")
@@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit
})
}
+ if suiteConfig.SemVerFilter != "" {
+ semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints()))
+ })
+ }
+
if len(suiteConfig.FocusFiles) > 0 {
focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/group.go
index 02c9fe4f..cc794903 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/group.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -110,21 +110,53 @@ func newGroup(suite *Suite) *group {
}
}
+// initialReportForSpec constructs a new SpecReport right before running the spec.
func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
return types.SpecReport{
- ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
- ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
- ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
- LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
- LeafNodeType: types.NodeTypeIt,
- LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
- LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
- ParallelProcess: g.suite.config.ParallelProcess,
- RunningInParallel: g.suite.isRunningInParallel(),
- IsSerial: spec.Nodes.HasNodeMarkedSerial(),
- IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
- MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
- MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ SpecPriority: spec.Nodes.GetSpecPriority(),
+ }
+}
+
+// constructionNodeReportForTreeNode constructs a new ConstructionNodeReport right before invoking the body
+// of a container node during construction of the full tree.
+func constructionNodeReportForTreeNode(node *TreeNode) *types.ConstructionNodeReport {
+ var report types.ConstructionNodeReport
+ // Walk up the tree and set attributes accordingly.
+ addNodeToReportForNode(&report, node)
+ return &report
+}
+
+// addNodeToReportForNode is conceptually similar to initialReportForSpec and therefore placed here
+// although it doesn't do anything with a group.
+func addNodeToReportForNode(report *types.ConstructionNodeReport, node *TreeNode) {
+ if node.Parent != nil {
+ // First add the parent node, then the current one.
+ addNodeToReportForNode(report, node.Parent)
+ }
+ report.ContainerHierarchyTexts = append(report.ContainerHierarchyTexts, node.Node.Text)
+ report.ContainerHierarchyLocations = append(report.ContainerHierarchyLocations, node.Node.CodeLocation)
+ report.ContainerHierarchyLabels = append(report.ContainerHierarchyLabels, node.Node.Labels)
+ report.ContainerHierarchySemVerConstraints = append(report.ContainerHierarchySemVerConstraints, node.Node.SemVerConstraints)
+ if node.Node.MarkedSerial {
+ report.IsSerial = true
+ }
+ if node.Node.MarkedOrdered {
+ report.IsInOrderedContainer = true
}
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
index 8ed86111..79bfa87d 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -40,7 +40,7 @@ func (ic InterruptCause) String() string {
}
type InterruptStatus struct {
- Channel chan interface{}
+ Channel chan any
Level InterruptLevel
Cause InterruptCause
}
@@ -62,14 +62,14 @@ type InterruptHandlerInterface interface {
}
type InterruptHandler struct {
- c chan interface{}
+ c chan any
lock *sync.Mutex
level InterruptLevel
cause InterruptCause
client parallel_support.Client
- stop chan interface{}
+ stop chan any
signals []os.Signal
- requestAbortCheck chan interface{}
+ requestAbortCheck chan any
}
func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
@@ -77,10 +77,10 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *
signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
}
handler := &InterruptHandler{
- c: make(chan interface{}),
+ c: make(chan any),
lock: &sync.Mutex{},
- stop: make(chan interface{}),
- requestAbortCheck: make(chan interface{}),
+ stop: make(chan any),
+ requestAbortCheck: make(chan any),
client: client,
signals: signals,
}
@@ -98,9 +98,9 @@ func (handler *InterruptHandler) registerForInterrupts() {
signal.Notify(signalChannel, handler.signals...)
// cross-process abort handling
- var abortChannel chan interface{}
+ var abortChannel chan any
if handler.client != nil {
- abortChannel = make(chan interface{})
+ abortChannel = make(chan any)
go func() {
pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
for {
@@ -125,7 +125,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}()
}
- go func(abortChannel chan interface{}) {
+ go func(abortChannel chan any) {
var interruptCause InterruptCause
for {
select {
@@ -151,7 +151,7 @@ func (handler *InterruptHandler) registerForInterrupts() {
}
if handler.level != oldLevel {
close(handler.c)
- handler.c = make(chan interface{})
+ handler.c = make(chan any)
}
handler.lock.Unlock()
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/node.go
index 0686f741..2bccec2d 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/node.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"reflect"
+ "slices"
"sort"
"sync"
"time"
@@ -46,20 +47,24 @@ type Node struct {
ReportEachBody func(SpecContext, types.SpecReport)
ReportSuiteBody func(SpecContext, types.Report)
- MarkedFocus bool
- MarkedPending bool
- MarkedSerial bool
- MarkedOrdered bool
- MarkedContinueOnFailure bool
- MarkedOncePerOrdered bool
- FlakeAttempts int
- MustPassRepeatedly int
- Labels Labels
- PollProgressAfter time.Duration
- PollProgressInterval time.Duration
- NodeTimeout time.Duration
- SpecTimeout time.Duration
- GracePeriod time.Duration
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ SemVerConstraints SemVerConstraints
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+ AroundNodes types.AroundNodes
+ HasExplicitlySetSpecPriority bool
+ SpecPriority int
NodeIDWhereCleanupWasGenerated uint
}
@@ -84,35 +89,51 @@ const SuppressProgressReporting = suppressProgressReporting(true)
type FlakeAttempts uint
type MustPassRepeatedly uint
type Offset uint
-type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
-type Labels []string
+type Done chan<- any // Deprecated Done Channel for asynchronous testing
type PollProgressInterval time.Duration
type PollProgressAfter time.Duration
type NodeTimeout time.Duration
type SpecTimeout time.Duration
type GracePeriod time.Duration
+type SpecPriority int
+
+type Labels []string
func (l Labels) MatchesLabelFilter(query string) bool {
return types.MustParseLabelFilter(query)(l)
}
-func UnionOfLabels(labels ...Labels) Labels {
- out := Labels{}
- seen := map[string]bool{}
- for _, labelSet := range labels {
- for _, label := range labelSet {
- if !seen[label] {
- seen[label] = true
- out = append(out, label)
+type SemVerConstraints []string
+
+func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool {
+ return types.MustParseSemVerFilter(version)(svc)
+}
+
+func unionOf[S ~[]E, E comparable](slices ...S) S {
+ out := S{}
+ seen := map[E]bool{}
+ for _, slice := range slices {
+ for _, item := range slice {
+ if !seen[item] {
+ seen[item] = true
+ out = append(out, item)
}
}
}
return out
}
-func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
- decorations := []interface{}{}
- remainingArgs := []interface{}{}
+func UnionOfLabels(labels ...Labels) Labels {
+ return unionOf(labels...)
+}
+
+func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints {
+ return unionOf(semVerConstraints...)
+}
+
+func PartitionDecorations(args ...any) ([]any, []any) {
+ decorations := []any{}
+ remainingArgs := []any{}
for _, arg := range args {
if isDecoration(arg) {
decorations = append(decorations, arg)
@@ -123,7 +144,7 @@ func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
return decorations, remainingArgs
}
-func isDecoration(arg interface{}) bool {
+func isDecoration(arg any) bool {
switch t := reflect.TypeOf(arg); {
case t == nil:
return false
@@ -151,6 +172,8 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(Labels{}):
return true
+ case t == reflect.TypeOf(SemVerConstraints{}):
+ return true
case t == reflect.TypeOf(PollProgressInterval(0)):
return true
case t == reflect.TypeOf(PollProgressAfter(0)):
@@ -161,6 +184,10 @@ func isDecoration(arg interface{}) bool {
return true
case t == reflect.TypeOf(GracePeriod(0)):
return true
+ case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+ return true
+ case t == reflect.TypeOf(SpecPriority(0)):
+ return true
case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
return true
default:
@@ -168,7 +195,7 @@ func isDecoration(arg interface{}) bool {
}
}
-func isSliceOfDecorations(slice interface{}) bool {
+func isSliceOfDecorations(slice any) bool {
vSlice := reflect.ValueOf(slice)
if vSlice.Len() == 0 {
return false
@@ -184,13 +211,14 @@ func isSliceOfDecorations(slice interface{}) bool {
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (Node, []error) {
baseOffset := 2
node := Node{
ID: UniqueNodeID(),
NodeType: nodeType,
Text: text,
Labels: Labels{},
+ SemVerConstraints: SemVerConstraints{},
CodeLocation: types.NewCodeLocation(baseOffset),
NestingLevel: -1,
PollProgressAfter: -1,
@@ -205,9 +233,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
- args = unrollInterfaceSlice(args)
+ args = UnrollInterfaceSlice(args)
- remainingArgs := []interface{}{}
+ remainingArgs := []any{}
// First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
@@ -221,9 +249,10 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
labelsSeen := map[string]bool{}
+ semVerConstraintsSeen := map[string]bool{}
trackedFunctionError := false
args = remainingArgs
- remainingArgs = []interface{}{}
+ remainingArgs = []any{}
// now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
@@ -299,6 +328,14 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
if nodeType.Is(types.NodeTypeContainer) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
}
+ case t == reflect.TypeOf(SpecPriority(0)):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecPriority"))
+ }
+ node.SpecPriority = int(arg.(SpecPriority))
+ node.HasExplicitlySetSpecPriority = true
+ case t == reflect.TypeOf(types.AroundNodeDecorator{}):
+ node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator))
case t == reflect.TypeOf(Labels{}):
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
@@ -311,6 +348,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
appendError(err)
}
}
+ case t == reflect.TypeOf(SemVerConstraints{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint"))
+ }
+ for _, semVerConstraint := range arg.(SemVerConstraints) {
+ if !semVerConstraintsSeen[semVerConstraint] {
+ semVerConstraintsSeen[semVerConstraint] = true
+ semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation)
+ node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint)
+ appendError(err)
+ }
+ }
case t.Kind() == reflect.Func:
if nodeType.Is(types.NodeTypeContainer) {
if node.Body != nil {
@@ -451,7 +500,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
var doneType = reflect.TypeOf(make(Done))
-func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg any) (func(SpecContext), bool) {
t := reflect.TypeOf(arg)
if t.NumOut() > 0 || t.NumIn() > 1 {
return nil, false
@@ -477,7 +526,7 @@ func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.
var byteType = reflect.TypeOf([]byte{})
-func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+func extractSynchronizedBeforeSuiteProc1Body(arg any) (func(SpecContext) []byte, bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
@@ -505,7 +554,7 @@ func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext)
}, hasContext
}
-func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+func extractSynchronizedBeforeSuiteAllProcsBody(arg any) (func(SpecContext, []byte), bool) {
t := reflect.TypeOf(arg)
v := reflect.ValueOf(arg)
hasContext, hasByte := false, false
@@ -536,11 +585,11 @@ func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecConte
var errInterface = reflect.TypeOf((*error)(nil)).Elem()
-func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...any) (Node, []error) {
decorations, remainingArgs := PartitionDecorations(args...)
baseOffset := 2
cl := types.NewCodeLocation(baseOffset)
- finalArgs := []interface{}{}
+ finalArgs := []any{}
for _, arg := range decorations {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(Offset(0)):
@@ -599,7 +648,7 @@ func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(stri
})
}
- return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs)
}
func (n Node) IsZero() bool {
@@ -824,6 +873,32 @@ func (n Nodes) UnionOfLabels() []string {
return out
}
+func (n Nodes) SemVerConstraints() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].SemVerConstraints == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].SemVerConstraints)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfSemVerConstraints() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, constraint := range n[i].SemVerConstraints {
+ if !seen[constraint] {
+ seen[constraint] = true
+ out = append(out, constraint)
+ }
+ }
+ }
+ return out
+}
+
func (n Nodes) CodeLocations() []types.CodeLocation {
out := make([]types.CodeLocation, len(n))
for i := range n {
@@ -920,19 +995,84 @@ func (n Nodes) GetMaxMustPassRepeatedly() int {
return maxMustPassRepeatedly
}
-func unrollInterfaceSlice(args interface{}) []interface{} {
+func (n Nodes) GetSpecPriority() int {
+ for i := len(n) - 1; i >= 0; i-- {
+ if n[i].HasExplicitlySetSpecPriority {
+ return n[i].SpecPriority
+ }
+ }
+ return 0
+}
+
+func UnrollInterfaceSlice(args any) []any {
v := reflect.ValueOf(args)
if v.Kind() != reflect.Slice {
- return []interface{}{args}
+ return []any{args}
}
- out := []interface{}{}
+ out := []any{}
for i := 0; i < v.Len(); i++ {
el := reflect.ValueOf(v.Index(i).Interface())
- if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
- out = append(out, unrollInterfaceSlice(el.Interface())...)
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) {
+ out = append(out, UnrollInterfaceSlice(el.Interface())...)
} else {
out = append(out, v.Index(i).Interface())
}
}
return out
}
+
+type NodeArgsTransformer func(nodeType types.NodeType, offset Offset, text string, args []any) (string, []any, []error)
+
+func AddTreeConstructionNodeArgsTransformer(transformer NodeArgsTransformer) func() {
+ id := nodeArgsTransformerCounter
+ nodeArgsTransformerCounter++
+ nodeArgsTransformers = append(nodeArgsTransformers, registeredNodeArgsTransformer{id, transformer})
+ return func() {
+ nodeArgsTransformers = slices.DeleteFunc(nodeArgsTransformers, func(transformer registeredNodeArgsTransformer) bool {
+ return transformer.id == id
+ })
+ }
+}
+
+var (
+ nodeArgsTransformerCounter int64
+ nodeArgsTransformers []registeredNodeArgsTransformer
+)
+
+type registeredNodeArgsTransformer struct {
+ id int64
+ transformer NodeArgsTransformer
+}
+
+// TransformNewNodeArgs is the helper for DSL functions which handles NodeArgsTransformers.
+//
+// Its return values are intentionally the same as the internal.NewNode parameters,
+// which makes it possible to chain the invocations:
+//
+// NewNode(transformNewNodeArgs(...))
+func TransformNewNodeArgs(exitIfErrors func([]error), deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...any) (*types.DeprecationTracker, types.NodeType, string, []any) {
+ var errs []error
+
+ // Most recent first...
+ //
+ // This intentionally doesn't use slices.Backward because
+ // using iterators influences stack unwinding.
+ for i := len(nodeArgsTransformers) - 1; i >= 0; i-- {
+ transformer := nodeArgsTransformers[i].transformer
+ args = UnrollInterfaceSlice(args)
+
+ // We do not really need to recompute this on additional loop iterations,
+		// but it's fast and simpler this way.
+ var offset Offset
+ for _, arg := range args {
+ if o, ok := arg.(Offset); ok {
+ offset = o
+ }
+ }
+ offset += 3 // The DSL function, this helper, and the TransformNodeArgs implementation.
+
+ text, args, errs = transformer(nodeType, offset, text, args)
+ exitIfErrors(errs)
+ }
+ return deprecationTracker, nodeType, text, args
+}
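The new transformer hook keeps a registry of callbacks plus a counter, and hands the caller a closure that removes exactly the entry it registered (via slices.DeleteFunc). A minimal, self-contained sketch of that register/deregister pattern, using stand-in names rather than Ginkgo's internal types:

```go
package main

import (
	"fmt"
	"slices"
)

type registration struct {
	id int64
	fn func(string) string
}

var (
	counter int64
	hooks   []registration
)

// addHook mirrors the register/deregister pattern of
// AddTreeConstructionNodeArgsTransformer: the caller gets back a closure
// that removes exactly the entry it added, identified by a monotonically
// increasing id.
func addHook(fn func(string) string) func() {
	id := counter
	counter++
	hooks = append(hooks, registration{id, fn})
	return func() {
		hooks = slices.DeleteFunc(hooks, func(r registration) bool { return r.id == id })
	}
}

func main() {
	remove := addHook(func(s string) string { return "[hooked] " + s })
	fmt.Println("registered hooks:", len(hooks)) // 1
	remove()
	fmt.Println("registered hooks:", len(hooks)) // 0
}
```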
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
index 84eea0a5..da58d54f 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -125,7 +125,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
// pick out a representative spec
representativeSpec := specs[executionGroups[groupID][0]]
- // and grab the node on the spec that will represent which shufflable group this execution group belongs tu
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
//add the execution group to its shufflable group
@@ -138,14 +138,35 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices,
}
}
+ // now, for each shuffleable group, we compute the priority
+ shufflableGroupingIDPriorities := map[uint]int{}
+ for shufflableGroupingID, groupIDs := range shufflableGroupingIDToGroupIDs {
+ // the priority of a shufflable grouping is the max priority of any spec in any execution group in the shufflable grouping
+		maxPriority := -1 << 31 // min int32, effectively -infinity for priorities
+ for _, groupID := range groupIDs {
+ for _, specIdx := range executionGroups[groupID] {
+ specPriority := specs[specIdx].Nodes.GetSpecPriority()
+ maxPriority = max(specPriority, maxPriority)
+ }
+ }
+ shufflableGroupingIDPriorities[shufflableGroupingID] = maxPriority
+ }
+
// now we permute the sorted shufflable grouping IDs and build the ordered Groups
- orderedGroups := GroupedSpecIndices{}
permutation := r.Perm(len(shufflableGroupingIDs))
- for _, j := range permutation {
- //let's get the execution group IDs for this shufflable group:
- executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
- // and we'll add their associated specindices to the orderedGroups slice:
- for _, executionGroupID := range executionGroupIDsForJ {
+ shuffledGroupingIds := make([]uint, len(shufflableGroupingIDs))
+ for i, j := range permutation {
+ shuffledGroupingIds[i] = shufflableGroupingIDs[j]
+ }
+ // now, we need to stable sort the shuffledGroupingIds by priority (higher priority first)
+ sort.SliceStable(shuffledGroupingIds, func(i, j int) bool {
+ return shufflableGroupingIDPriorities[shuffledGroupingIds[i]] > shufflableGroupingIDPriorities[shuffledGroupingIds[j]]
+ })
+
+ // we can now take these prioritized, shuffled, groupings and form the final set of ordered spec groups
+ orderedGroups := GroupedSpecIndices{}
+ for _, id := range shuffledGroupingIds {
+ for _, executionGroupID := range shufflableGroupingIDToGroupIDs[id] {
orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
}
}
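The reworked ordering first shuffles the shufflable grouping IDs and then stable-sorts them by priority (higher first), so high-priority groups run earlier while groups sharing a priority keep their randomized relative order. A small, runnable sketch of that shuffle-then-stable-sort step with made-up IDs and priorities:

```go
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func main() {
	groupIDs := []uint{1, 2, 3, 4}
	priority := map[uint]int{1: 0, 2: 5, 3: 0, 4: 5}

	// Shuffle first, so groups that share a priority keep a random
	// relative order...
	r := rand.New(rand.NewSource(42))
	perm := r.Perm(len(groupIDs))
	shuffled := make([]uint, len(groupIDs))
	for i, j := range perm {
		shuffled[i] = groupIDs[j]
	}

	// ...then stable-sort by priority, higher first, which is the same
	// two-step arrangement the ordering.go change performs.
	sort.SliceStable(shuffled, func(i, j int) bool {
		return priority[shuffled[i]] > priority[shuffled[j]]
	})
	fmt.Println(shuffled) // e.g. [2 4 1 3] — the priority-5 groups come first
}
```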
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
index 4a1c0946..5598f15c 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -69,7 +69,7 @@ type pipePair struct {
writer *os.File
}
-func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan any) {
for {
//make the next pipe...
pair := pipePair{}
@@ -101,8 +101,8 @@ type genericOutputInterceptor struct {
stderrClone *os.File
pipe pipePair
- shutdown chan interface{}
- emergencyBailout chan interface{}
+ shutdown chan any
+ emergencyBailout chan any
pipeChannel chan pipePair
interceptedContent chan string
@@ -139,7 +139,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
interceptor.intercepting = true
if interceptor.stdoutClone == nil {
interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
- interceptor.shutdown = make(chan interface{})
+ interceptor.shutdown = make(chan any)
go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
}
@@ -147,13 +147,13 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
interceptor.pipe = <-interceptor.pipeChannel
- interceptor.emergencyBailout = make(chan interface{})
+ interceptor.emergencyBailout = make(chan any)
//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
go func() {
buffer := &bytes.Buffer{}
destination := io.MultiWriter(buffer, interceptor.forwardTo)
- copyFinished := make(chan interface{})
+ copyFinished := make(chan any)
reader := interceptor.pipe.reader
go func() {
io.Copy(destination, reader)
@@ -224,7 +224,7 @@ func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &osGlobalReassigningOutputInterceptorImpl{},
}
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
index 8a237f44..e0f1431d 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -13,7 +13,7 @@ func NewOutputInterceptor() OutputInterceptor {
return &genericOutputInterceptor{
interceptedContent: make(chan string),
pipeChannel: make(chan pipePair),
- shutdown: make(chan interface{}),
+ shutdown: make(chan any),
implementation: &dupSyscallOutputInterceptorImpl{},
}
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
index b3cd6429..4234d802 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -30,7 +30,7 @@ type Server interface {
Close()
Address() string
RegisterAlive(node int, alive func() bool)
- GetSuiteDone() chan interface{}
+ GetSuiteDone() chan any
GetOutputDestination() io.Writer
SetOutputDestination(io.Writer)
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
index 6547c7a6..4aa10ae4 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -34,7 +34,7 @@ func (client *httpClient) Close() error {
return nil
}
-func (client *httpClient) post(path string, data interface{}) error {
+func (client *httpClient) post(path string, data any) error {
var body io.Reader
if data != nil {
encoded, err := json.Marshal(data)
@@ -54,7 +54,7 @@ func (client *httpClient) post(path string, data interface{}) error {
return nil
}
-func (client *httpClient) poll(path string, data interface{}) error {
+func (client *httpClient) poll(path string, data any) error {
for {
resp, err := http.Get(client.serverHost + path)
if err != nil {
@@ -153,10 +153,7 @@ func (client *httpClient) PostAbort() error {
func (client *httpClient) ShouldAbort() bool {
err := client.poll("/abort", nil)
- if err == ErrorGone {
- return true
- }
- return false
+ return err == ErrorGone
}
func (client *httpClient) Write(p []byte) (int, error) {
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
index d2c71ab1..8a1b7a5b 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -75,7 +75,7 @@ func (server *httpServer) Address() string {
return "http://" + server.listener.Addr().String()
}
-func (server *httpServer) GetSuiteDone() chan interface{} {
+func (server *httpServer) GetSuiteDone() chan any {
return server.handler.done
}
@@ -96,7 +96,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) {
//
// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object any) bool {
defer request.Body.Close()
if json.NewDecoder(request.Body).Decode(object) != nil {
writer.WriteHeader(http.StatusBadRequest)
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
index 59e8e6fd..bb4675a0 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -35,7 +35,7 @@ func (client *rpcClient) Close() error {
return client.client.Close()
}
-func (client *rpcClient) poll(method string, data interface{}) error {
+func (client *rpcClient) poll(method string, data any) error {
for {
err := client.client.Call(method, voidSender, data)
if err == nil {
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
index 2620fd56..1574f99a 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -25,7 +25,7 @@ type RPCServer struct {
handler *ServerHandler
}
-//Create a new server, automatically selecting a port
+// Create a new server, automatically selecting a port
func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
@@ -37,7 +37,7 @@ func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, e
}, nil
}
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
func (server *RPCServer) Start() {
rpcServer := rpc.NewServer()
rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
@@ -48,17 +48,17 @@ func (server *RPCServer) Start() {
go httpServer.Serve(server.listener)
}
-//Stop the server
+// Stop the server
func (server *RPCServer) Close() {
server.listener.Close()
}
-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
func (server *RPCServer) Address() string {
return server.listener.Addr().String()
}
-func (server *RPCServer) GetSuiteDone() chan interface{} {
+func (server *RPCServer) GetSuiteDone() chan any {
return server.handler.done
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
index a6d98793..ab9e1137 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -18,7 +18,7 @@ var voidSender Void
// It handles all the business logic to avoid duplication between the two servers
type ServerHandler struct {
- done chan interface{}
+ done chan any
outputDestination io.Writer
reporter reporters.Reporter
alives []func() bool
@@ -46,7 +46,7 @@ func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHan
parallelTotal: parallelTotal,
outputDestination: os.Stdout,
- done: make(chan interface{}),
+ done: make(chan any),
}
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
index 11269cf1..165cbc4b 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) {
}
functionCall.Filename = line[:delimiterIdx]
line = strings.Split(line[delimiterIdx+1:], " ")[0]
- lineNumber, err := strconv.ParseInt(line, 10, 64)
+ lineNumber, err := strconv.ParseInt(line, 10, 32)
functionCall.Line = int(lineNumber)
if err != nil {
return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
index cc351a39..9c18dc8e 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -8,7 +8,7 @@ import (
type ReportEntry = types.ReportEntry
-func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+func NewReportEntry(name string, cl types.CodeLocation, args ...any) (ReportEntry, error) {
out := ReportEntry{
Visibility: types.ReportEntryVisibilityAlways,
Name: name,
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
new file mode 100644
index 00000000..8b7a9cea
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson.go
@@ -0,0 +1,158 @@
+package reporters
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/packages"
+)
+
+func ptr[T any](in T) *T {
+ return &in
+}
+
+type encoder interface {
+ Encode(v any) error
+}
+
+// gojsonEvent matches the format from go internals
+// https://github.com/golang/go/blob/master/src/cmd/internal/test2json/test2json.go#L31-L41
+// https://pkg.go.dev/cmd/test2json
+type gojsonEvent struct {
+ Time *time.Time `json:",omitempty"`
+ Action GoJSONAction
+ Package string `json:",omitempty"`
+ Test string `json:",omitempty"`
+ Elapsed *float64 `json:",omitempty"`
+ Output *string `json:",omitempty"`
+ FailedBuild string `json:",omitempty"`
+}
+
+type GoJSONAction string
+
+const (
+ // start - the test binary is about to be executed
+ GoJSONStart GoJSONAction = "start"
+ // run - the test has started running
+ GoJSONRun GoJSONAction = "run"
+ // pause - the test has been paused
+ GoJSONPause GoJSONAction = "pause"
+ // cont - the test has continued running
+ GoJSONCont GoJSONAction = "cont"
+ // pass - the test passed
+ GoJSONPass GoJSONAction = "pass"
+ // bench - the benchmark printed log output but did not fail
+ GoJSONBench GoJSONAction = "bench"
+ // fail - the test or benchmark failed
+ GoJSONFail GoJSONAction = "fail"
+ // output - the test printed output
+ GoJSONOutput GoJSONAction = "output"
+ // skip - the test was skipped or the package contained no tests
+ GoJSONSkip GoJSONAction = "skip"
+)
+
+func goJSONActionFromSpecState(state types.SpecState) GoJSONAction {
+ switch state {
+ case types.SpecStateInvalid:
+ return GoJSONFail
+ case types.SpecStatePending:
+ return GoJSONSkip
+ case types.SpecStateSkipped:
+ return GoJSONSkip
+ case types.SpecStatePassed:
+ return GoJSONPass
+ case types.SpecStateFailed:
+ return GoJSONFail
+ case types.SpecStateAborted:
+ return GoJSONFail
+ case types.SpecStatePanicked:
+ return GoJSONFail
+ case types.SpecStateInterrupted:
+ return GoJSONFail
+ case types.SpecStateTimedout:
+ return GoJSONFail
+ default:
+ panic("unexpected state should not happen")
+ }
+}
+
+// gojsonReport wraps types.Report and calculates the extra fields required by gojson
+type gojsonReport struct {
+ o types.Report
+ // Extra calculated fields
+ goPkg string
+ elapsed float64
+}
+
+func newReport(in types.Report) *gojsonReport {
+ return &gojsonReport{
+ o: in,
+ }
+}
+
+func (r *gojsonReport) Fill() error {
+ // NOTE: could the types.Report include the go package name?
+ goPkg, err := suitePathToPkg(r.o.SuitePath)
+ if err != nil {
+ return err
+ }
+ r.goPkg = goPkg
+ r.elapsed = r.o.RunTime.Seconds()
+ return nil
+}
+
+// gojsonSpecReport wraps types.SpecReport and calculates extra fields required by gojson
+type gojsonSpecReport struct {
+ o types.SpecReport
+ // extra calculated fields
+ testName string
+ elapsed float64
+ action GoJSONAction
+}
+
+func newSpecReport(in types.SpecReport) *gojsonSpecReport {
+ return &gojsonSpecReport{
+ o: in,
+ }
+}
+
+func (sr *gojsonSpecReport) Fill() error {
+ sr.elapsed = sr.o.RunTime.Seconds()
+ sr.testName = createTestName(sr.o)
+ sr.action = goJSONActionFromSpecState(sr.o.State)
+ return nil
+}
+
+func suitePathToPkg(dir string) (string, error) {
+ cfg := &packages.Config{
+ Mode: packages.NeedFiles | packages.NeedSyntax,
+ }
+ pkgs, err := packages.Load(cfg, dir)
+ if err != nil {
+ return "", err
+ }
+ if len(pkgs) != 1 {
+		return "", errors.New("could not resolve the suite path to exactly one Go package")
+ }
+ return pkgs[0].ID, nil
+}
+
+func createTestName(spec types.SpecReport) string {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
+ name = strings.TrimSpace(name)
+ return name
+}
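The event struct above mirrors the JSON lines that `go test -json` (test2json) emits, and createTestName folds labels and sem-ver constraints into the test name. A rough illustration of decoding one such line into a local mirror of that shape (the real gojsonEvent type is unexported, and the sample line is made up):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// event is a local mirror of the (unexported) gojsonEvent shape above.
type event struct {
	Time    *time.Time `json:",omitempty"`
	Action  string
	Package string   `json:",omitempty"`
	Test    string   `json:",omitempty"`
	Elapsed *float64 `json:",omitempty"`
	Output  *string  `json:",omitempty"`
}

func main() {
	// A sample line in the style test2json / `go test -json` produces.
	line := `{"Action":"pass","Package":"example.com/pkg","Test":"[It] does a thing [fast]","Elapsed":0.42}`

	var e event
	if err := json.Unmarshal([]byte(line), &e); err != nil {
		panic(err)
	}
	fmt.Println(e.Action, e.Test, *e.Elapsed) // pass [It] does a thing [fast] 0.42
}
```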
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
new file mode 100644
index 00000000..ec5311d0
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_event_writer.go
@@ -0,0 +1,111 @@
+package reporters
+
+type GoJSONEventWriter struct {
+ enc encoder
+ specSystemErrFn specSystemExtractFn
+ specSystemOutFn specSystemExtractFn
+}
+
+func NewGoJSONEventWriter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONEventWriter {
+ return &GoJSONEventWriter{
+ enc: enc,
+ specSystemErrFn: errFn,
+ specSystemOutFn: outFn,
+ }
+}
+
+func (r *GoJSONEventWriter) writeEvent(e *gojsonEvent) error {
+ return r.enc.Encode(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteStart(report *gojsonReport) error {
+ e := &gojsonEvent{
+ Time: &report.o.StartTime,
+ Action: GoJSONStart,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSuiteResult(report *gojsonReport) error {
+ var action GoJSONAction
+ switch {
+ case report.o.PreRunStats.SpecsThatWillRun == 0:
+ action = GoJSONSkip
+ case report.o.SuiteSucceeded:
+ action = GoJSONPass
+ default:
+ action = GoJSONFail
+ }
+ e := &gojsonEvent{
+ Time: &report.o.EndTime,
+ Action: action,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ Elapsed: ptr(report.elapsed),
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSpecStart(report *gojsonReport, specReport *gojsonSpecReport) error {
+ e := &gojsonEvent{
+ Time: &specReport.o.StartTime,
+ Action: GoJSONRun,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
+
+func (r *GoJSONEventWriter) WriteSpecOut(report *gojsonReport, specReport *gojsonSpecReport) error {
+ events := []*gojsonEvent{}
+
+ stdErr := r.specSystemErrFn(specReport.o)
+ if stdErr != "" {
+ events = append(events, &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: GoJSONOutput,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: ptr(stdErr),
+ FailedBuild: "",
+ })
+ }
+ stdOut := r.specSystemOutFn(specReport.o)
+ if stdOut != "" {
+ events = append(events, &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: GoJSONOutput,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Output: ptr(stdOut),
+ FailedBuild: "",
+ })
+ }
+
+ for _, ev := range events {
+ err := r.writeEvent(ev)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *GoJSONEventWriter) WriteSpecResult(report *gojsonReport, specReport *gojsonSpecReport) error {
+ e := &gojsonEvent{
+ Time: &specReport.o.EndTime,
+ Action: specReport.action,
+ Test: specReport.testName,
+ Package: report.goPkg,
+ Elapsed: ptr(specReport.elapsed),
+ Output: nil,
+ FailedBuild: "",
+ }
+ return r.writeEvent(e)
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go
new file mode 100644
index 00000000..633e49b8
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/reporters/gojson_reporter.go
@@ -0,0 +1,45 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type GoJSONReporter struct {
+ ev *GoJSONEventWriter
+}
+
+type specSystemExtractFn func(spec types.SpecReport) string
+
+func NewGoJSONReporter(enc encoder, errFn specSystemExtractFn, outFn specSystemExtractFn) *GoJSONReporter {
+ return &GoJSONReporter{
+ ev: NewGoJSONEventWriter(enc, errFn, outFn),
+ }
+}
+
+func (r *GoJSONReporter) Write(originalReport types.Report) error {
+ // suite start events
+ report := newReport(originalReport)
+ err := report.Fill()
+ if err != nil {
+ return err
+ }
+ r.ev.WriteSuiteStart(report)
+ for _, originalSpecReport := range originalReport.SpecReports {
+ specReport := newSpecReport(originalSpecReport)
+ err := specReport.Fill()
+ if err != nil {
+ return err
+ }
+ if specReport.o.LeafNodeType == types.NodeTypeIt {
+ // handle any It leaf node as a spec
+ r.ev.WriteSpecStart(report, specReport)
+ r.ev.WriteSpecOut(report, specReport)
+ r.ev.WriteSpecResult(report, specReport)
+ } else {
+ // handle any other leaf node as generic output
+ r.ev.WriteSpecOut(report, specReport)
+ }
+ }
+ r.ev.WriteSuiteResult(report)
+ return nil
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
index 2d2ea2fc..99c9c5f5 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -2,6 +2,7 @@ package internal
import (
"context"
+ "reflect"
"github.com/onsi/ginkgo/v2/types"
)
@@ -11,6 +12,7 @@ type SpecContext interface {
SpecReport() types.SpecReport
AttachProgressReporter(func() string) func()
+ WrappedContext() context.Context
}
type specContext struct {
@@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext {
func (sc *specContext) SpecReport() types.SpecReport {
return sc.suite.CurrentSpecReport()
}
+
+func (sc *specContext) WrappedContext() context.Context {
+ return sc.Context
+}
+
+/*
+The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext.
+We support this by taking their context.Context and returning a SpecContext that wraps it.
+*/
+func wrapContextChain(ctx context.Context) SpecContext {
+ if ctx == nil {
+ return nil
+ }
+ if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) {
+ return ctx.(*specContext)
+ } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok {
+ return &specContext{
+ Context: ctx,
+ ProgressReporterManager: sc.ProgressReporterManager,
+ cancel: sc.cancel,
+ suite: sc.suite,
+ }
+ }
+ return nil
+}
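wrapContextChain leans on the fact that context value lookups walk back through every wrapper: a user context layered on top of the original SpecContext still resolves the "GINKGO_SPEC_CONTEXT" value to the underlying *specContext. A generic standard-library demonstration of that lookup-through-wrapping behaviour (the key and value here are stand-ins, not Ginkgo types):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type ctxKey string

func main() {
	// The inner context carries a value, the way Ginkgo's SpecContext
	// carries a reference to itself under "GINKGO_SPEC_CONTEXT".
	inner := context.WithValue(context.Background(), ctxKey("spec"), "original spec context")

	// A user wraps it with extra layers (deadline, more values, ...).
	wrapped, cancel := context.WithTimeout(inner, time.Second)
	defer cancel()
	wrapped = context.WithValue(wrapped, ctxKey("other"), 42)

	// Value lookups walk the chain back to the inner context, so the
	// original value is still recoverable from the outermost wrapper.
	fmt.Println(wrapped.Value(ctxKey("spec"))) // original spec context
}
```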
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
index 3edf5077..ef76cd09 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -32,6 +32,7 @@ type Suite struct {
suiteNodes Nodes
cleanupNodes Nodes
+ aroundNodes types.AroundNodes
failer *Failer
reporter reporters.Reporter
@@ -41,6 +42,8 @@ type Suite struct {
config types.SuiteConfig
deadline time.Time
+ currentConstructionNodeReport *types.ConstructionNodeReport
+
skipAll bool
report types.Report
currentSpecReport types.SpecReport
@@ -87,6 +90,7 @@ func (suite *Suite) Clone() (*Suite, error) {
ProgressReporterManager: NewProgressReporterManager(),
topLevelContainers: suite.topLevelContainers.Clone(),
suiteNodes: suite.suiteNodes.Clone(),
+ aroundNodes: suite.aroundNodes.Clone(),
selectiveLock: &sync.Mutex{},
}, nil
}
@@ -104,13 +108,14 @@ func (suite *Suite) BuildTree() error {
return nil
}
-func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
if suite.phase != PhaseBuildTree {
panic("cannot run before building the tree = call suite.BuildTree() first")
}
ApplyNestedFocusPolicyToTree(suite.tree)
specs := GenerateSpecsFromTreeRoot(suite.tree)
- specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+ specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig)
+ specs = ComputeAroundNodes(specs)
suite.phase = PhaseRun
suite.client = client
@@ -120,6 +125,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
suite.outputInterceptor = outputInterceptor
suite.interruptHandler = interruptHandler
suite.config = suiteConfig
+ suite.aroundNodes = suiteAroundNodes
if suite.config.Timeout > 0 {
suite.deadline = time.Now().Add(suite.config.Timeout)
@@ -127,7 +133,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string
cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal)
- success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+ success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs)
cancelProgressHandler()
@@ -199,6 +205,14 @@ func (suite *Suite) PushNode(node Node) error {
err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
}
}()
+
+ // Ensure that code running in the body of the container node
+ // has access to information about the current container node(s).
+ suite.currentConstructionNodeReport = constructionNodeReportForTreeNode(suite.tree)
+ defer func() {
+ suite.currentConstructionNodeReport = nil
+ }()
+
node.Body(nil)
return err
}()
@@ -259,6 +273,7 @@ func (suite *Suite) pushCleanupNode(node Node) error {
node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
node.NestingLevel = suite.currentNode.NestingLevel
+ node.AroundNodes = types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...)
suite.selectiveLock.Lock()
suite.cleanupNodes = append(suite.cleanupNodes, node)
suite.selectiveLock.Unlock()
@@ -327,6 +342,16 @@ func (suite *Suite) By(text string, callback ...func()) error {
return nil
}
+func (suite *Suite) CurrentConstructionNodeReport() types.ConstructionNodeReport {
+ suite.selectiveLock.Lock()
+ defer suite.selectiveLock.Unlock()
+ report := suite.currentConstructionNodeReport
+ if report == nil {
+ panic("CurrentConstructionNodeReport may only be called during construction of the spec tree")
+ }
+ return *report
+}
+
/*
Spec Running methods - used during PhaseRun
*/
@@ -428,13 +453,14 @@ func (suite *Suite) processCurrentSpecReport() {
}
}
-func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool {
numSpecsThatWillBeRun := specs.CountWithoutSkip()
suite.report = types.Report{
SuitePath: suitePath,
SuiteDescription: description,
SuiteLabels: suiteLabels,
+ SuiteSemVerConstraints: suiteSemVerConstraints,
SuiteConfig: suite.config,
SuiteHasProgrammaticFocus: hasProgrammaticFocus,
PreRunStats: types.PreRunStats{
@@ -891,7 +917,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
failureC <- failureFromRun
}()
- node.Body(sc)
+ aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...)
+ if len(aroundNodes) > 0 {
+ i := 0
+ var f func(context.Context)
+ f = func(c context.Context) {
+ sc := wrapContextChain(c)
+ if sc == nil {
+ suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation)
+ return
+ }
+ i++
+ if i < len(aroundNodes) {
+ aroundNodes[i].Body(sc, f)
+ } else {
+ node.Body(sc)
+ }
+ }
+ aroundNodes[0].Body(sc, f)
+ if i != len(aroundNodes) {
+ suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation)
+ }
+ } else {
+ node.Body(sc)
+ }
finished = true
}()
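The runNode change threads the spec body through a chain of AroundNodes: each wrapper is handed the next step, must call it exactly once, and the innermost step runs the real body; if any wrapper skips the call, the suite fails the spec. A stripped-down sketch of that chaining loop outside of Ginkgo, with plain functions in place of node types:

```go
package main

import (
	"context"
	"fmt"
)

// around mirrors the shape used in runNode above: a wrapper receives the
// next step in the chain and is expected to call it exactly once.
type around func(ctx context.Context, next func(context.Context))

func run(ctx context.Context, arounds []around, body func(context.Context)) {
	if len(arounds) == 0 {
		body(ctx)
		return
	}
	i := 0
	var next func(context.Context)
	next = func(c context.Context) {
		i++
		if i < len(arounds) {
			arounds[i](c, next)
		} else {
			body(c)
		}
	}
	arounds[0](ctx, next)
	if i != len(arounds) {
		fmt.Println("an around wrapper never called the next step")
	}
}

func main() {
	logging := func(c context.Context, next func(context.Context)) {
		fmt.Println("before body")
		next(c)
		fmt.Println("after body")
	}
	run(context.Background(), []around{logging}, func(context.Context) {
		fmt.Println("body")
	})
}
```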
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
index 73e26556..9806e315 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -1,6 +1,7 @@
package testingtproxy
import (
+ "context"
"fmt"
"io"
"os"
@@ -19,9 +20,9 @@ type addReportEntryFunc func(names string, args ...any)
type ginkgoWriterInterface interface {
io.Writer
- Print(a ...interface{})
- Printf(format string, a ...interface{})
- Println(a ...interface{})
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
}
type ginkgoRecoverFunc func()
type attachProgressReporterFunc func(func() string) func()
@@ -80,11 +81,31 @@ func (t *ginkgoTestingTProxy) Setenv(key, value string) {
}
}
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Chdir(dir string) {
+ currentDir, err := os.Getwd()
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to get current directory: %v", err), 1)
+ }
+
+ t.cleanup(os.Chdir, currentDir, internal.Offset(1))
+
+ err = os.Chdir(dir)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to change directory: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Context() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ t.cleanup(cancel, internal.Offset(1))
+ return ctx
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -100,11 +121,11 @@ func (t *ginkgoTestingTProxy) Failed() bool {
return t.report().Failed()
}
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatal(args ...any) {
t.fail(fmt.Sprintln(args...), t.offset)
}
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...any) {
t.fail(fmt.Sprintf(format, args...), t.offset)
}
@@ -112,11 +133,11 @@ func (t *ginkgoTestingTProxy) Helper() {
types.MarkAsHelper(1)
}
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Log(args ...any) {
fmt.Fprintln(t.writer, args...)
}
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Logf(format string, args ...any) {
t.Log(fmt.Sprintf(format, args...))
}
@@ -128,7 +149,7 @@ func (t *ginkgoTestingTProxy) Parallel() {
// No-op
}
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skip(args ...any) {
t.skip(fmt.Sprintln(args...), t.offset)
}
@@ -136,7 +157,7 @@ func (t *ginkgoTestingTProxy) SkipNow() {
t.skip("skip", t.offset)
}
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...any) {
t.skip(fmt.Sprintf(format, args...), t.offset)
}
@@ -208,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int {
func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
return t.attachProgressReporter(f)
}
+func (t *ginkgoTestingTProxy) Output() io.Writer {
+ return t.writer
+}
+func (t *ginkgoTestingTProxy) Attr(key, value string) {
+ t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose)
+}
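Chdir and Context bring the proxy (which backs GinkgoT()) in line with the methods *testing.T gained in Go 1.24: Chdir switches the working directory and registers a cleanup that restores it, and Context returns a context that is canceled when the test finishes. For reference, this is how the standard-library counterparts are used in a plain `testing` test:

```go
package example_test

import (
	"os"
	"testing"
)

// Uses the *testing.T methods added in Go 1.24 that the proxy mirrors.
func TestChdirAndContext(t *testing.T) {
	t.Chdir(t.TempDir()) // change into a temp dir; restored automatically on cleanup

	ctx := t.Context() // canceled automatically when the test finishes
	select {
	case <-ctx.Done():
		t.Fatal("context should still be live while the test runs")
	default:
	}

	wd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	t.Log("running in", wd)
}
```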
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/operator/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
index aab42d5f..1c4e0534 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -121,15 +121,15 @@ func (w *Writer) ClearTeeWriters() {
w.teeWriters = []io.Writer{}
}
-func (w *Writer) Print(a ...interface{}) {
+func (w *Writer) Print(a ...any) {
fmt.Fprint(w, a...)
}
-func (w *Writer) Printf(format string, a ...interface{}) {
+func (w *Writer) Printf(format string, a ...any) {
fmt.Fprintf(w, format, a...)
}
-func (w *Writer) Println(a ...interface{}) {
+func (w *Writer) Println(a ...any) {
fmt.Fprintln(w, a...)
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
index 48073048..026d9cf9 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
if len(report.SuiteLabels) > 0 {
r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
}
+ if len(report.SuiteSemVerConstraints) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", ")))
+ }
r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
if report.SuiteConfig.ParallelTotal > 1 {
r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
@@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
bannerWidth = len(labels) + 2
}
}
+ if len(report.SuiteSemVerConstraints) > 0 {
+ semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints))
+ if len(semVerConstraints)+2 > bannerWidth {
+ bannerWidth = len(semVerConstraints) + 2
+ }
+ }
r.emitBlock(strings.Repeat("=", bannerWidth))
out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
@@ -371,13 +381,22 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim
cursor := 0
for _, entry := range timeline {
tl := entry.GetTimelineLocation()
- if tl.Offset < len(gw) {
- r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
- cursor = tl.Offset
- } else if cursor < len(gw) {
+
+ end := tl.Offset
+ if end > len(gw) {
+ end = len(gw)
+ }
+ if end < cursor {
+ end = cursor
+ }
+ if cursor < end && cursor <= len(gw) && end <= len(gw) {
+ r.emit(r.fi(indent, "%s", gw[cursor:end]))
+ cursor = end
+ } else if cursor < len(gw) && end == len(gw) {
r.emit(r.fi(indent, "%s", gw[cursor:]))
cursor = len(gw)
}
+
switch x := entry.(type) {
case types.Failure:
if isVeryVerbose {
@@ -394,7 +413,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim
case types.ReportEntry:
r.emitReportEntry(indent, x)
case types.ProgressReport:
- r.emitProgressReport(indent, false, x)
+ r.emitProgressReport(indent, false, isVeryVerbose, x)
case types.SpecEvent:
if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
r.emitSpecEvent(indent, x, isVeryVerbose)
@@ -448,7 +467,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur
if !failure.ProgressReport.IsZero() {
r.emitBlock("\n")
- r.emitProgressReport(indent, false, failure.ProgressReport)
+ r.emitProgressReport(indent, false, false, failure.ProgressReport)
}
if failure.AdditionalFailure != nil && includeAdditionalFailure {
@@ -464,11 +483,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
}
shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
- r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitProgressReport(1, shouldEmitGW, true, report)
r.emitDelimiter(1)
}
-func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
+func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) {
if report.Message != "" {
r.emitBlock(r.fi(indent, report.Message+"\n"))
indent += 1
@@ -504,6 +523,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
indent -= 1
}
+ if r.conf.GithubOutput && emitGroup {
+ r.emitBlock(r.fi(indent, "::group::Progress Report"))
+ }
+
if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
r.emit("\n")
r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
@@ -550,6 +573,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput
r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
r.emitGoroutines(indent, otherGoroutines...)
}
+
+ if r.conf.GithubOutput && emitGroup {
+ r.emitBlock(r.fi(indent, "::endgroup::"))
+ }
}
func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
@@ -685,11 +712,11 @@ func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
}
/* Rendering text */
-func (r *DefaultReporter) f(format string, args ...interface{}) string {
+func (r *DefaultReporter) f(format string, args ...any) string {
return r.formatter.F(format, args...)
}
-func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
+func (r *DefaultReporter) fi(indentation uint, format string, args ...any) string {
return r.formatter.Fi(indentation, format, args...)
}
@@ -698,8 +725,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
}
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
- texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
- texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)
+ texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{}
+ texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...)
if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
@@ -707,6 +734,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
texts = append(texts, r.f(report.LeafNodeText))
}
labels = append(labels, report.LeafNodeLabels)
+ semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints)
locations = append(locations, report.LeafNodeLocation)
failureLocation := report.Failure.FailureNodeLocation
@@ -720,6 +748,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
locations = append([]types.CodeLocation{failureLocation}, locations...)
labels = append([][]string{{}}, labels...)
+ semVerConstraints = append([][]string{{}}, semVerConstraints...)
highlightIndex = 0
case types.FailureNodeInContainer:
i := report.Failure.FailureNodeContainerIndex
@@ -747,6 +776,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(labels[i]) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
}
+ if len(semVerConstraints[i]) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", "))
+ }
out += "\n"
out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
}
@@ -770,6 +802,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo
if len(flattenedLabels) > 0 {
out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
}
+ flattenedSemVerConstraints := report.SemVerConstraints()
+ if len(flattenedSemVerConstraints) > 0 {
+ out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", "))
+ }
out += "\n"
if usePreciseFailureLocation {
out += r.f("{{gray}}%s{{/}}", failureLocation)
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
new file mode 100644
index 00000000..d02fb7a1
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/gojson_report.go
@@ -0,0 +1,61 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/internal/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateGoTestJSONReport produces a JSON-formatted report in the test2json format used by `go test -json`
+func GenerateGoTestJSONReport(report types.Report, destination string) error {
+ // walk report and generate test2json-compatible objects
+ // JSON-encode the objects into filename
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ r := reporters.NewGoJSONReporter(
+ enc,
+ systemErrForUnstructuredReporters,
+ systemOutForUnstructuredReporters,
+ )
+ return r.Write(report)
+}
+
+// MergeAndCleanupGoTestJSONReports produces a single JSON-formatted report at the passed-in destination by merging the JSON-formatted reports provided in sources.
+// It skips over reports that fail to decode but reports on them via the returned messages []string
+func MergeAndCleanupGoTestJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ _, err = f.Write(data)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not write to %s:\n%s", destination, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ }
+ return messages, nil
+}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
index 562e0f62..828f893f 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -36,6 +36,9 @@ type JunitReportConfig struct {
// Enable OmitSpecLabels to prevent labels from appearing in the spec name
OmitSpecLabels bool
+ // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name
+ OmitSpecSemVerConstraints bool
+
// Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
OmitLeafNodeType bool
@@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
{"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
{"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
{"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))},
{"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
{"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
{"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"SemVerFilter", report.SuiteConfig.SemVerFilter},
{"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
{"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
{"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
@@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit
owner = matches[1]
}
}
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
name = strings.TrimSpace(name)
test := JUnitTestCase{
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
index e990ad82..55e1d1f4 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
name := report.SuiteDescription
labels := report.SuiteLabels
+ semVerConstraints := report.SuiteSemVerConstraints
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
for _, spec := range report.SpecReports {
name := fmt.Sprintf("[%s]", spec.LeafNodeType)
@@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error {
if len(labels) > 0 {
name = name + " [" + strings.Join(labels, ", ") + "]"
}
+ semVerConstraints := spec.SemVerConstraints()
+ if len(semVerConstraints) > 0 {
+ name = name + " [" + strings.Join(semVerConstraints, ", ") + "]"
+ }
name = tcEscape(name)
fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
index aa1a3517..4e86dba8 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -27,6 +27,8 @@ CurrentSpecReport returns information about the current running spec.
The returned object is a types.SpecReport which includes helper methods
to make extracting information about the spec easier.
+During construction of the spec tree, the result is empty.
+
You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
*/
@@ -34,6 +36,31 @@ func CurrentSpecReport() SpecReport {
return global.Suite.CurrentSpecReport()
}
+/*
+ConstructionNodeReport describes the container nodes during construction of
+the spec tree. It provides a subset of the information that is provided
+by SpecReport at runtime.
+
+It is documented here: [types.ConstructionNodeReport]
+*/
+type ConstructionNodeReport = types.ConstructionNodeReport
+
+/*
+CurrentTreeConstructionNodeReport returns information about the current container nodes
+that are leading to the current path in the spec tree.
+The returned object is a types.ConstructionNodeReport which includes helper methods
+to make extracting information about the spec easier.
+
+May only be called during construction of the spec tree. It panics when
+called while tests are running. Use CurrentSpecReport instead in that
+phase.
+
+You can learn more about ConstructionNodeReport here: [types.ConstructionNodeReport]
+*/
+func CurrentTreeConstructionNodeReport() ConstructionNodeReport {
+ return global.Suite.CurrentConstructionNodeReport()
+}
+
/*
ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
@@ -60,7 +87,7 @@ AddReportEntry() must be called within a Subject or Setup node - not in a Contai
You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
*/
-func AddReportEntry(name string, args ...interface{}) {
+func AddReportEntry(name string, args ...any) {
cl := types.NewCodeLocation(1)
reportEntry, err := internal.NewReportEntry(name, cl, args...)
if err != nil {
@@ -89,10 +116,10 @@ You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#g
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportBeforeEach(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)))
}
/*
@@ -113,10 +140,10 @@ You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#ge
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportAfterEach(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)))
}
/*
@@ -143,9 +170,9 @@ You can learn more about Ginkgo's reporting infrastructure, including generating
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
func ReportBeforeSuite(body any, args ...any) bool {
- combinedArgs := []interface{}{body}
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)))
}
/*
@@ -165,7 +192,7 @@ ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Co
When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
all parallel nodes
-In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, GoJSON, JUnit, and Teamcity formatted reports using the --json-report, --gojson-report, --junit-report, and --teamcity-report ginkgo CLI flags.
You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
@@ -174,10 +201,10 @@ You can learn more about Ginkgo's reporting infrastructure, including generating
You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterSuite(text string, body any, args ...interface{}) bool {
- combinedArgs := []interface{}{body}
+func ReportAfterSuite(text string, body any, args ...any) bool {
+ combinedArgs := []any{body}
combinedArgs = append(combinedArgs, args...)
- return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+ return pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)))
}
func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
@@ -188,6 +215,12 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
}
}
+ if reporterConfig.GoJSONReport != "" {
+ err := reporters.GenerateGoTestJSONReport(report, reporterConfig.GoJSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Go JSON report:\n%s", err.Error()))
+ }
+ }
if reporterConfig.JUnitReport != "" {
err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
if err != nil {
@@ -206,6 +239,9 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
if reporterConfig.JSONReport != "" {
flags = append(flags, "--json-report")
}
+ if reporterConfig.GoJSONReport != "" {
+ flags = append(flags, "--gojson-report")
+ }
if reporterConfig.JUnitReport != "" {
flags = append(flags, "--junit-report")
}
@@ -213,9 +249,11 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re
flags = append(flags, "--teamcity-report")
}
pushNode(internal.NewNode(
- deprecationTracker, types.NodeTypeReportAfterSuite,
- fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
- body,
- types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ internal.TransformNewNodeArgs(
+ exitIfErrors, deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ),
))
}

For context on the GoJSON report wiring added above: a minimal sketch of generating the same report programmatically from a ReportAfterSuite node, assuming the vendored reporters package exposes GenerateGoTestJSONReport exactly as referenced in this diff; the suite wiring and file name are illustrative.
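package mysuite_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/reporters"
	. "github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "My Suite")
}

// Writes a `go test -json` style report; the CLI equivalent is `ginkgo --gojson-report=gojson-report.json`.
var _ = ReportAfterSuite("gojson report", func(report Report) {
	Expect(reporters.GenerateGoTestJSONReport(report, "gojson-report.json")).To(Succeed())
})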
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/operator/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
index 9074a57a..1031aa85 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -23,7 +23,7 @@ You can learn more about generating EntryDescriptions here: https://onsi.github.
*/
type EntryDescription string
-func (ed EntryDescription) render(args ...interface{}) string {
+func (ed EntryDescription) render(args ...any) string {
return fmt.Sprintf(string(ed), args...)
}
@@ -44,7 +44,7 @@ For example:
You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
-func DescribeTable(description string, args ...interface{}) bool {
+func DescribeTable(description string, args ...any) bool {
GinkgoHelper()
generateTable(description, false, args...)
return true
@@ -53,7 +53,7 @@ func DescribeTable(description string, args ...interface{}) bool {
/*
You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
*/
-func FDescribeTable(description string, args ...interface{}) bool {
+func FDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Focus)
generateTable(description, false, args...)
@@ -63,7 +63,7 @@ func FDescribeTable(description string, args ...interface{}) bool {
/*
You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
*/
-func PDescribeTable(description string, args ...interface{}) bool {
+func PDescribeTable(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Pending)
generateTable(description, false, args...)
@@ -109,7 +109,7 @@ Note that you **must** place define an It inside the body function.
You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
*/
-func DescribeTableSubtree(description string, args ...interface{}) bool {
+func DescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
generateTable(description, true, args...)
return true
@@ -118,7 +118,7 @@ func DescribeTableSubtree(description string, args ...interface{}) bool {
/*
You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
*/
-func FDescribeTableSubtree(description string, args ...interface{}) bool {
+func FDescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Focus)
generateTable(description, true, args...)
@@ -128,7 +128,7 @@ func FDescribeTableSubtree(description string, args ...interface{}) bool {
/*
You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
*/
-func PDescribeTableSubtree(description string, args ...interface{}) bool {
+func PDescribeTableSubtree(description string, args ...any) bool {
GinkgoHelper()
args = append(args, internal.Pending)
generateTable(description, true, args...)
@@ -144,9 +144,9 @@ var XDescribeTableSubtree = PDescribeTableSubtree
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
- description interface{}
- decorations []interface{}
- parameters []interface{}
+ description any
+ decorations []any
+ parameters []any
codeLocation types.CodeLocation
}
@@ -162,7 +162,7 @@ If you want to generate interruptible specs simply write a Table function that a
You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
*/
-func Entry(description interface{}, args ...interface{}) TableEntry {
+func Entry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
@@ -171,7 +171,7 @@ func Entry(description interface{}, args ...interface{}) TableEntry {
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
-func FEntry(description interface{}, args ...interface{}) TableEntry {
+func FEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Focus)
@@ -181,7 +181,7 @@ func FEntry(description interface{}, args ...interface{}) TableEntry {
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
-func PEntry(description interface{}, args ...interface{}) TableEntry {
+func PEntry(description any, args ...any) TableEntry {
GinkgoHelper()
decorations, parameters := internal.PartitionDecorations(args...)
decorations = append(decorations, internal.Pending)
@@ -196,17 +196,17 @@ var XEntry = PEntry
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
-func generateTable(description string, isSubtree bool, args ...interface{}) {
+func generateTable(description string, isSubtree bool, args ...any) {
GinkgoHelper()
cl := types.NewCodeLocation(0)
- containerNodeArgs := []interface{}{cl}
+ containerNodeArgs := []any{cl}
entries := []TableEntry{}
- var internalBody interface{}
+ var internalBody any
var internalBodyType reflect.Type
- var tableLevelEntryDescription interface{}
- tableLevelEntryDescription = func(args ...interface{}) string {
+ var tableLevelEntryDescription any
+ tableLevelEntryDescription = func(args ...any) string {
out := []string{}
for _, arg := range args {
out = append(out, fmt.Sprint(arg))
@@ -265,7 +265,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
}
- internalNodeArgs := []interface{}{entry.codeLocation}
+ internalNodeArgs := []any{entry.codeLocation}
internalNodeArgs = append(internalNodeArgs, entry.decorations...)
hasContext := false
@@ -290,7 +290,7 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
if err != nil {
panic(err)
}
- invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...))
+ invokeFunction(internalBody, append([]any{c}, entry.parameters...))
})
if isSubtree {
exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
@@ -309,14 +309,14 @@ func generateTable(description string, isSubtree bool, args ...interface{}) {
internalNodeType = types.NodeTypeContainer
}
- pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...))
+ pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, internalNodeType, description, internalNodeArgs...)))
}
})
- pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
+ pushNode(internal.NewNode(internal.TransformNewNodeArgs(exitIfErrors, deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...)))
}
-func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+func invokeFunction(function any, parameters []any) []reflect.Value {
inValues := make([]reflect.Value, len(parameters))
funcType := reflect.TypeOf(function)
@@ -339,7 +339,7 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va
return reflect.ValueOf(function).Call(inValues)
}
-func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
+func validateParameters(function any, parameters []any, kind string, cl types.CodeLocation, hasContext bool) error {
funcType := reflect.TypeOf(function)
limit := funcType.NumIn()
offset := 0
@@ -377,7 +377,7 @@ func validateParameters(function interface{}, parameters []interface{}, kind str
return nil
}
-func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
+func computeValue(parameter any, t reflect.Type) reflect.Value {
if parameter == nil {
return reflect.Zero(t)
} else {
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/around_node.go
new file mode 100644
index 00000000..a069e062
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/around_node.go
@@ -0,0 +1,56 @@
+package types
+
+import (
+ "context"
+)
+
+type AroundNodeAllowedFuncs interface {
+ ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func()
+}
+type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context))
+
+func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator {
+ if f == nil {
+ panic("BuildAroundNode cannot be called with a nil function.")
+ }
+ var aroundNodeFunc func(context.Context, func(context.Context))
+ switch x := any(f).(type) {
+ case func(context.Context, func(context.Context)):
+ aroundNodeFunc = x
+ case func(context.Context) context.Context:
+ aroundNodeFunc = func(ctx context.Context, body func(context.Context)) {
+ ctx = x(ctx)
+ body(ctx)
+ }
+ case func():
+ aroundNodeFunc = func(ctx context.Context, body func(context.Context)) {
+ x()
+ body(ctx)
+ }
+ }
+
+ return AroundNodeDecorator{
+ Body: aroundNodeFunc,
+ CodeLocation: cl,
+ }
+}
+
+type AroundNodeDecorator struct {
+ Body AroundNodeFunc
+ CodeLocation CodeLocation
+}
+
+type AroundNodes []AroundNodeDecorator
+
+func (an AroundNodes) Clone() AroundNodes {
+ out := make(AroundNodes, len(an))
+ copy(out, an)
+ return out
+}
+
+func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes {
+ out := make(AroundNodes, len(an)+len(other))
+ copy(out, an)
+ copy(out[len(an):], other)
+ return out
+}
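To illustrate the AroundNode decorator type introduced in this new file: a minimal sketch, under the assumption that callers construct decorators via types.AroundNode as shown above (suites would normally attach these through the ginkgo DSL rather than the types package directly); the printed strings and context key are illustrative.

package types_test

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

type ctxKey string

func ExampleAroundNode() {
	cl := types.NewCodeLocation(0)

	// Full control: the wrapper decides when, and with which context, the body runs.
	wrap := types.AroundNode(func(ctx context.Context, body func(context.Context)) {
		fmt.Println("before")
		body(ctx)
		fmt.Println("after")
	}, cl)

	// A func(ctx) ctx is adapted to run before the body and replace its context.
	withValue := types.AroundNode(func(ctx context.Context) context.Context {
		return context.WithValue(ctx, ctxKey("build"), "2.27.2")
	}, cl)

	// A plain func() is adapted to run before the body.
	setup := types.AroundNode(func() { fmt.Println("setup") }, cl)

	decorators := types.AroundNodes{wrap, withValue, setup}
	for _, d := range decorators {
		d.Body(context.Background(), func(context.Context) {})
	}
}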
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/config.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/config.go
index 8c0dfab8..f8470360 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/config.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -24,6 +24,7 @@ type SuiteConfig struct {
FocusFiles []string
SkipFiles []string
LabelFilter string
+ SemVerFilter string
FailOnPending bool
FailOnEmpty bool
FailFast bool
@@ -95,6 +96,7 @@ type ReporterConfig struct {
ForceNewlines bool
JSONReport string
+ GoJSONReport string
JUnitReport string
TeamcityReport string
}
@@ -111,7 +113,7 @@ func (rc ReporterConfig) Verbosity() VerbosityLevel {
}
func (rc ReporterConfig) WillGenerateReport() bool {
- return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
+ return rc.JSONReport != "" || rc.GoJSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != ""
}
func NewDefaultReporterConfig() ReporterConfig {
@@ -159,7 +161,7 @@ func (g CLIConfig) ComputedProcs() int {
n := 1
if g.Parallel {
- n = runtime.NumCPU()
+ n = runtime.GOMAXPROCS(-1)
if n > 4 {
n = n - 1
}
@@ -172,7 +174,7 @@ func (g CLIConfig) ComputedNumCompilers() int {
return g.NumCompilers
}
- return runtime.NumCPU()
+ return runtime.GOMAXPROCS(-1)
}
// Configuration for the Ginkgo CLI capturing available go flags
@@ -231,6 +233,10 @@ func (g GoFlagsConfig) BinaryMustBePreserved() bool {
return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != ""
}
+func (g GoFlagsConfig) NeedsSymbols() bool {
+ return g.BinaryMustBePreserved()
+}
+
// Configuration that were deprecated in 2.0
type deprecatedConfig struct {
DebugParallel bool
@@ -257,8 +263,12 @@ var FlagSections = GinkgoFlagSections{
{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
- {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
- {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
+ {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis",
+ Description: "When generating a cover files, please pass a filename {{bold}}not{{/}} a path. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
+ {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis",
+ Description: "When generating profile files, please pass filenames {{bold}}not{{/}} a path. Ginkgo will generate a profile file with the given name in the package's directory. To specify a different directory use {{magenta}}--output-dir{{/}}.",
+ },
{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
@@ -300,6 +310,8 @@ var SuiteConfigFlags = GinkgoFlags{
{KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
+ {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version",
+ Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"},
{KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
{KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
@@ -348,6 +360,8 @@ var ReporterConfigFlags = GinkgoFlags{
{KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
+ {KeyPath: "R.GoJSONReport", Name: "gojson-report", UsageArgument: "filename.json", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a Go JSON-formatted test report at the specified location."},
{KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
{KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
@@ -365,7 +379,7 @@ var ReporterConfigFlags = GinkgoFlags{
func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
flags = flags.WithPrefix("ginkgo")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"D": &deprecatedConfig{},
@@ -435,6 +449,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re
}
}
+ if suiteConfig.SemVerFilter != "" {
+ _, err := ParseSemVerFilter(suiteConfig.SemVerFilter)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
case "", "dup", "swap", "none":
default:
@@ -515,7 +536,7 @@ var GoBuildFlags = GinkgoFlags{
{KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."},
{KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
- Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty (by explicitly passing --vet=""), "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
{KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
{KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
@@ -565,6 +586,9 @@ var GoBuildFlags = GinkgoFlags{
Usage: "print the name of the temporary work directory and do not delete it when exiting."},
{KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
Usage: "print the commands."},
+}
+
+var GoBuildOFlags = GinkgoFlags{
{KeyPath: "Go.O", Name: "o", SectionKey: "go-build",
Usage: "output binary path (including name)."},
}
@@ -572,7 +596,7 @@ var GoBuildFlags = GinkgoFlags{
// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
var GoRunFlags = GinkgoFlags{
{KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
- Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover. Must be passed a filename, not a path. Use output-dir to control the location of the output.`},
{KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
{KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
@@ -600,6 +624,22 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
}
+ if strings.ContainsRune(goFlagsConfig.CoverProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--coverprofile", goFlagsConfig.CoverProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.CPUProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--cpuprofile", goFlagsConfig.CPUProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MemProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--memprofile", goFlagsConfig.MemProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.BlockProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--blockprofile", goFlagsConfig.BlockProfile))
+ }
+ if strings.ContainsRune(goFlagsConfig.MutexProfile, os.PathSeparator) {
+ errors = append(errors, GinkgoErrors.ExpectFilenameNotPath("--mutexprofile", goFlagsConfig.MutexProfile))
+ }
+
//initialize the output directory
if cliConfig.OutputDir != "" {
err := os.MkdirAll(cliConfig.OutputDir, 0777)
@@ -620,7 +660,7 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo
}
// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
-func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild string, pathToInvocationPath string, preserveSymbols bool) ([]string, error) {
// if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
// the built test binary can generate a coverprofile
if goFlagsConfig.CoverProfile != "" {
@@ -643,10 +683,14 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin
goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
}
+ if !goFlagsConfig.NeedsSymbols() && goFlagsConfig.LDFlags == "" && !preserveSymbols {
+ goFlagsConfig.LDFlags = "-w -s"
+ }
+
args := []string{"test", "-c", packageToBuild}
goArgs, err := GenerateFlagArgs(
- GoBuildFlags,
- map[string]interface{}{
+ GoBuildFlags.CopyAppend(GoBuildOFlags...),
+ map[string]any{
"Go": &goFlagsConfig,
},
)
@@ -665,7 +709,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": &suiteConfig,
"R": &reporterConfig,
"Go": &goFlagsConfig,
@@ -677,7 +721,7 @@ func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterC
// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
flags := GoRunFlags.WithPrefix("test")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"Go": &goFlagsConfig,
}
@@ -699,7 +743,7 @@ func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterCo
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -720,7 +764,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter
flags = flags.CopyAppend(GoBuildFlags...)
flags = flags.CopyAppend(GoRunFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"S": suiteConfig,
"R": reporterConfig,
"C": cliConfig,
@@ -735,8 +779,9 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter
func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
flags := GinkgoCLISharedFlags
flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoBuildOFlags...)
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
"Go": goFlagsConfig,
"D": &deprecatedConfig{},
@@ -760,7 +805,7 @@ func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig
func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
- bindings := map[string]interface{}{
+ bindings := map[string]any{
"C": cliConfig,
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
index 17922304..518989a8 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -113,7 +113,7 @@ type DeprecatedSpecFailure struct {
type DeprecatedSpecMeasurement struct {
Name string
- Info interface{}
+ Info any
Order int
Results []float64
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/errors.go
index 6bb72d00..59313238 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/errors.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -88,7 +88,7 @@ body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, n
}
}
-func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic any, cl CodeLocation) error {
return GinkgoError{
Heading: "Assertion or Panic detected during tree construction",
Message: formatter.F(
@@ -189,7 +189,7 @@ func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl
}
}
-func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator any) error {
return GinkgoError{
Heading: "Unknown Decorator",
Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
@@ -345,7 +345,7 @@ func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
}
/* ReportEntry errors */
-func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg any) error {
return GinkgoError{
Heading: "Too Many ReportEntry Values",
Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
@@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
}
}
+func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid SemVerConstraint",
+ Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg),
+ CodeLocation: cl,
+ DocLink: "spec-semantic-version-filtering",
+ }
+}
+
+func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Empty SemVerConstraint",
+ Message: "SemVerConstraint cannot be empty",
+ CodeLocation: cl,
+ DocLink: "spec-semantic-version-filtering",
+ }
+}
+
/* Table errors */
func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
return GinkgoError{
@@ -539,7 +557,7 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
/* Configuration errors */
-func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value any) error {
return GinkgoError{
Heading: "Unknown Type passed to RunSpecs",
Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
@@ -629,6 +647,20 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
}
}
+func (g ginkgoErrors) ExpectFilenameNotPath(flag string, path string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("%s expects a filename but was given a path: %s", flag, path),
+ Message: fmt.Sprintf("%s takes a filename, not a path. Use --output-dir to specify a directory to collect all test outputs.", flag),
+ }
+}
+
+func (g ginkgoErrors) FlagAfterPositionalParameter() error {
+ return GinkgoError{
+ Heading: "Malformed arguments - detected a flag after the package liste",
+ Message: "Make sure all flags appear {{bold}}after{{/}} the Ginkgo subcommand and {{bold}}before{{/}} your list of packages (or './...').\n{{gray}}e.g. 'ginkgo run -p my_package' is valid but `ginkgo -p run my_package` is not.\n{{gray}}e.g. 'ginkgo -p -vet=\"\" ./...' is valid but 'ginkgo -p ./... -vet=\"\"' is not{{/}}",
+ }
+}
+
/* Stack-Trace parsing errors */
func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/flags.go
index de69f302..8409653f 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/flags.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -92,7 +92,7 @@ func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
type GinkgoFlagSet struct {
flags GinkgoFlags
- bindings interface{}
+ bindings any
sections GinkgoFlagSections
extraGoFlagsSection GinkgoFlagSection
@@ -101,7 +101,7 @@ type GinkgoFlagSet struct {
}
// Call NewGinkgoFlagSet to create GinkgoFlagSet that creates and binds to it's own *flag.FlagSet
-func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings any, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -110,7 +110,7 @@ func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFl
}
// Call NewGinkgoFlagSet to create GinkgoFlagSet that extends an existing *flag.FlagSet
-func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings any, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
return bindFlagSet(GinkgoFlagSet{
flags: flags,
bindings: bindings,
@@ -335,7 +335,7 @@ func (f GinkgoFlagSet) substituteUsage() {
fmt.Fprintln(f.flagSet.Output(), f.Usage())
}
-func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+func valueAtKeyPath(root any, keyPath string) (reflect.Value, bool) {
if len(keyPath) == 0 {
return reflect.Value{}, false
}
@@ -433,7 +433,7 @@ func (ssv stringSliceVar) Set(s string) error {
}
// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
-func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+func GenerateFlagArgs(flags GinkgoFlags, bindings any) ([]string, error) {
result := []string{}
for _, flag := range flags {
name := flag.ExportAs
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
index 7fdc8aa2..40a909b6 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -343,7 +343,7 @@ func tokenize(input string) func() (*treeNode, error) {
consumeUntil := func(cutset string) (string, int) {
j := i
for ; j < len(runes); j++ {
- if strings.IndexRune(cutset, runes[j]) >= 0 {
+ if strings.ContainsRune(cutset, runes[j]) {
break
}
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
index 7b1524b5..63f7a9f6 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -9,18 +9,18 @@ import (
// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
// and across the network connection when running in parallel
type ReportEntryValue struct {
- raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ raw any //unexported to prevent gob from freaking out about unregistered structs
AsJSON string
Representation string
}
-func WrapEntryValue(value interface{}) ReportEntryValue {
+func WrapEntryValue(value any) ReportEntryValue {
return ReportEntryValue{
raw: value,
}
}
-func (rev ReportEntryValue) GetRawValue() interface{} {
+func (rev ReportEntryValue) GetRawValue() any {
return rev.raw
}
@@ -118,7 +118,7 @@ func (entry ReportEntry) StringRepresentation() string {
// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
// field yourself.
-func (entry ReportEntry) GetRawValue() interface{} {
+func (entry ReportEntry) GetRawValue() any {
return entry.Value.GetRawValue()
}
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go
new file mode 100644
index 00000000..3fc2ed14
--- /dev/null
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go
@@ -0,0 +1,60 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/Masterminds/semver/v3"
+)
+
+type SemVerFilter func([]string) bool
+
+func MustParseSemVerFilter(input string) SemVerFilter {
+ filter, err := ParseSemVerFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
+
+func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) {
+ if filterVersion == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+
+ targetVersion, err := semver.NewVersion(filterVersion)
+ if err != nil {
+ return nil, fmt.Errorf("invalid filter version: %w", err)
+ }
+
+ return func(constraints []string) bool {
+ // unconstrained specs always run
+ if len(constraints) == 0 {
+ return true
+ }
+
+ for _, constraintStr := range constraints {
+ constraint, err := semver.NewConstraint(constraintStr)
+ if err != nil {
+ return false
+ }
+
+ if !constraint.Check(targetVersion) {
+ return false
+ }
+ }
+
+ return true
+ }, nil
+}
+
+func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) {
+ if len(semVerConstraint) == 0 {
+ return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl)
+ }
+ _, err := semver.NewConstraint(semVerConstraint)
+ if err != nil {
+ return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl)
+ }
+
+ return semVerConstraint, nil
+}
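To make the filter semantics above concrete: a short sketch of ParseSemVerFilter, mirroring what --sem-ver-filter=2.1.0 does on the CLI; the constraint strings are illustrative and use the Masterminds semver syntax imported by this file.

package types_test

import (
	"fmt"

	"github.com/onsi/ginkgo/v2/types"
)

func ExampleParseSemVerFilter() {
	filter, err := types.ParseSemVerFilter("2.1.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(filter(nil))                           // unconstrained specs always run
	fmt.Println(filter([]string{">= 2.0.0, < 3.0.0"})) // 2.1.0 satisfies the constraint
	fmt.Println(filter([]string{">= 2.2.0"}))          // 2.1.0 does not
	// Output:
	// true
	// true
	// false
}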
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/types.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/types.go
index ddcbec1b..9981a0dd 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/types.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"os"
+ "slices"
"sort"
"strings"
"time"
@@ -19,6 +20,57 @@ func init() {
}
}
+// ConstructionNodeReport captures information about a Ginkgo spec.
+type ConstructionNodeReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchySemVerConstraints [][]string
+
+ // IsSerial captures whether any container has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether any container is an Ordered container
+ IsInOrderedContainer bool
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts
+func (report ConstructionNodeReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ texts = slices.DeleteFunc(texts, func(t string) bool {
+ return t == ""
+ })
+ return strings.Join(texts, " ")
+}
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report ConstructionNodeReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+
+ return out
+}
+
// Report captures information about a Ginkgo test run
type Report struct {
//SuitePath captures the absolute path to the test suite
@@ -30,6 +82,9 @@ type Report struct {
//SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
SuiteLabels []string
+ //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function
+ SuiteSemVerConstraints []string
+
//SuiteSucceeded captures the success or failure status of the test run
//If true, the test run is considered successful.
//If false, the test run is considered unsuccessful
@@ -129,13 +184,21 @@ type SpecReport struct {
// all Describe/Context/When containers in this spec's hierarchy
ContainerHierarchyLabels [][]string
- // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text
+ // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchySemVerConstraints [][]string
+
+ // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text
// of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be
// one of the NodeTypesForSuiteLevelNodes node types)
- LeafNodeType NodeType
- LeafNodeLocation CodeLocation
- LeafNodeLabels []string
- LeafNodeText string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeSemVerConstraints []string
+ LeafNodeText string
+
+ // Captures the Spec Priority
+ SpecPriority int
// State captures whether the spec has passed, failed, etc.
State SpecState
@@ -198,48 +261,52 @@ type SpecReport struct {
func (report SpecReport) MarshalJSON() ([]byte, error) {
//All this to avoid emitting an empty Failure struct in the JSON
out := struct {
- ContainerHierarchyTexts []string
- ContainerHierarchyLocations []CodeLocation
- ContainerHierarchyLabels [][]string
- LeafNodeType NodeType
- LeafNodeLocation CodeLocation
- LeafNodeLabels []string
- LeafNodeText string
- State SpecState
- StartTime time.Time
- EndTime time.Time
- RunTime time.Duration
- ParallelProcess int
- Failure *Failure `json:",omitempty"`
- NumAttempts int
- MaxFlakeAttempts int
- MaxMustPassRepeatedly int
- CapturedGinkgoWriterOutput string `json:",omitempty"`
- CapturedStdOutErr string `json:",omitempty"`
- ReportEntries ReportEntries `json:",omitempty"`
- ProgressReports []ProgressReport `json:",omitempty"`
- AdditionalFailures []AdditionalFailure `json:",omitempty"`
- SpecEvents SpecEvents `json:",omitempty"`
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ ContainerHierarchySemVerConstraints [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeSemVerConstraints []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
}{
- ContainerHierarchyTexts: report.ContainerHierarchyTexts,
- ContainerHierarchyLocations: report.ContainerHierarchyLocations,
- ContainerHierarchyLabels: report.ContainerHierarchyLabels,
- LeafNodeType: report.LeafNodeType,
- LeafNodeLocation: report.LeafNodeLocation,
- LeafNodeLabels: report.LeafNodeLabels,
- LeafNodeText: report.LeafNodeText,
- State: report.State,
- StartTime: report.StartTime,
- EndTime: report.EndTime,
- RunTime: report.RunTime,
- ParallelProcess: report.ParallelProcess,
- Failure: nil,
- ReportEntries: nil,
- NumAttempts: report.NumAttempts,
- MaxFlakeAttempts: report.MaxFlakeAttempts,
- MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
- CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
- CapturedStdOutErr: report.CapturedStdOutErr,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
}
if !report.Failure.IsZero() {
@@ -287,6 +354,9 @@ func (report SpecReport) FullText() string {
if report.LeafNodeText != "" {
texts = append(texts, report.LeafNodeText)
}
+ texts = slices.DeleteFunc(texts, func(t string) bool {
+ return t == ""
+ })
return strings.Join(texts, " ")
}
@@ -312,6 +382,28 @@ func (report SpecReport) Labels() []string {
return out
}
+// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints.
+func (report SpecReport) SemVerConstraints() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints {
+ for _, semVerConstraint := range semVerConstraints {
+ if !seen[semVerConstraint] {
+ seen[semVerConstraint] = true
+ out = append(out, semVerConstraint)
+ }
+ }
+ }
+ for _, semVerConstraint := range report.LeafNodeSemVerConstraints {
+ if !seen[semVerConstraint] {
+ seen[semVerConstraint] = true
+ out = append(out, semVerConstraint)
+ }
+ }
+
+ return out
+}
+
// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
filter, err := ParseLabelFilter(query)
@@ -321,6 +413,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
return filter(report.Labels()), nil
}
+// MatchesSemVerFilter returns true if the spec satisfies the passed in semantic version filter
+func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) {
+ filter, err := ParseSemVerFilter(version)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.SemVerConstraints()), nil
+}
+
// FileName() returns the name of the file containing the spec
func (report SpecReport) FileName() string {
return report.LeafNodeLocation.FileName
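A sketch of consuming the new per-spec semantic-version data from a report, assuming the SpecReport methods added above; the version string and output format are illustrative.

package mysuite_test

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
)

// Summarize which specs would be selected under --sem-ver-filter=2.1.0.
var _ = ReportAfterSuite("semver summary", func(report Report) {
	for _, spec := range report.SpecReports {
		// The error is non-nil only when the filter version itself is invalid.
		selected, err := spec.MatchesSemVerFilter("2.1.0")
		if err != nil {
			Fail(err.Error())
		}
		fmt.Printf("%s constraints=%v selected=%t\n",
			spec.FullText(), spec.SemVerConstraints(), selected)
	}
})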
diff --git a/operator/vendor/github.com/onsi/ginkgo/v2/types/version.go b/operator/vendor/github.com/onsi/ginkgo/v2/types/version.go
index 879e1d86..b9c1ea98 100644
--- a/operator/vendor/github.com/onsi/ginkgo/v2/types/version.go
+++ b/operator/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -1,3 +1,3 @@
package types
-const VERSION = "2.22.2"
+const VERSION = "2.27.2"
diff --git a/operator/vendor/github.com/onsi/gomega/CHANGELOG.md b/operator/vendor/github.com/onsi/gomega/CHANGELOG.md
index a20d997c..b7d7309f 100644
--- a/operator/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/operator/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,53 @@
+## 1.38.2
+
+- roll back to go 1.23.0 [c404969]
+
+## 1.38.1
+
+### Fixes
+
+Numerous minor fixes and dependency bumps
+
+## 1.38.0
+
+### Features
+- gstruct handles extra unexported fields [4ee7ed0]
+
+### Fixes
+- support [] in IgnoringTopFunction function signatures (#851) [36bbf72]
+
+### Maintenance
+- Bump golang.org/x/net from 0.40.0 to 0.41.0 (#846) [529d408]
+- Fix typo [acd1f55]
+- Bump google.golang.org/protobuf from 1.36.5 to 1.36.6 (#835) [bae65a0]
+- Bump nokogiri from 1.18.4 to 1.18.8 in /docs (#842) [8dda91f]
+- Bump golang.org/x/net from 0.39.0 to 0.40.0 (#843) [212d812]
+- Bump github.com/onsi/ginkgo/v2 from 2.23.3 to 2.23.4 (#839) [59bd7f9]
+- Bump nokogiri from 1.18.1 to 1.18.4 in /docs (#834) [328c729]
+- Bump uri from 1.0.2 to 1.0.3 in /docs (#826) [9a798a1]
+- Bump golang.org/x/net from 0.37.0 to 0.39.0 (#841) [04a72c6]
+
+## 1.37.0
+
+### Features
+- add To/ToNot/NotTo aliases for AsyncAssertion [5666f98]
+
+## 1.36.3
+
+### Maintenance
+
+- bump all the things [adb8b49]
+- chore: replace `interface{}` with `any` [7613216]
+- Bump google.golang.org/protobuf from 1.36.1 to 1.36.5 (#822) [9fe5259]
+- remove spurious "toolchain" from go.mod (#819) [a0e85b9]
+- Bump golang.org/x/net from 0.33.0 to 0.35.0 (#823) [604a8b1]
+- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#772) [36fbc84]
+- Bump github-pages from 231 to 232 in /docs (#778) [ced70d7]
+- Bump rexml from 3.2.6 to 3.3.9 in /docs (#788) [c8b4a07]
+- Bump github.com/onsi/ginkgo/v2 from 2.22.1 to 2.22.2 (#812) [06431b9]
+- Bump webrick from 1.8.1 to 1.9.1 in /docs (#800) [b55a92d]
+- Fix typos (#813) [a1d518b]
+
## 1.36.2
### Maintenance
@@ -322,7 +372,7 @@ Require Go 1.22+
### Features
-Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
@@ -461,7 +511,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
- Fix for Go 1.18 (#532) [56d2a29]
-- Document precendence of timeouts (#533) [b607941]
+- Document precedence of timeouts (#533) [b607941]
## 1.18.1
@@ -478,7 +528,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/
## Fixes
- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
-## Maintenace
+## Maintenance
- Remove Travis workflow (#491) [72e6040]
- Upgrade to Ginkgo 2.0.0 GA [f383637]
- chore: fix description of HaveField matcher (#487) [2b4b2c0]
@@ -726,7 +776,7 @@ Improvements:
- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
-- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel.
+- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
@@ -736,7 +786,7 @@ Improvements:
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers
-- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time.
+- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time.
- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer.
Bug Fixes:
@@ -781,7 +831,7 @@ New Matchers:
Updated Matchers:
-- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher.
+- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.
Misc:
diff --git a/operator/vendor/github.com/onsi/gomega/format/format.go b/operator/vendor/github.com/onsi/gomega/format/format.go
index 6c168063..96f04b21 100644
--- a/operator/vendor/github.com/onsi/gomega/format/format.go
+++ b/operator/vendor/github.com/onsi/gomega/format/format.go
@@ -57,7 +57,7 @@ var Indent = " "
var longFormThreshold = 20
-// GomegaStringer allows for custom formating of objects for gomega.
+// GomegaStringer allows for custom formatting of objects for gomega.
type GomegaStringer interface {
// GomegaString will be used to custom format an object.
// It does not follow UseStringerRepresentation value and will always be called regardless.
@@ -73,7 +73,7 @@ If the CustomFormatter does not want to handle the object it should return ("",
Strings returned by CustomFormatters are not truncated
*/
-type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatter func(value any) (string, bool)
type CustomFormatterKey uint
var customFormatterKey CustomFormatterKey = 1
@@ -125,7 +125,7 @@ If expected is omitted, then the message looks like:
*/
-func Message(actual interface{}, message string, expected ...interface{}) string {
+func Message(actual any, message string, expected ...any) string {
if len(expected) == 0 {
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
}
@@ -255,7 +255,7 @@ recursing into the object.
Set PrintContextObjects to true to print the content of objects implementing context.Context
*/
-func Object(object interface{}, indentation uint) string {
+func Object(object any, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
commonRepresentation := ""
@@ -392,7 +392,7 @@ func formatValue(value reflect.Value, indentation uint) string {
}
}
-func formatString(object interface{}, indentation uint) string {
+func formatString(object any, indentation uint) string {
if indentation == 1 {
s := fmt.Sprintf("%s", object)
components := strings.Split(s, "\n")
diff --git a/operator/vendor/github.com/onsi/gomega/gomega_dsl.go b/operator/vendor/github.com/onsi/gomega/gomega_dsl.go
index 9a028f3f..fdba34ee 100644
--- a/operator/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/operator/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.36.2"
+const GOMEGA_VERSION = "1.38.2"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
@@ -178,7 +178,7 @@ func ensureDefaultGomegaIsConfigured() {
// All subsequent arguments will be required to be nil/zero.
//
// This is convenient if you want to make an assertion on a method/function that returns
-// a value and an error - a common patter in Go.
+// a value and an error - a common pattern in Go.
//
// For example, given a function with signature:
//
@@ -191,7 +191,7 @@ func ensureDefaultGomegaIsConfigured() {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Ω and Expect are identical
-func Ω(actual interface{}, extra ...interface{}) Assertion {
+func Ω(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Ω(actual, extra...)
}
@@ -217,7 +217,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion {
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Expect and Ω are identical
-func Expect(actual interface{}, extra ...interface{}) Assertion {
+func Expect(actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.Expect(actual, extra...)
}
@@ -233,7 +233,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion {
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
// set the first argument of `ExpectWithOffset` appropriately.
-func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
+func ExpectWithOffset(offset int, actual any, extra ...any) Assertion {
ensureDefaultGomegaIsConfigured()
return Default.ExpectWithOffset(offset, actual, extra...)
}
@@ -319,19 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in
Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
}, SpecTimeout(time.Second))
-Either way the context pasesd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. For example:
Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17))
-now either the context cacnellation or the timeout will cause Eventually to stop polling.
+now either the context cancellation or the timeout will cause Eventually to stop polling.
If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call:
EnforceDefaultTimeoutsWhenUsingContexts()
-in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if eitehr the context is cancelled or the default timeout elapses.
+in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses.
**Category 3: Making assertions _in_ the function passed into Eventually**
@@ -390,7 +390,7 @@ is equivalent to
Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/
-func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Eventually(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Eventually(actualOrCtx, args...)
}
@@ -404,7 +404,7 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
// the same as `Eventually(...).WithOffset(...).WithTimeout` or
// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
-func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}
@@ -424,7 +424,7 @@ Consistently is useful in cases where you want to assert that something *does no
This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
*/
-func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func Consistently(actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.Consistently(actualOrCtx, args...)
}
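
A minimal sketch of the channel case described in the comment above, as a plain `go test`:

```go
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestNothingReceived(t *testing.T) {
	g := NewWithT(t)
	ch := make(chan string)

	// Poll for 200ms and assert the channel never yields a value in that window.
	g.Consistently(ch).
		WithTimeout(200 * time.Millisecond).
		ShouldNot(Receive())
}
```
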
@@ -435,13 +435,13 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
//
// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
// optional `WithTimeout` and `WithPolling`.
-func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion {
ensureDefaultGomegaIsConfigured()
return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}
/*
-StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
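
A sketch of wiring the StopTrying signal into a polled function; `lookupUser` and the deleted-user condition are invented for illustration:

```go
package example_test

import (
	"errors"
	"testing"

	. "github.com/onsi/gomega"
)

var errGone = errors.New("user deleted")

// lookupUser stands in for a real API call; here it simply succeeds.
func lookupUser(id string) (string, error) {
	return "alice", nil
}

func TestStopTrying(t *testing.T) {
	g := NewWithT(t)

	g.Eventually(func() (string, error) {
		name, err := lookupUser("42")
		if errors.Is(err, errGone) {
			// Returning StopTrying as the error aborts polling immediately and
			// fails the assertion with this message. From deeper in the call
			// stack you could instead panic via StopTrying("...").Now().
			return "", StopTrying("user was deleted; retrying is pointless")
		}
		return name, err
	}).Should(Equal("alice"))
}
```
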
diff --git a/operator/vendor/github.com/onsi/gomega/internal/assertion.go b/operator/vendor/github.com/onsi/gomega/internal/assertion.go
index 08356a61..cc846e7c 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/assertion.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -9,19 +9,19 @@ import (
)
type Assertion struct {
- actuals []interface{} // actual value plus all extra values
- actualIndex int // value to pass to the matcher
- vet vetinari // the vet to call before calling Gomega matcher
+ actuals []any // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
offset int
g *Gomega
}
// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
-type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+type vetinari func(assertion *Assertion, optionalDescription ...any) bool
-func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
+func NewAssertion(actualInput any, g *Gomega, offset int, extra ...any) *Assertion {
return &Assertion{
- actuals: append([]interface{}{actualInput}, extra...),
+ actuals: append([]any{actualInput}, extra...),
actualIndex: 0,
vet: (*Assertion).vetActuals,
offset: offset,
@@ -44,37 +44,37 @@ func (assertion *Assertion) Error() types.Assertion {
}
}
-func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Assertion", optionalDescription...)
return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *Assertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -86,7 +86,7 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{})
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
actualInput := assertion.actuals[assertion.actualIndex]
matches, err := matcher.Match(actualInput)
assertion.g.THelper()
@@ -113,7 +113,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool
// vetActuals vets the actual values, with the (optional) exception of a
// specific value, such as the first value in case non-error assertions, or the
// last value in case of Error()-based assertions.
-func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetActuals(optionalDescription ...any) bool {
success, message := vetActuals(assertion.actuals, assertion.actualIndex)
if success {
return true
@@ -129,7 +129,7 @@ func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool
// the final error value is non-zero. Otherwise, it doesn't vet the actual
// values, as these are allowed to take on any values unless there is a non-zero
// error value.
-func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+func (assertion *Assertion) vetError(optionalDescription ...any) bool {
if err := assertion.actuals[assertion.actualIndex]; err != nil {
// Go error result idiom: all other actual values must be zero values.
return assertion.vetActuals(optionalDescription...)
@@ -139,7 +139,7 @@ func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
// vetActuals vets a slice of actual values, optionally skipping a particular
// value slice element, such as the first or last value slice element.
-func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+func vetActuals(actuals []any, skipIndex int) (bool, string) {
for i, actual := range actuals {
if i == skipIndex {
continue
diff --git a/operator/vendor/github.com/onsi/gomega/internal/async_assertion.go b/operator/vendor/github.com/onsi/gomega/internal/async_assertion.go
index 8b4cd1f5..4121505b 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/async_assertion.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -69,8 +69,8 @@ type AsyncAssertion struct {
asyncType AsyncAssertionType
actualIsFunc bool
- actual interface{}
- argsToForward []interface{}
+ actual any
+ argsToForward []any
timeoutInterval time.Duration
pollingInterval time.Duration
@@ -80,7 +80,7 @@ type AsyncAssertion struct {
g *Gomega
}
-func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput any, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
out := &AsyncAssertion{
asyncType: asyncType,
timeoutInterval: timeoutInterval,
@@ -129,7 +129,7 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss
return assertion
}
-func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...any) types.AsyncAssertion {
assertion.argsToForward = argsToForward
return assertion
}
@@ -139,19 +139,31 @@ func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssert
return assertion
}
-func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, true, optionalDescription...)
}
-func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.Should(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
assertion.g.THelper()
vetOptionalDescription("Asynchronous assertion", optionalDescription...)
return assertion.match(matcher, false, optionalDescription...)
}
-func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+func (assertion *AsyncAssertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.ShouldNot(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool {
+ return assertion.ShouldNot(matcher, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...any) string {
switch len(optionalDescription) {
case 0:
return ""
@@ -163,7 +175,7 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
}
-func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (any, error) {
if len(values) == 0 {
return nil, &asyncPolledActualError{
message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
@@ -224,7 +236,7 @@ func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvid
if numProvided == 1 {
have = "has"
}
- return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments.
+ return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments.
You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
@@ -237,9 +249,9 @@ You can learn more at https://onsi.github.io/gomega/#eventually
`, assertion.asyncType, reason)
}
-func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
+func (assertion *AsyncAssertion) buildActualPoller() (func() (any, error), error) {
if !assertion.actualIsFunc {
- return func() (interface{}, error) { return assertion.actual, nil }, nil
+ return func() (any, error) { return assertion.actual, nil }, nil
}
actualValue := reflect.ValueOf(assertion.actual)
actualType := reflect.TypeOf(assertion.actual)
@@ -301,7 +313,7 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error
return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
}
- return func() (actual interface{}, err error) {
+ return func() (actual any, err error) {
var values []reflect.Value
assertionFailure = nil
defer func() {
@@ -354,14 +366,14 @@ func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
}
}
-func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value any) bool {
if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
return false
}
return true
}
-func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value any) (matches bool, err error) {
defer func() {
if e := recover(); e != nil {
if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
@@ -377,13 +389,13 @@ func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value
return
}
-func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool {
timer := time.Now()
timeout := assertion.afterTimeout()
lock := sync.Mutex{}
var matches, hasLastValidActual bool
- var actual, lastValidActual interface{}
+ var actual, lastValidActual any
var actualErr, matcherErr error
var oracleMatcherSaysStop bool
@@ -440,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch
}
} else {
var fgErr formattedGomegaError
- if errors.As(actualErr, &fgErr) {
+ if errors.As(matcherErr, &fgErr) {
message += fgErr.FormattedGomegaError() + "\n"
} else {
message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr)
diff --git a/operator/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/operator/vendor/github.com/onsi/gomega/internal/duration_bundle.go
index 2e026c33..1019deb8 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/duration_bundle.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -49,7 +49,7 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
return duration
}
-func toDuration(input interface{}) (time.Duration, error) {
+func toDuration(input any) (time.Duration, error) {
duration, ok := input.(time.Duration)
if ok {
return duration, nil
diff --git a/operator/vendor/github.com/onsi/gomega/internal/gomega.go b/operator/vendor/github.com/onsi/gomega/internal/gomega.go
index c6e2fcc0..66dfe7d0 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/gomega.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -40,45 +40,45 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
return g
}
-func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Ω(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) Expect(actual any, extra ...any) types.Assertion {
return g.ExpectWithOffset(0, actual, extra...)
}
-func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
+func (g *Gomega) ExpectWithOffset(offset int, actual any, extra ...any) types.Assertion {
return NewAssertion(actual, g, offset, extra...)
}
-func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Eventually(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
}
-func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
}
-func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) Consistently(actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
}
-func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
}
-func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx any, args ...any) types.AsyncAssertion {
baseOffset := 3
timeoutInterval := -time.Duration(1)
pollingInterval := -time.Duration(1)
- intervals := []interface{}{}
+ intervals := []any{}
var ctx context.Context
actual := actualOrCtx
startingIndex := 0
if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
- // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
+ // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
// this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
if _, err := toDuration(args[0]); err != nil {
ctx = actualOrCtx.(context.Context)
diff --git a/operator/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/operator/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
index 3a4f7ddd..450c4033 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -100,7 +100,7 @@ func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
return s.duration
}
-func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+func AsPollingSignalError(actual any) (*PollingSignalErrorImpl, bool) {
if actual == nil {
return nil, false
}
diff --git a/operator/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/operator/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
index f2958764..b748de41 100644
--- a/operator/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
+++ b/operator/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -10,7 +10,7 @@ import (
// Gomega matcher at the beginning it panics. This allows for rendering Gomega
// matchers as part of an optional Description, as long as they're not in the
// first slot.
-func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+func vetOptionalDescription(assertion string, optionalDescription ...any) {
if len(optionalDescription) == 0 {
return
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers.go b/operator/vendor/github.com/onsi/gomega/matchers.go
index 7ef27dc9..10b6693f 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers.go
@@ -12,7 +12,7 @@ import (
// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
// types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func Equal(expected interface{}) types.GomegaMatcher {
+func Equal(expected any) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
@@ -22,7 +22,7 @@ func Equal(expected interface{}) types.GomegaMatcher {
// This is done by converting actual to have the type of expected before
// attempting equality with reflect.DeepEqual.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
+func BeEquivalentTo(expected any) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
@@ -31,7 +31,7 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
// You can pass cmp.Option as options.
// It is an error for actual and expected to be nil. Use BeNil() instead.
-func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+func BeComparableTo(expected any, opts ...cmp.Option) types.GomegaMatcher {
return &matchers.BeComparableToMatcher{
Expected: expected,
Options: opts,
@@ -41,7 +41,7 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche
// BeIdenticalTo uses the == operator to compare actual with expected.
// BeIdenticalTo is strict about types when performing comparisons.
// It is an error for both actual and expected to be nil. Use BeNil() instead.
-func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
+func BeIdenticalTo(expected any) types.GomegaMatcher {
return &matchers.BeIdenticalToMatcher{
Expected: expected,
}
@@ -139,7 +139,7 @@ func Succeed() types.GomegaMatcher {
// Error interface
//
// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases.
-func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher {
+func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
Expected: expected,
FuncErrDescription: functionErrorDescription,
@@ -202,11 +202,11 @@ func BeClosed() types.GomegaMatcher {
// Expect(myThing.IsValid()).Should(BeTrue())
//
// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
-// you can pass a pointer to a variable of the approriate type first, and second a matcher:
+// you can pass a pointer to a variable of the appropriate type first, and second a matcher:
//
// var myThing thing
// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
-func Receive(args ...interface{}) types.GomegaMatcher {
+func Receive(args ...any) types.GomegaMatcher {
return &matchers.ReceiveMatcher{
Args: args,
}
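
A small sketch of the pointer-then-matcher form described above, using an invented buffered channel:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestReceiveIntoVariable(t *testing.T) {
	g := NewWithT(t)
	ch := make(chan string, 1)
	ch <- "foobar"

	// Pointer first, matcher second: the received value must satisfy the
	// matcher and is also copied into got for further assertions.
	var got string
	g.Expect(ch).Should(Receive(&got, ContainSubstring("bar")))
	g.Expect(got).To(HavePrefix("foo"))
}
```
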
@@ -224,7 +224,7 @@ func Receive(args ...interface{}) types.GomegaMatcher {
//
// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
-func BeSent(arg interface{}) types.GomegaMatcher {
+func BeSent(arg any) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
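
A sketch of the blocked-send use case described above, assuming an invented unbuffered channel with a late receiver:

```go
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestBeSent(t *testing.T) {
	g := NewWithT(t)
	ch := make(chan int) // unbuffered: a send blocks until someone receives

	go func() {
		time.Sleep(50 * time.Millisecond)
		<-ch
	}()

	// Eventually keeps retrying the send; the test fails cleanly on timeout
	// instead of deadlocking if nothing ever receives.
	g.Eventually(ch).Should(BeSent(7))
}
```
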
@@ -233,7 +233,7 @@ func BeSent(arg interface{}) types.GomegaMatcher {
// MatchRegexp succeeds if actual is a string or stringer that matches the
// passed-in regexp. Optional arguments can be provided to construct a regexp
// via fmt.Sprintf().
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
+func MatchRegexp(regexp string, args ...any) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
Args: args,
@@ -243,7 +243,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
// ContainSubstring succeeds if actual is a string or stringer that contains the
// passed-in substring. Optional arguments can be provided to construct the substring
// via fmt.Sprintf().
-func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
+func ContainSubstring(substr string, args ...any) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
Args: args,
@@ -253,7 +253,7 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
// HavePrefix succeeds if actual is a string or stringer that contains the
// passed-in string as a prefix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
+func HavePrefix(prefix string, args ...any) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
Args: args,
@@ -263,7 +263,7 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
// HaveSuffix succeeds if actual is a string or stringer that contains the
// passed-in string as a suffix. Optional arguments can be provided to construct
// via fmt.Sprintf().
-func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
+func HaveSuffix(suffix string, args ...any) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
Args: args,
@@ -273,7 +273,7 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
// MatchJSON succeeds if actual is a string or stringer of JSON that matches
// the expected JSON. The JSONs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchJSON(json interface{}) types.GomegaMatcher {
+func MatchJSON(json any) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
@@ -282,7 +282,7 @@ func MatchJSON(json interface{}) types.GomegaMatcher {
// MatchXML succeeds if actual is a string or stringer of XML that matches
// the expected XML. The XMLs are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like whitespaces shouldn't matter.
-func MatchXML(xml interface{}) types.GomegaMatcher {
+func MatchXML(xml any) types.GomegaMatcher {
return &matchers.MatchXMLMatcher{
XMLToMatch: xml,
}
@@ -291,7 +291,7 @@ func MatchXML(xml interface{}) types.GomegaMatcher {
// MatchYAML succeeds if actual is a string or stringer of YAML that matches
// the expected YAML. The YAML's are decoded and the resulting objects are compared via
// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
-func MatchYAML(yaml interface{}) types.GomegaMatcher {
+func MatchYAML(yaml any) types.GomegaMatcher {
return &matchers.MatchYAMLMatcher{
YAMLToMatch: yaml,
}
@@ -338,7 +338,7 @@ func BeZero() types.GomegaMatcher {
//
// var findings []string
// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
-func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
+func ContainElement(element any, result ...any) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
Result: result,
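
A sketch of the optional result argument visible in the signature above, assuming its usual behavior of copying the matching element(s) into the supplied pointer (a slice pointer collects every match); the `words` data is invented:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestContainElementCapture(t *testing.T) {
	g := NewWithT(t)
	words := []string{"Foo", "FooBar"}

	// The optional second argument receives the matching element(s); a
	// pointer to a slice collects all of them.
	var findings []string
	g.Expect(words).Should(ContainElement(ContainSubstring("Bar"), &findings))
	g.Expect(findings).To(ConsistOf("FooBar"))
}
```
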
@@ -358,7 +358,7 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc
// Expect(2).Should(BeElementOf(1, 2))
//
// Actual must be typed.
-func BeElementOf(elements ...interface{}) types.GomegaMatcher {
+func BeElementOf(elements ...any) types.GomegaMatcher {
return &matchers.BeElementOfMatcher{
Elements: elements,
}
@@ -368,7 +368,7 @@ func BeElementOf(elements ...interface{}) types.GomegaMatcher {
// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
//
// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
-func BeKeyOf(element interface{}) types.GomegaMatcher {
+func BeKeyOf(element any) types.GomegaMatcher {
return &matchers.BeKeyOfMatcher{
Map: element,
}
@@ -388,14 +388,14 @@ func BeKeyOf(element interface{}) types.GomegaMatcher {
//
// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
-// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
-func ConsistOf(elements ...interface{}) types.GomegaMatcher {
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []any are different types - hence the need for this special rule.
+func ConsistOf(elements ...any) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
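
A short sketch restating the rules above with invented data: order-independent matching, matchers as elements, and the single-slice unpacking rule:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestConsistOf(t *testing.T) {
	g := NewWithT(t)
	words := []string{"Foo", "FooBar"}

	// Order does not matter, and matchers can stand in for elements.
	g.Expect(words).Should(ConsistOf("FooBar", "Foo"))
	g.Expect(words).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))

	// A single slice argument is unpacked for you, since []string cannot be
	// spread into a ...any parameter directly.
	g.Expect(words).Should(ConsistOf([]string{"FooBar", "Foo"}))
}
```
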
-// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter.
+// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter.
// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
@@ -403,7 +403,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher {
// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
// Actual must be an array or slice.
-func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+func HaveExactElements(elements ...any) types.GomegaMatcher {
return &matchers.HaveExactElementsMatcher{
Elements: elements,
}
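
A companion sketch showing that, unlike ConsistOf, ordering is significant here; the data is invented:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestHaveExactElements(t *testing.T) {
	g := NewWithT(t)
	words := []string{"Foo", "FooBar"}

	// The first element must satisfy the first matcher, the second the
	// second, and so on.
	g.Expect(words).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
	g.Expect(words).ShouldNot(HaveExactElements("FooBar", "Foo"))
}
```
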
@@ -417,7 +417,7 @@ func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, ContainElements searches through the map's values.
-func ContainElements(elements ...interface{}) types.GomegaMatcher {
+func ContainElements(elements ...any) types.GomegaMatcher {
return &matchers.ContainElementsMatcher{
Elements: elements,
}
@@ -432,7 +432,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher {
//
// Actual must be an array, slice or map.
// For maps, HaveEach searches through the map's values.
-func HaveEach(element interface{}) types.GomegaMatcher {
+func HaveEach(element any) types.GomegaMatcher {
return &matchers.HaveEachMatcher{
Element: element,
}
@@ -443,7 +443,7 @@ func HaveEach(element interface{}) types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
-func HaveKey(key interface{}) types.GomegaMatcher {
+func HaveKey(key any) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
@@ -455,7 +455,7 @@ func HaveKey(key interface{}) types.GomegaMatcher {
//
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
-func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
+func HaveKeyWithValue(key any, value any) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
Value: value,
@@ -483,7 +483,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
-func HaveField(field string, expected interface{}) types.GomegaMatcher {
+func HaveField(field string, expected any) types.GomegaMatcher {
return &matchers.HaveFieldMatcher{
Field: field,
Expected: expected,
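
A sketch mirroring the doc examples above; the `book` and `author` types and their field values are assumptions made up for illustration:

```go
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// Hypothetical types mirroring the doc's examples.
type author struct {
	FirstName string
	DOB       time.Time
}

type book struct {
	Title  string
	Author author
}

func TestHaveField(t *testing.T) {
	g := NewWithT(t)
	b := book{
		Title:  "Les Miserables",
		Author: author{FirstName: "Victor", DOB: time.Date(1802, 2, 26, 0, 0, 0, 0, time.UTC)},
	}

	// Fields are addressed by name; dots walk nested structs and a trailing
	// () invokes a niladic method on the value reached so far.
	g.Expect(b).To(HaveField("Title", ContainSubstring("Les")))
	g.Expect(b).To(HaveField("Author.FirstName", Equal("Victor")))
	g.Expect(b).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)))
}
```
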
@@ -535,7 +535,7 @@ func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1.0).Should(BeNumerically(">=", 1.0))
// Expect(1.0).Should(BeNumerically("<", 3))
// Expect(1.0).Should(BeNumerically("<=", 1.0))
-func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
+func BeNumerically(comparator string, compareTo ...any) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
CompareTo: compareTo,
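
A sketch of the comparator forms; the Match implementation shown further below accepts one or two compare-to values, and the two-argument call here uses the "~" comparator as an approximate-equality tolerance, which is not shown in this excerpt and is included as an assumption:

```go
package example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestBeNumerically(t *testing.T) {
	g := NewWithT(t)

	// Mixed numeric types are fine; values are compared numerically.
	g.Expect(1.0).Should(BeNumerically("<", 3))
	g.Expect(5).Should(BeNumerically(">=", 5.0))

	// Two compare-to values: with "~" the second acts as a tolerance.
	g.Expect(1.009).Should(BeNumerically("~", 1.0, 0.01))
}
```
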
@@ -562,7 +562,7 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
-func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
+func BeAssignableToTypeOf(expected any) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
@@ -581,7 +581,7 @@ func Panic() types.GomegaMatcher {
// matcher can be passed in instead:
//
// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
-func PanicWith(expected interface{}) types.GomegaMatcher {
+func PanicWith(expected any) types.GomegaMatcher {
return &matchers.PanicMatcher{Expected: expected}
}
@@ -610,7 +610,7 @@ func BeADirectory() types.GomegaMatcher {
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
-func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
+func HaveHTTPStatus(expected ...any) types.GomegaMatcher {
return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
@@ -618,7 +618,7 @@ func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be a string header name, followed by a header value which
// can be a string, or another matcher.
-func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+func HaveHTTPHeaderWithValue(header string, value any) types.GomegaMatcher {
return &matchers.HaveHTTPHeaderWithValueMatcher{
Header: header,
Value: value,
@@ -628,7 +628,7 @@ func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatch
// HaveHTTPBody matches if the body matches.
// Actual must be either a *http.Response or *httptest.ResponseRecorder.
// Expected must be either a string, []byte, or other matcher
-func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+func HaveHTTPBody(expected any) types.GomegaMatcher {
return &matchers.HaveHTTPBodyMatcher{Expected: expected}
}
@@ -687,15 +687,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
// Expect(1).To(WithTransform(failingplus1, Equal(2)))
//
// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
-func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
+func WithTransform(transform any, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}
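
A sketch of composing WithTransform with an inner matcher and with Not(); the `toUpper` transform is invented for illustration:

```go
package example_test

import (
	"strings"
	"testing"

	. "github.com/onsi/gomega"
)

func TestWithTransform(t *testing.T) {
	g := NewWithT(t)

	// The transform is applied to the actual value before the inner matcher
	// runs; here it normalizes case.
	toUpper := func(s string) string { return strings.ToUpper(s) }
	g.Expect("foo").To(WithTransform(toUpper, Equal("FOO")))

	// Composition with Not(): the transformed value must not equal "BAR".
	g.Expect("foo").To(Not(WithTransform(toUpper, Equal("BAR"))))
}
```
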
// Satisfy matches the actual value against the `predicate` function.
-// The given predicate must be a function of one paramter that returns bool.
+// The given predicate must be a function of one parameter that returns bool.
//
// var isEven = func(i int) bool { return i%2 == 0 }
// Expect(2).To(Satisfy(isEven))
-func Satisfy(predicate interface{}) types.GomegaMatcher {
+func Satisfy(predicate any) types.GomegaMatcher {
return matchers.NewSatisfyMatcher(predicate)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/and.go b/operator/vendor/github.com/onsi/gomega/matchers/and.go
index 6bd826ad..db48e90b 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/and.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/and.go
@@ -14,7 +14,7 @@ type AndMatcher struct {
firstFailedMatcher types.GomegaMatcher
}
-func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *AndMatcher) Match(actual any) (success bool, err error) {
m.firstFailedMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -26,16 +26,16 @@ func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
return true, nil
}
-func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) FailureMessage(actual any) (message string) {
return m.firstFailedMatcher.FailureMessage(actual)
}
-func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *AndMatcher) NegatedFailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
}
-func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *AndMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
index be483952..a100e5c0 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type AssignableToTypeOfMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *AssignableToTypeOfMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
} else if matcher.Expected == nil {
@@ -28,10 +28,10 @@ func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success boo
return actualType.AssignableTo(expectedType), nil
}
-func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
}
-func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/operator/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
index 93d4497c..1d823604 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -24,11 +24,11 @@ func (t notADirectoryError) Error() string {
}
type BeADirectoryMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeADirectoryMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err
return true, nil
}
-func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
}
-func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a directory")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/operator/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
index 8fefc4de..3e53d628 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -24,11 +24,11 @@ func (t notARegularFileError) Error() string {
}
type BeARegularFileMatcher struct {
- expected interface{}
+ expected any
err error
}
-func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeARegularFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
@@ -47,10 +47,10 @@ func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
}
-func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not be a regular file")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/operator/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
index e2bdd281..04f156db 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -10,10 +10,10 @@ import (
)
type BeAnExistingFileMatcher struct {
- expected interface{}
+ expected any
}
-func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeAnExistingFileMatcher) Match(actual any) (success bool, err error) {
actualFilename, ok := actual.(string)
if !ok {
return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
@@ -31,10 +31,10 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool,
return true, nil
}
-func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to exist")
}
-func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to exist")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
index f13c2449..4319dde4 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
@@ -12,7 +12,7 @@ import (
type BeClosedMatcher struct {
}
-func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeClosedMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -39,10 +39,10 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err
return closed, nil
}
-func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be closed")
}
-func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeClosedMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to be open")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
index 4e389785..ce74eee4 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -2,6 +2,7 @@ package matchers
import (
"bytes"
+ "errors"
"fmt"
"github.com/google/go-cmp/cmp"
@@ -9,11 +10,11 @@ import (
)
type BeComparableToMatcher struct {
- Expected interface{}
+ Expected any
Options cmp.Options
}
-func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m
if err, ok := r.(error); ok {
matchErr = err
} else if errMsg, ok := r.(string); ok {
- matchErr = fmt.Errorf(errMsg)
+ matchErr = errors.New(errMsg)
}
}
}()
@@ -40,10 +41,10 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m
return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
}
-func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeComparableToMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...))
}
-func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be comparable to", matcher.Expected)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
index 9ee75a5d..406fe548 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeElementOfMatcher struct {
- Elements []interface{}
+ Elements []any
}
-func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeElementOfMatcher) Match(actual any) (success bool, err error) {
if reflect.TypeOf(actual) == nil {
return false, fmt.Errorf("BeElement matcher expects actual to be typed")
}
@@ -34,10 +34,10 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
return false, lastError
}
-func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be an element of", presentable(matcher.Elements))
}
-func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
index bd7f0b96..e9e0644f 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
@@ -13,7 +13,7 @@ import (
type BeEmptyMatcher struct {
}
-func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEmptyMatcher) Match(actual any) (success bool, err error) {
// short-circuit the iterator case, as we only need to see the first
// element, if any.
if miter.IsIter(actual) {
@@ -34,10 +34,10 @@ func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err erro
return length == 0, nil
}
-func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be empty")
}
-func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be empty")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
index 263627f4..37b3080b 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
@@ -10,10 +10,10 @@ import (
)
type BeEquivalentToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeEquivalentToMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Both actual and expected must not be nil.")
}
@@ -27,10 +27,10 @@ func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, e
return reflect.DeepEqual(convertedActual, matcher.Expected), nil
}
-func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be equivalent to", matcher.Expected)
}
-func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be equivalent to", matcher.Expected)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
index 8ee2b1c5..55e86951 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -12,7 +12,7 @@ type BeFalseMatcher struct {
Reason string
}
-func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeFalseMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -20,7 +20,7 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro
return actual == false, nil
}
-func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeFalseMatcher) FailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "to be false")
} else {
@@ -28,7 +28,7 @@ func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeFalseMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "not to be false")
} else {
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/operator/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
index 631ce11e..579aa41b 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_identical_to.go
@@ -10,10 +10,10 @@ import (
)
type BeIdenticalToMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+func (matcher *BeIdenticalToMatcher) Match(actual any) (success bool, matchErr error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -30,10 +30,10 @@ func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, ma
return actual == matcher.Expected, nil
}
-func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) FailureMessage(actual any) string {
return format.Message(actual, "to be identical to", matcher.Expected)
}
-func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string {
+func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual any) string {
return format.Message(actual, "not to be identical to", matcher.Expected)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
index 449a291e..3fff3df7 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -8,10 +8,10 @@ import (
)
type BeKeyOfMatcher struct {
- Map interface{}
+ Map any
}
-func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeKeyOfMatcher) Match(actual any) (success bool, err error) {
if !isMap(matcher.Map) {
return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
}
@@ -36,10 +36,10 @@ func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err erro
return false, lastError
}
-func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
}
-func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
index 551d99d7..cab37f4f 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
@@ -7,14 +7,14 @@ import "github.com/onsi/gomega/format"
type BeNilMatcher struct {
}
-func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNilMatcher) Match(actual any) (success bool, err error) {
return isNil(actual), nil
}
-func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be nil")
}
-func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNilMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be nil")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
index 100735de..7e6ce154 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -11,18 +11,18 @@ import (
type BeNumericallyMatcher struct {
Comparator string
- CompareTo []interface{}
+ CompareTo []any
}
-func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) FailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, false)
}
-func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual any) (message string) {
return matcher.FormatFailureMessage(actual, true)
}
-func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) {
+func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual any, negated bool) (message string) {
if len(matcher.CompareTo) == 1 {
message = fmt.Sprintf("to be %s", matcher.Comparator)
} else {
@@ -34,7 +34,7 @@ func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, ne
return format.Message(actual, message, matcher.CompareTo[0])
}
-func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeNumericallyMatcher) Match(actual any) (success bool, err error) {
if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
index cf582a3f..14ffbf6c 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
@@ -10,11 +10,11 @@ import (
)
type BeSentMatcher struct {
- Arg interface{}
+ Arg any
channelClosed bool
}
-func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeSentMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -56,15 +56,15 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error
return didSend, nil
}
-func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeSentMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to send:", matcher.Arg)
}
-func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
index dec4db02..edb647c6 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
@@ -15,17 +15,17 @@ type BeTemporallyMatcher struct {
Threshold []time.Duration
}
-func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
}
-func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *BeTemporallyMatcher) Match(actual any) (bool, error) {
// predicate to test for time.Time type
- isTime := func(t interface{}) bool {
+ isTime := func(t any) bool {
_, ok := t.(time.Time)
return ok
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
index 3576aac8..a010bec5 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -12,7 +12,7 @@ type BeTrueMatcher struct {
Reason string
}
-func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeTrueMatcher) Match(actual any) (success bool, err error) {
if !isBool(actual) {
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
}
@@ -20,7 +20,7 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error
return actual.(bool), nil
}
-func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeTrueMatcher) FailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "to be true")
} else {
@@ -28,7 +28,7 @@ func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string
}
}
-func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeTrueMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Reason == "" {
return format.Message(actual, "not to be true")
} else {
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
index 26196f16..f5f5d7f7 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
@@ -9,7 +9,7 @@ import (
type BeZeroMatcher struct {
}
-func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *BeZeroMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return true, nil
}
@@ -19,10 +19,10 @@ func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error
}
-func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to be zero-valued")
}
-func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *BeZeroMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to be zero-valued")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/consist_of.go b/operator/vendor/github.com/onsi/gomega/matchers/consist_of.go
index a1118818..05c751b6 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -12,12 +12,12 @@ import (
)
type ConsistOfMatcher struct {
- Elements []interface{}
- missingElements []interface{}
- extraElements []interface{}
+ Elements []any
+ missingElements []any
+ extraElements []any
}
-func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ConsistOfMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
@@ -35,19 +35,19 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er
return true, nil
}
- var missingMatchers []interface{}
+ var missingMatchers []any
matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges)
matcher.missingElements = equalMatchersToElements(missingMatchers)
return false, nil
}
-func neighbours(value, matcher interface{}) (bool, error) {
+func neighbours(value, matcher any) (bool, error) {
match, err := matcher.(omegaMatcher).Match(value)
return match && err == nil, nil
}
-func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
+func equalMatchersToElements(matchers []any) (elements []any) {
for _, matcher := range matchers {
if equalMatcher, ok := matcher.(*EqualMatcher); ok {
elements = append(elements, equalMatcher.Expected)
@@ -60,7 +60,7 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
return
}
-func flatten(elems []interface{}) []interface{} {
+func flatten(elems []any) []any {
if len(elems) != 1 ||
!(isArrayOrSlice(elems[0]) ||
(miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) {
@@ -77,14 +77,14 @@ func flatten(elems []interface{}) []interface{} {
}
value := reflect.ValueOf(elems[0])
- flattened := make([]interface{}, value.Len())
+ flattened := make([]any, value.Len())
for i := 0; i < value.Len(); i++ {
flattened[i] = value.Index(i).Interface()
}
return flattened
}
-func matchers(expectedElems []interface{}) (matchers []interface{}) {
+func matchers(expectedElems []any) (matchers []any) {
for _, e := range flatten(expectedElems) {
if e == nil {
matchers = append(matchers, &BeNilMatcher{})
@@ -97,11 +97,11 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) {
return
}
-func presentable(elems []interface{}) interface{} {
+func presentable(elems []any) any {
elems = flatten(elems)
if len(elems) == 0 {
- return []interface{}{}
+ return []any{}
}
sv := reflect.ValueOf(elems)
@@ -125,9 +125,9 @@ func presentable(elems []interface{}) interface{} {
return ss.Interface()
}
-func valuesOf(actual interface{}) []interface{} {
+func valuesOf(actual any) []any {
value := reflect.ValueOf(actual)
- values := []interface{}{}
+ values := []any{}
if miter.IsIter(actual) {
if miter.IsSeq2(actual) {
miter.IterateKV(actual, func(k, v reflect.Value) bool {
@@ -154,7 +154,7 @@ func valuesOf(actual interface{}) []interface{} {
return values
}
-func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to consist of", presentable(matcher.Elements))
message = appendMissingElements(message, matcher.missingElements)
if len(matcher.extraElements) > 0 {
@@ -164,7 +164,7 @@ func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message str
return
}
-func appendMissingElements(message string, missingElements []interface{}) string {
+func appendMissingElements(message string, missingElements []any) string {
if len(missingElements) == 0 {
return message
}
@@ -172,6 +172,6 @@ func appendMissingElements(message string, missingElements []interface{}) string
format.Object(presentable(missingElements), 1))
}
-func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to consist of", presentable(matcher.Elements))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 830239c7..8337a526 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -12,11 +12,11 @@ import (
)
type ContainElementMatcher struct {
- Element interface{}
- Result []interface{}
+ Element any
+ Result []any
}
-func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainElementMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1))
}
@@ -132,14 +132,14 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
var lastError error
if !miter.IsIter(actual) {
- var valueAt func(int) interface{}
+ var valueAt func(int) any
var foundAt func(int)
// We're dealing with an array/slice/map, so in all cases we can iterate
// over the elements in actual using indices (that can be considered
// keys in case of maps).
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
if result.Kind() != reflect.Invalid {
@@ -150,7 +150,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
}
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
if result.Kind() != reflect.Invalid {
@@ -251,7 +251,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
}
// pick up any findings the test is interested in as it specified a non-nil
- // result reference. However, the expection always is that there are at
+ // result reference. However, the expectation always is that there are at
// least one or multiple findings. So, if a result is expected, but we had
// no findings, then this is an error.
findings := getFindings()
@@ -284,10 +284,10 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return true, nil
}
-func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
-func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
index d9fcb8b8..ce304189 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
@@ -9,11 +9,11 @@ import (
)
type ContainElementsMatcher struct {
- Elements []interface{}
- missingElements []interface{}
+ Elements []any
+ missingElements []any
}
-func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainElementsMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1))
}
@@ -35,11 +35,11 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to contain elements", presentable(matcher.Elements))
return appendMissingElements(message, matcher.missingElements)
}
-func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
index e725f8c2..d9980ee2 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go
@@ -11,10 +11,10 @@ import (
type ContainSubstringMatcher struct {
Substr string
- Args []interface{}
+ Args []any
}
-func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ContainSubstringMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -31,10 +31,10 @@ func (matcher *ContainSubstringMatcher) stringToMatch() string {
return stringToMatch
}
-func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain substring", matcher.stringToMatch())
}
-func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain substring", matcher.stringToMatch())
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
index befb7bdf..4ad16615 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/equal_matcher.go
@@ -9,10 +9,10 @@ import (
)
type EqualMatcher struct {
- Expected interface{}
+ Expected any
}
-func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *EqualMatcher) Match(actual any) (success bool, err error) {
if actual == nil && matcher.Expected == nil {
return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error)
return reflect.DeepEqual(actual, matcher.Expected), nil
}
-func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) FailureMessage(actual any) (message string) {
actualString, actualOK := actual.(string)
expectedString, expectedOK := matcher.Expected.(string)
if actualOK && expectedOK {
@@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string)
return format.Message(actual, "to equal", matcher.Expected)
}
-func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to equal", matcher.Expected)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
index 9856752f..a4fcfc42 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
@@ -12,7 +12,7 @@ type HaveCapMatcher struct {
Count int
}
-func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) {
length, ok := capOf(actual)
if !ok {
return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
@@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
index 4111f2b8..4c45063b 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -9,10 +9,10 @@ import (
)
type HaveEachMatcher struct {
- Element interface{}
+ Element any
}
-func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s",
format.Object(actual, 1))
@@ -61,14 +61,14 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
format.Object(actual, 1))
}
- var valueAt func(int) interface{}
+ var valueAt func(int) any
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
}
@@ -89,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
}
// FailureMessage returns a suitable failure message.
-func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
// NegatedFailureMessage returns a suitable negated failure message.
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/operator/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
index 23799f1c..8b2d297c 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -14,13 +14,13 @@ type mismatchFailure struct {
}
type HaveExactElementsMatcher struct {
- Elements []interface{}
+ Elements []any
mismatchFailures []mismatchFailure
missingIndex int
extraIndex int
}
-func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) {
matcher.resetState()
if isMap(actual) || miter.IsSeq2(actual) {
@@ -108,7 +108,7 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
return success, nil
}
-func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
if matcher.missingIndex > 0 {
message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
@@ -125,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes
return
}
-func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
index b5701874..a5a028e9 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct {
Field string
}
-func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) {
// we don't care about the field's actual value, just about any error in
// trying to find the field (or method).
_, err = extractField(actual, matcher.Field, "HaveExistingField")
@@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool
return false, err
}
-func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
}
-func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_field.go b/operator/vendor/github.com/onsi/gomega/matchers/have_field.go
index 293457e8..d9fbeaf7 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_field.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -17,7 +17,7 @@ func (e missingFieldError) Error() string {
return string(e)
}
-func extractField(actual interface{}, field string, matchername string) (any, error) {
+func extractField(actual any, field string, matchername string) (any, error) {
fields := strings.SplitN(field, ".", 2)
actualValue := reflect.ValueOf(actual)
@@ -68,7 +68,7 @@ func extractField(actual interface{}, field string, matchername string) (any, er
type HaveFieldMatcher struct {
Field string
- Expected interface{}
+ Expected any
}
func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
@@ -80,7 +80,7 @@ func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
return expectedMatcher
}
-func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
return false, err
@@ -89,7 +89,7 @@ func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err er
return matcher.expectedMatcher().Match(extractedField)
}
-func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
@@ -101,7 +101,7 @@ func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message str
return message
}
-func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index d14d9e5f..2d561b9a 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -11,12 +11,12 @@ import (
)
type HaveHTTPBodyMatcher struct {
- Expected interface{}
- cachedResponse interface{}
+ Expected any
+ cachedResponse any
cachedBody []byte
}
-func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) {
body, err := matcher.body(actual)
if err != nil {
return false, err
@@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
}
}
-func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message
}
}
-func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m
// body returns the body. It is cached because once we read it in Match()
// the Reader is closed and it is not readable again in FailureMessage()
// or NegatedFailureMessage()
-func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) {
if matcher.cachedResponse == actual && matcher.cachedBody != nil {
return matcher.cachedBody, nil
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
index c256f452..75672265 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -11,10 +11,10 @@ import (
type HaveHTTPHeaderWithValueMatcher struct {
Header string
- Value interface{}
+ Value any
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
return false, err
@@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes
return headerMatcher.Match(headerValue)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}
return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc
}
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) {
switch r := actual.(type) {
case *http.Response:
return r.Header.Get(matcher.Header), nil
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 0f66e46e..8b25b3a9 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -12,10 +12,10 @@ import (
)
type HaveHTTPStatusMatcher struct {
- Expected []interface{}
+ Expected []any
}
-func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) {
var resp *http.Response
switch a := actual.(type) {
case *http.Response:
@@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e
return false, nil
}
-func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
}
-func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
}
@@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string {
return strings.Join(lines, "\n")
}
-func formatHttpResponse(input interface{}) string {
+func formatHttpResponse(input any) string {
var resp *http.Response
switch r := input.(type) {
case *http.Response:
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
index b62ee93c..9e16dcf5 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -11,10 +11,10 @@ import (
)
type HaveKeyMatcher struct {
- Key interface{}
+ Key any
}
-func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -52,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "to have key matching", matcher.Key)
@@ -61,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "not to have key matching", matcher.Key)
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
index 3d608f63..1c53f1e5 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -11,11 +11,11 @@ import (
)
type HaveKeyWithValueMatcher struct {
- Key interface{}
- Value interface{}
+ Key any
+ Value any
}
-func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -70,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) {
str := "to have {key: value}"
if _, ok := matcher.Key.(omegaMatcher); ok {
str += " matching"
@@ -78,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess
str += " matching"
}
- expect := make(map[interface{}]interface{}, 1)
+ expect := make(map[any]any, 1)
expect[matcher.Key] = matcher.Value
return format.Message(actual, str, expect)
}
-func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
kStr := "not to have key"
if _, ok := matcher.Key.(omegaMatcher); ok {
kStr = "not to have key matching"
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
index ca25713f..c334d4c0 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -10,7 +10,7 @@ type HaveLenMatcher struct {
Count int
}
-func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) {
length, ok := lengthOf(actual)
if !ok {
return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
@@ -19,10 +19,10 @@ func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 22a1b673..a240f1a1 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -11,7 +11,7 @@ import (
type HaveOccurredMatcher struct {
}
-func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return false, nil
@@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err
return !isNil(actual), nil
}
-func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
}
-func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
index 1d8e8027..7987d41f 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -8,10 +8,10 @@ import (
type HavePrefixMatcher struct {
Prefix string
- Args []interface{}
+ Args []any
}
-func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string {
return matcher.Prefix
}
-func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have prefix", matcher.prefix())
}
-func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have prefix", matcher.prefix())
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
index 40a3526e..2aa4ceac 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -8,10 +8,10 @@ import (
type HaveSuffixMatcher struct {
Suffix string
- Args []interface{}
+ Args []any
}
-func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string {
return matcher.Suffix
}
-func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have suffix", matcher.suffix())
}
-func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have suffix", matcher.suffix())
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/have_value.go b/operator/vendor/github.com/onsi/gomega/matchers/have_value.go
index f6725283..4c39e0db 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/have_value.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -12,10 +12,10 @@ const maxIndirections = 31
type HaveValueMatcher struct {
Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
- resolvedActual interface{} // the ("resolved") value.
+ resolvedActual any // the ("resolved") value.
}
-func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+func (m *HaveValueMatcher) Match(actual any) (bool, error) {
val := reflect.ValueOf(actual)
for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
// return an error if value isn't valid. Please note that we cannot
@@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
return false, errors.New(format.Message(actual, "too many indirections"))
}
-func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.resolvedActual)
}
-func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.resolvedActual)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c539dd38..f9d31377 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
format.Object(expected, 1))
}
-func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
}
return format.Message(actual, "to match error", matcher.Expected)
}
-func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
index f962f139..331f289a 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchJSONMatcher struct {
- JSONToMatch interface{}
- firstFailurePath []interface{}
+ JSONToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.prettyPrint(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
// this is guarded by prettyPrint
json.Unmarshal([]byte(actualString), &aval)
@@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
index adac5db6..779be683 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -9,10 +9,10 @@ import (
type MatchRegexpMatcher struct {
Regexp string
- Args []interface{}
+ Args []any
}
-func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
@@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err
return match, nil
}
-func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to match regular expression", matcher.regexp())
}
-func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to match regular expression", matcher.regexp())
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index 5c815f5a..f7dcaf6f 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -15,10 +15,10 @@ import (
)
type MatchXMLMatcher struct {
- XMLToMatch interface{}
+ XMLToMatch any
}
-func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.formattedPrint(actual)
if err != nil {
return false, err
@@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err
return reflect.DeepEqual(aval, eval), nil
}
-func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
+func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) {
var ok bool
actualString, ok = toString(actual)
if !ok {
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 2cb6b47d..c3da9bd4 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -5,22 +5,22 @@ import (
"strings"
"github.com/onsi/gomega/format"
- "gopkg.in/yaml.v3"
+ "go.yaml.in/yaml/v3"
)
type MatchYAMLMatcher struct {
- YAMLToMatch interface{}
- firstFailurePath []interface{}
+ YAMLToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
@@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
return normalise(actualString), normalise(expectedString), err
}
func normalise(input string) string {
- var val interface{}
+ var val any
err := yaml.Unmarshal([]byte(input), &val)
if err != nil {
panic(err) // unreachable since Match already calls Unmarshal
@@ -62,7 +62,7 @@ func normalise(input string) string {
return strings.TrimSpace(string(output))
}
-func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/not.go b/operator/vendor/github.com/onsi/gomega/matchers/not.go
index 78b71910..c598b789 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/not.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/not.go
@@ -8,7 +8,7 @@ type NotMatcher struct {
Matcher types.GomegaMatcher
}
-func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+func (m *NotMatcher) Match(actual any) (bool, error) {
success, err := m.Matcher.Match(actual)
if err != nil {
return false, err
@@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) {
return !success, nil
}
-func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) FailureMessage(actual any) (message string) {
return m.Matcher.NegatedFailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) {
return m.Matcher.FailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool {
return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/or.go b/operator/vendor/github.com/onsi/gomega/matchers/or.go
index 841ae26a..6578404b 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/or.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/or.go
@@ -14,7 +14,7 @@ type OrMatcher struct {
firstSuccessfulMatcher types.GomegaMatcher
}
-func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *OrMatcher) Match(actual any) (success bool, err error) {
m.firstSuccessfulMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
return false, nil
}
-func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) FailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
}
-func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) {
return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
}
-func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
index adc8cee6..8be5a7cc 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -8,11 +8,11 @@ import (
)
type PanicMatcher struct {
- Expected interface{}
- object interface{}
+ Expected any
+ object any
}
-func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *PanicMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
}
@@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
return
}
-func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) FailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We wanted any panic to occur, but none did.
return format.Message(actual, "to panic")
@@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string)
}
}
-func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We didn't want any panic to occur, but one did.
return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 948164ea..1d9f61d6 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -11,12 +11,12 @@ import (
)
type ReceiveMatcher struct {
- Args []interface{}
+ Args []any
receivedValue reflect.Value
channelClosed bool
}
-func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -30,7 +30,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
- var resultReference interface{}
+ var resultReference any
// Valid arg formats are as follows, always with optional POINTER before
// optional MATCHER:
@@ -115,8 +115,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -136,8 +136,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
return format.Message(actual, "to receive something."+closedAddendum)
}
-func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -157,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag
return format.Message(actual, "not to receive anything."+closedAddendum)
}
-func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
index ec68fe8b..2adc4825 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -8,13 +8,13 @@ import (
)
type SatisfyMatcher struct {
- Predicate interface{}
+ Predicate any
// cached type
predicateArgType reflect.Type
}
-func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+func NewSatisfyMatcher(predicate any) *SatisfyMatcher {
if predicate == nil {
panic("predicate cannot be nil")
}
@@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
}
}
-func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *SatisfyMatcher) Match(actual any) (success bool, err error) {
// prepare a parameter to pass to the predicate
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
@@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
return result[0].Bool(), nil
}
-func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to satisfy predicate", m.Predicate)
}
-func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to not satisfy predicate", m.Predicate)
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/operator/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
index 1369c1e8..30dd58f4 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
@@ -8,7 +8,7 @@ import (
"strings"
)
-func formattedMessage(comparisonMessage string, failurePath []interface{}) string {
+func formattedMessage(comparisonMessage string, failurePath []any) string {
var diffMessage string
if len(failurePath) == 0 {
diffMessage = ""
@@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin
return fmt.Sprintf("%s%s", comparisonMessage, diffMessage)
}
-func formattedFailurePath(failurePath []interface{}) string {
+func formattedFailurePath(failurePath []any) string {
formattedPaths := []string{}
for i := len(failurePath) - 1; i >= 0; i-- {
switch p := failurePath[i].(type) {
@@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string {
return strings.Join(formattedPaths, "")
}
-func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
- var errorPath []interface{}
+func deepEqual(a any, b any) (bool, []any) {
+ var errorPath []any
if reflect.TypeOf(a) != reflect.TypeOf(b) {
return false, errorPath
}
switch a.(type) {
- case []interface{}:
- if len(a.([]interface{})) != len(b.([]interface{})) {
+ case []any:
+ if len(a.([]any)) != len(b.([]any)) {
return false, errorPath
}
- for i, v := range a.([]interface{}) {
- elementEqual, keyPath := deepEqual(v, b.([]interface{})[i])
+ for i, v := range a.([]any) {
+ elementEqual, keyPath := deepEqual(v, b.([]any)[i])
if !elementEqual {
return false, append(keyPath, i)
}
}
return true, errorPath
- case map[interface{}]interface{}:
- if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) {
+ case map[any]any:
+ if len(a.(map[any]any)) != len(b.(map[any]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[interface{}]interface{}) {
- v2, ok := b.(map[interface{}]interface{})[k]
+ for k, v1 := range a.(map[any]any) {
+ v2, ok := b.(map[any]any)[k]
if !ok {
return false, errorPath
}
@@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
}
return true, errorPath
- case map[string]interface{}:
- if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) {
+ case map[string]any:
+ if len(a.(map[string]any)) != len(b.(map[string]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[string]interface{}) {
- v2, ok := b.(map[string]interface{})[k]
+ for k, v1 := range a.(map[string]any) {
+ v2, ok := b.(map[string]any)[k]
if !ok {
return false, errorPath
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/operator/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 327350f7..f0b2c4aa 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -14,7 +14,7 @@ type formattedGomegaError interface {
type SucceedMatcher struct {
}
-func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return true, nil
@@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
return isNil(actual), nil
}
-func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) {
var fgErr formattedGomegaError
if errors.As(actual.(error), &fgErr) {
return fgErr.FormattedGomegaError()
@@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin
return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
-func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) {
return "Expected failure, but got no error."
}
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 830e3082..0d78779d 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -11,7 +11,7 @@ type BipartiteGraph struct {
Edges EdgeSet
}
-func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
for i, v := range leftValues {
left = append(left, Node{ID: i, Value: v})
@@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
// FreeLeftRight returns left node values and right node values
// of the BipartiteGraph's nodes which are not part of the given edges.
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) {
for _, node := range bg.Left {
if edges.Free(node) {
leftValues = append(leftValues, node.Value)
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
index cd597a2f..66d3578d 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -2,7 +2,7 @@ package node
type Node struct {
ID int
- Value interface{}
+ Value any
}
type NodeOrderedSet []Node
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/type_support.go b/operator/vendor/github.com/onsi/gomega/matchers/type_support.go
index b9440ac7..d020dedc 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/type_support.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -20,16 +20,16 @@ import (
)
type omegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
-func isBool(a interface{}) bool {
+func isBool(a any) bool {
return reflect.TypeOf(a).Kind() == reflect.Bool
}
-func isNumber(a interface{}) bool {
+func isNumber(a any) bool {
if a == nil {
return false
}
@@ -37,22 +37,22 @@ func isNumber(a interface{}) bool {
return reflect.Int <= kind && kind <= reflect.Float64
}
-func isInteger(a interface{}) bool {
+func isInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Int <= kind && kind <= reflect.Int64
}
-func isUnsignedInteger(a interface{}) bool {
+func isUnsignedInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Uint <= kind && kind <= reflect.Uint64
}
-func isFloat(a interface{}) bool {
+func isFloat(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Float32 <= kind && kind <= reflect.Float64
}
-func toInteger(a interface{}) int64 {
+func toInteger(a any) int64 {
if isInteger(a) {
return reflect.ValueOf(a).Int()
} else if isUnsignedInteger(a) {
@@ -63,7 +63,7 @@ func toInteger(a interface{}) int64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toUnsignedInteger(a interface{}) uint64 {
+func toUnsignedInteger(a any) uint64 {
if isInteger(a) {
return uint64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -74,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toFloat(a interface{}) float64 {
+func toFloat(a any) float64 {
if isInteger(a) {
return float64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -85,26 +85,26 @@ func toFloat(a interface{}) float64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func isError(a interface{}) bool {
+func isError(a any) bool {
_, ok := a.(error)
return ok
}
-func isChan(a interface{}) bool {
+func isChan(a any) bool {
if isNil(a) {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Chan
}
-func isMap(a interface{}) bool {
+func isMap(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Map
}
-func isArrayOrSlice(a interface{}) bool {
+func isArrayOrSlice(a any) bool {
if a == nil {
return false
}
@@ -116,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool {
}
}
-func isString(a interface{}) bool {
+func isString(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.String
}
-func toString(a interface{}) (string, bool) {
+func toString(a any) (string, bool) {
aString, isString := a.(string)
if isString {
return aString, true
@@ -147,7 +147,7 @@ func toString(a interface{}) (string, bool) {
return "", false
}
-func lengthOf(a interface{}) (int, bool) {
+func lengthOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -169,7 +169,7 @@ func lengthOf(a interface{}) (int, bool) {
return 0, false
}
}
-func capOf(a interface{}) (int, bool) {
+func capOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -181,7 +181,7 @@ func capOf(a interface{}) (int, bool) {
}
}
-func isNil(a interface{}) bool {
+func isNil(a any) bool {
if a == nil {
return true
}
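
The interface{} -> any sweep across these vendored gomega files is cosmetic: any has been a predeclared alias for interface{} since Go 1.18, so every signature above is identical at compile time. A minimal standalone sketch:

```go
package main

import "fmt"

// describe uses the old spelling, describeAny the new one; the two function
// types are the same type because any is an alias for interface{} (Go 1.18+).
func describe(v interface{}) string { return fmt.Sprintf("%T: %v", v, v) }
func describeAny(v any) string      { return fmt.Sprintf("%T: %v", v, v) }

func main() {
	var f func(any) string = describe // assignable: identical function types
	fmt.Println(f(42))
	fmt.Println(describeAny("hello"))
}
```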
diff --git a/operator/vendor/github.com/onsi/gomega/matchers/with_transform.go b/operator/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 6f743b1b..6231c3b4 100644
--- a/operator/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/operator/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,20 +9,20 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value and an optional error
+ Transform any // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
transformArgType reflect.Type
// state
- transformedValue interface{}
+ transformedValue any
}
// reflect.Type for error
var errorT = reflect.TypeOf((*error)(nil)).Elem()
-func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
}
@@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
}
-func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+func (m *WithTransformMatcher) Match(actual any) (bool, error) {
// prepare a parameter to pass to the Transform function
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
@@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
return m.Matcher.Match(m.transformedValue)
}
-func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ any) bool {
// TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
//
// Querying the next matcher is fine if the transformer always will return the same value.
diff --git a/operator/vendor/github.com/onsi/gomega/types/types.go b/operator/vendor/github.com/onsi/gomega/types/types.go
index 30f2beed..685a46f3 100644
--- a/operator/vendor/github.com/onsi/gomega/types/types.go
+++ b/operator/vendor/github.com/onsi/gomega/types/types.go
@@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int)
// A simple *testing.T interface wrapper
type GomegaTestingT interface {
Helper()
- Fatalf(format string, args ...interface{})
+ Fatalf(format string, args ...any)
}
-// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers
+// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers
type Gomega interface {
- Ω(actual interface{}, extra ...interface{}) Assertion
- Expect(actual interface{}, extra ...interface{}) Assertion
- ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+ Ω(actual any, extra ...any) Assertion
+ Expect(actual any, extra ...any) Assertion
+ ExpectWithOffset(offset int, actual any, extra ...any) Assertion
- Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Eventually(actualOrCtx any, args ...any) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
- Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Consistently(actualOrCtx any, args ...any) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)
@@ -37,9 +37,9 @@ type Gomega interface {
//
// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
/*
@@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/
type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
+ MatchMayChangeInTheFuture(actual any) bool
}
-func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool {
oracleMatcher, ok := matcher.(OracleMatcher)
if !ok {
return true
@@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
// they are eventually satisfied
type AsyncAssertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
+
+ // equivalent to above
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) AsyncAssertion
WithTimeout(interval time.Duration) AsyncAssertion
@@ -76,18 +81,18 @@ type AsyncAssertion interface {
Within(timeout time.Duration) AsyncAssertion
ProbeEvery(interval time.Duration) AsyncAssertion
WithContext(ctx context.Context) AsyncAssertion
- WithArguments(argsToForward ...interface{}) AsyncAssertion
+ WithArguments(argsToForward ...any) AsyncAssertion
MustPassRepeatedly(count int) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
type Assertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
- To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) Assertion
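
Beyond the alias sweep, types.go also adds To, ToNot, and NotTo to AsyncAssertion as synonyms for Should/ShouldNot, so asynchronous assertions can read the same as synchronous ones. A short usage sketch, assuming the vendored gomega implements the new interface methods (test and variable names below are illustrative):

```go
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// TestEventuallyTo polls a channel until a value arrives; .To(...) on the
// async assertion now reads the same as .To(...) on a plain Expect.
func TestEventuallyTo(t *testing.T) {
	g := NewWithT(t)

	ch := make(chan string, 1)
	go func() {
		time.Sleep(10 * time.Millisecond)
		ch <- "ready"
	}()

	g.Eventually(ch).WithTimeout(time.Second).To(Receive(Equal("ready")))
}
```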
diff --git a/operator/vendor/github.com/pkg/errors/.gitignore b/operator/vendor/github.com/pkg/errors/.gitignore
deleted file mode 100644
index daf913b1..00000000
--- a/operator/vendor/github.com/pkg/errors/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/operator/vendor/github.com/pkg/errors/.travis.yml b/operator/vendor/github.com/pkg/errors/.travis.yml
deleted file mode 100644
index 9159de03..00000000
--- a/operator/vendor/github.com/pkg/errors/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go_import_path: github.com/pkg/errors
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-script:
- - make check
diff --git a/operator/vendor/github.com/pkg/errors/LICENSE b/operator/vendor/github.com/pkg/errors/LICENSE
deleted file mode 100644
index 835ba3e7..00000000
--- a/operator/vendor/github.com/pkg/errors/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2015, Dave Cheney
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/operator/vendor/github.com/pkg/errors/Makefile b/operator/vendor/github.com/pkg/errors/Makefile
deleted file mode 100644
index ce9d7cde..00000000
--- a/operator/vendor/github.com/pkg/errors/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-PKGS := github.com/pkg/errors
-SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
-GO := go
-
-check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
-
-test:
- $(GO) test $(PKGS)
-
-vet: | test
- $(GO) vet $(PKGS)
-
-staticcheck:
- $(GO) get honnef.co/go/tools/cmd/staticcheck
- staticcheck -checks all $(PKGS)
-
-misspell:
- $(GO) get github.com/client9/misspell/cmd/misspell
- misspell \
- -locale GB \
- -error \
- *.md *.go
-
-unconvert:
- $(GO) get github.com/mdempsky/unconvert
- unconvert -v $(PKGS)
-
-ineffassign:
- $(GO) get github.com/gordonklaus/ineffassign
- find $(SRCDIRS) -name '*.go' | xargs ineffassign
-
-pedantic: check errcheck
-
-unparam:
- $(GO) get mvdan.cc/unparam
- unparam ./...
-
-errcheck:
- $(GO) get github.com/kisielk/errcheck
- errcheck $(PKGS)
-
-gofmt:
- @echo Checking code is gofmted
- @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/operator/vendor/github.com/pkg/errors/README.md b/operator/vendor/github.com/pkg/errors/README.md
deleted file mode 100644
index 54dfdcb1..00000000
--- a/operator/vendor/github.com/pkg/errors/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# errors [](https://travis-ci.org/pkg/errors) [](https://ci.appveyor.com/project/davecheney/errors/branch/master) [](http://godoc.org/github.com/pkg/errors) [](https://goreportcard.com/report/github.com/pkg/errors) [](https://sourcegraph.com/github.com/pkg/errors?badge)
-
-Package errors provides simple error handling primitives.
-
-`go get github.com/pkg/errors`
-
-The traditional error handling idiom in Go is roughly akin to
-```go
-if err != nil {
- return err
-}
-```
-which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
-
-## Adding context to an error
-
-The errors.Wrap function returns a new error that adds context to the original error. For example
-```go
-_, err := ioutil.ReadAll(r)
-if err != nil {
- return errors.Wrap(err, "read failed")
-}
-```
-## Retrieving the cause of an error
-
-Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
-```go
-type causer interface {
- Cause() error
-}
-```
-`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
-```go
-switch err := errors.Cause(err).(type) {
-case *MyError:
- // handle specifically
-default:
- // unknown error
-}
-```
-
-[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
-
-## Roadmap
-
-With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
-
-- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
-- 1.0. Final release.
-
-## Contributing
-
-Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-
-Before sending a PR, please discuss your change by raising an issue.
-
-## License
-
-BSD-2-Clause
diff --git a/operator/vendor/github.com/pkg/errors/appveyor.yml b/operator/vendor/github.com/pkg/errors/appveyor.yml
deleted file mode 100644
index a932eade..00000000
--- a/operator/vendor/github.com/pkg/errors/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: build-{build}.{branch}
-
-clone_folder: C:\gopath\src\github.com\pkg\errors
-shallow_clone: true # for startup speed
-
-environment:
- GOPATH: C:\gopath
-
-platform:
- - x64
-
-# http://www.appveyor.com/docs/installed-software
-install:
- # some helpful output for debugging builds
- - go version
- - go env
- # pre-installed MinGW at C:\MinGW is 32bit only
- # but MSYS2 at C:\msys64 has mingw64
- - set PATH=C:\msys64\mingw64\bin;%PATH%
- - gcc --version
- - g++ --version
-
-build_script:
- - go install -v ./...
-
-test_script:
- - set PATH=C:\gopath\bin;%PATH%
- - go test -v ./...
-
-#artifacts:
-# - path: '%GOPATH%\bin\*.exe'
-deploy: off
diff --git a/operator/vendor/github.com/pkg/errors/errors.go b/operator/vendor/github.com/pkg/errors/errors.go
deleted file mode 100644
index 161aea25..00000000
--- a/operator/vendor/github.com/pkg/errors/errors.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Package errors provides simple error handling primitives.
-//
-// The traditional error handling idiom in Go is roughly akin to
-//
-// if err != nil {
-// return err
-// }
-//
-// which when applied recursively up the call stack results in error reports
-// without context or debugging information. The errors package allows
-// programmers to add context to the failure path in their code in a way
-// that does not destroy the original value of the error.
-//
-// Adding context to an error
-//
-// The errors.Wrap function returns a new error that adds context to the
-// original error by recording a stack trace at the point Wrap is called,
-// together with the supplied message. For example
-//
-// _, err := ioutil.ReadAll(r)
-// if err != nil {
-// return errors.Wrap(err, "read failed")
-// }
-//
-// If additional control is required, the errors.WithStack and
-// errors.WithMessage functions destructure errors.Wrap into its component
-// operations: annotating an error with a stack trace and with a message,
-// respectively.
-//
-// Retrieving the cause of an error
-//
-// Using errors.Wrap constructs a stack of errors, adding context to the
-// preceding error. Depending on the nature of the error it may be necessary
-// to reverse the operation of errors.Wrap to retrieve the original error
-// for inspection. Any error value which implements this interface
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// can be inspected by errors.Cause. errors.Cause will recursively retrieve
-// the topmost error that does not implement causer, which is assumed to be
-// the original cause. For example:
-//
-// switch err := errors.Cause(err).(type) {
-// case *MyError:
-// // handle specifically
-// default:
-// // unknown error
-// }
-//
-// Although the causer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// Formatted printing of errors
-//
-// All error values returned from this package implement fmt.Formatter and can
-// be formatted by the fmt package. The following verbs are supported:
-//
-// %s print the error. If the error has a Cause it will be
-// printed recursively.
-// %v see %s
-// %+v extended format. Each Frame of the error's StackTrace will
-// be printed in detail.
-//
-// Retrieving the stack trace of an error or wrapper
-//
-// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
-// invoked. This information can be retrieved with the following interface:
-//
-// type stackTracer interface {
-// StackTrace() errors.StackTrace
-// }
-//
-// The returned errors.StackTrace type is defined as
-//
-// type StackTrace []Frame
-//
-// The Frame type represents a call site in the stack trace. Frame supports
-// the fmt.Formatter interface that can be used for printing information about
-// the stack trace of this error. For example:
-//
-// if err, ok := err.(stackTracer); ok {
-// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d\n", f, f)
-// }
-// }
-//
-// Although the stackTracer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// See the documentation for Frame.Format for more details.
-package errors
-
-import (
- "fmt"
- "io"
-)
-
-// New returns an error with the supplied message.
-// New also records the stack trace at the point it was called.
-func New(message string) error {
- return &fundamental{
- msg: message,
- stack: callers(),
- }
-}
-
-// Errorf formats according to a format specifier and returns the string
-// as a value that satisfies error.
-// Errorf also records the stack trace at the point it was called.
-func Errorf(format string, args ...interface{}) error {
- return &fundamental{
- msg: fmt.Sprintf(format, args...),
- stack: callers(),
- }
-}
-
-// fundamental is an error that has a message and a stack, but no caller.
-type fundamental struct {
- msg string
- *stack
-}
-
-func (f *fundamental) Error() string { return f.msg }
-
-func (f *fundamental) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- io.WriteString(s, f.msg)
- f.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, f.msg)
- case 'q':
- fmt.Fprintf(s, "%q", f.msg)
- }
-}
-
-// WithStack annotates err with a stack trace at the point WithStack was called.
-// If err is nil, WithStack returns nil.
-func WithStack(err error) error {
- if err == nil {
- return nil
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-type withStack struct {
- error
- *stack
-}
-
-func (w *withStack) Cause() error { return w.error }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withStack) Unwrap() error { return w.error }
-
-func (w *withStack) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v", w.Cause())
- w.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, w.Error())
- case 'q':
- fmt.Fprintf(s, "%q", w.Error())
- }
-}
-
-// Wrap returns an error annotating err with a stack trace
-// at the point Wrap is called, and the supplied message.
-// If err is nil, Wrap returns nil.
-func Wrap(err error, message string) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: message,
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// Wrapf returns an error annotating err with a stack trace
-// at the point Wrapf is called, and the format specifier.
-// If err is nil, Wrapf returns nil.
-func Wrapf(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// WithMessage annotates err with a new message.
-// If err is nil, WithMessage returns nil.
-func WithMessage(err error, message string) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: message,
- }
-}
-
-// WithMessagef annotates err with the format specifier.
-// If err is nil, WithMessagef returns nil.
-func WithMessagef(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
-}
-
-type withMessage struct {
- cause error
- msg string
-}
-
-func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
-func (w *withMessage) Cause() error { return w.cause }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withMessage) Unwrap() error { return w.cause }
-
-func (w *withMessage) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v\n", w.Cause())
- io.WriteString(s, w.msg)
- return
- }
- fallthrough
- case 's', 'q':
- io.WriteString(s, w.Error())
- }
-}
-
-// Cause returns the underlying cause of the error, if possible.
-// An error value has a cause if it implements the following
-// interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- cause, ok := err.(causer)
- if !ok {
- break
- }
- err = cause.Cause()
- }
- return err
-}
diff --git a/operator/vendor/github.com/pkg/errors/go113.go b/operator/vendor/github.com/pkg/errors/go113.go
deleted file mode 100644
index be0d10d0..00000000
--- a/operator/vendor/github.com/pkg/errors/go113.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build go1.13
-
-package errors
-
-import (
- stderrors "errors"
-)
-
-// Is reports whether any error in err's chain matches target.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-func Is(err, target error) bool { return stderrors.Is(err, target) }
-
-// As finds the first error in err's chain that matches target, and if so, sets
-// target to that error value and returns true.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error matches target if the error's concrete value is assignable to the value
-// pointed to by target, or if the error has a method As(interface{}) bool such that
-// As(target) returns true. In the latter case, the As method is responsible for
-// setting target.
-//
-// As will panic if target is not a non-nil pointer to either a type that implements
-// error, or to any interface type. As returns false if err is nil.
-func As(err error, target interface{}) bool { return stderrors.As(err, target) }
-
-// Unwrap returns the result of calling the Unwrap method on err, if err's
-// type contains an Unwrap method returning error.
-// Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
- return stderrors.Unwrap(err)
-}
diff --git a/operator/vendor/github.com/pkg/errors/stack.go b/operator/vendor/github.com/pkg/errors/stack.go
deleted file mode 100644
index 779a8348..00000000
--- a/operator/vendor/github.com/pkg/errors/stack.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package errors
-
-import (
- "fmt"
- "io"
- "path"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Frame represents a program counter inside a stack frame.
-// For historical reasons if Frame is interpreted as a uintptr
-// its value represents the program counter + 1.
-type Frame uintptr
-
-// pc returns the program counter for this frame;
-// multiple frames may have the same PC value.
-func (f Frame) pc() uintptr { return uintptr(f) - 1 }
-
-// file returns the full path to the file that contains the
-// function for this Frame's pc.
-func (f Frame) file() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- file, _ := fn.FileLine(f.pc())
- return file
-}
-
-// line returns the line number of source code of the
-// function for this Frame's pc.
-func (f Frame) line() int {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return 0
- }
- _, line := fn.FileLine(f.pc())
- return line
-}
-
-// name returns the name of this function, if known.
-func (f Frame) name() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- return fn.Name()
-}
-
-// Format formats the frame according to the fmt.Formatter interface.
-//
-// %s source file
-// %d source line
-// %n function name
-// %v equivalent to %s:%d
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+s function name and path of source file relative to the compile time
-// GOPATH separated by \n\t (\n\t)
-// %+v equivalent to %+s:%d
-func (f Frame) Format(s fmt.State, verb rune) {
- switch verb {
- case 's':
- switch {
- case s.Flag('+'):
- io.WriteString(s, f.name())
- io.WriteString(s, "\n\t")
- io.WriteString(s, f.file())
- default:
- io.WriteString(s, path.Base(f.file()))
- }
- case 'd':
- io.WriteString(s, strconv.Itoa(f.line()))
- case 'n':
- io.WriteString(s, funcname(f.name()))
- case 'v':
- f.Format(s, 's')
- io.WriteString(s, ":")
- f.Format(s, 'd')
- }
-}
-
-// MarshalText formats a stacktrace Frame as a text string. The output is the
-// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
-func (f Frame) MarshalText() ([]byte, error) {
- name := f.name()
- if name == "unknown" {
- return []byte(name), nil
- }
- return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
-}
-
-// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
-type StackTrace []Frame
-
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
-func (st StackTrace) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- for _, f := range st {
- io.WriteString(s, "\n")
- f.Format(s, verb)
- }
- case s.Flag('#'):
- fmt.Fprintf(s, "%#v", []Frame(st))
- default:
- st.formatSlice(s, verb)
- }
- case 's':
- st.formatSlice(s, verb)
- }
-}
-
-// formatSlice will format this StackTrace into the given buffer as a slice of
-// Frame, only valid when called with '%s' or '%v'.
-func (st StackTrace) formatSlice(s fmt.State, verb rune) {
- io.WriteString(s, "[")
- for i, f := range st {
- if i > 0 {
- io.WriteString(s, " ")
- }
- f.Format(s, verb)
- }
- io.WriteString(s, "]")
-}
-
-// stack represents a stack of program counters.
-type stack []uintptr
-
-func (s *stack) Format(st fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case st.Flag('+'):
- for _, pc := range *s {
- f := Frame(pc)
- fmt.Fprintf(st, "\n%+v", f)
- }
- }
- }
-}
-
-func (s *stack) StackTrace() StackTrace {
- f := make([]Frame, len(*s))
- for i := 0; i < len(f); i++ {
- f[i] = Frame((*s)[i])
- }
- return f
-}
-
-func callers() *stack {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(3, pcs[:])
- var st stack = pcs[0:n]
- return &st
-}
-
-// funcname removes the path prefix component of a function's name reported by func.Name().
-func funcname(name string) string {
- i := strings.LastIndex(name, "/")
- name = name[i+1:]
- i = strings.Index(name, ".")
- return name[i+1:]
-}
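
With github.com/pkg/errors dropped from vendor/, the equivalent wrapping and cause inspection are available from the standard library, assuming no remaining caller depends on pkg/errors stack traces. A rough migration sketch (paths and function names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// loadConfig shows the usual swap: errors.Wrap(err, "read failed") becomes
// fmt.Errorf with the %w verb, which keeps the chain unwrappable.
func loadConfig(path string) error {
	if _, err := os.ReadFile(path); err != nil {
		return fmt.Errorf("read failed: %w", err)
	}
	return nil
}

func main() {
	err := loadConfig("/nonexistent/config.yaml")
	fmt.Println(err)
	// errors.Is still sees the underlying cause through the %w wrapping.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}
```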
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index ad347113..2331b8b4 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
help: help,
variableLabels: variableLabels.compile(),
}
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ if !model.NameValidationScheme.IsValidMetricName(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
index 8b016355..7bac0da3 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -453,7 +453,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
- if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
groups = append(groups, group)
}
return groups
@@ -568,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
- _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ _, err := fmt.Fprintf(buf, format, args...)
return err
}
ws := func(s string) error {
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
index f7f97ef9..d273b664 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -67,7 +67,7 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
}
// Our current conversion moves to legacy naming, so use legacy validation.
- valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name)
+ valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index c21911f2..5fe8d3b4 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error {
}
func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
}
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 592eec3e..76e59f12 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -186,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
case pb.Counter != nil:
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
case pb.Histogram != nil:
+ h := pb.Histogram
for _, e := range m.exemplars {
- // pb.Histogram.Bucket are sorted by UpperBound.
- i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
- return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+ len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+ e.GetTimestamp() != nil {
+ h.Exemplars = append(h.Exemplars, e)
+ if len(h.Bucket) == 0 {
+ // Don't proceed to classic buckets if there are none.
+ continue
+ }
+ }
+ // h.Bucket are sorted by UpperBound.
+ i := sort.Search(len(h.Bucket), func(i int) bool {
+ return h.Bucket[i].GetUpperBound() >= e.GetValue()
})
- if i < len(pb.Histogram.Bucket) {
- pb.Histogram.Bucket[i].Exemplar = e
+ if i < len(h.Bucket) {
+ h.Bucket[i].Exemplar = e
} else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{
- CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+ CumulativeCount: proto.Uint64(h.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e,
}
- pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ h.Bucket = append(h.Bucket, b)
}
}
default:
@@ -227,6 +237,7 @@ type Exemplar struct {
// Only last applicable exemplar is injected from the list.
// For example for Counter it means last exemplar is injected.
// For Histogram, it means last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
//
// NewMetricWithExemplars works best with MustNewConstMetric and
// MustNewConstHistogram, see example.
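
The exemplar-injection change above keeps the classic-bucket behaviour described in the doc comment: the last applicable exemplar per bucket is attached, and a value beyond the highest bucket gets an explicit +Inf bucket. A small sketch against the NewMetricWithExemplars API referenced above (metric and label names are made up):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc("request_duration_seconds", "Request latency.", nil, nil)
	// Const histogram: count=3, sum=1.5, cumulative buckets at 0.1 and 0.5.
	h := prometheus.MustNewConstHistogram(desc, 3, 1.5, map[float64]uint64{0.1: 1, 0.5: 3})

	// 0.07 lands in the 0.1 bucket; 2.0 exceeds every bucket and is attached
	// to an explicitly appended +Inf bucket.
	m, err := prometheus.NewMetricWithExemplars(h,
		prometheus.Exemplar{Value: 0.07, Labels: prometheus.Labels{"trace_id": "abc"}, Timestamp: time.Now()},
		prometheus.Exemplar{Value: 2.0, Labels: prometheus.Labels{"trace_id": "def"}, Timestamp: time.Now()},
	)
	if err != nil {
		panic(err)
	}

	var pb dto.Metric
	if err := m.Write(&pb); err != nil {
		panic(err)
	}
	for _, b := range pb.Histogram.Bucket {
		fmt.Printf("le=%g has_exemplar=%v\n", b.GetUpperBound(), b.GetExemplar() != nil)
	}
}
```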
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
index 0a61b984..b32c95fa 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -25,9 +25,9 @@ import (
"golang.org/x/sys/unix"
)
-// notImplementedErr is returned by stub functions that replace cgo functions, when cgo
+// errNotImplemented is returned by stub functions that replace cgo functions, when cgo
// isn't available.
-var notImplementedErr = errors.New("not implemented")
+var errNotImplemented = errors.New("not implemented")
type memoryInfo struct {
vsize uint64 // Virtual memory size in bytes
@@ -101,7 +101,7 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if memInfo, err := getMemory(); err == nil {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
- } else if !errors.Is(err, notImplementedErr) {
+ } else if !errors.Is(err, errNotImplemented) {
// Don't report an error when support is not compiled in.
c.reportError(ch, c.rss, err)
c.reportError(ch, c.vsize, err)
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
index 8ddb0995..37886512 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -16,7 +16,7 @@
package prometheus
func getMemory() (*memoryInfo, error) {
- return nil, notImplementedErr
+ return nil, errNotImplemented
}
// describe returns all descriptions of the collector for Darwin.
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
index 9f4b130b..8074f70f 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -66,11 +66,11 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
if netstat, err := p.Netstat(); err == nil {
var inOctets, outOctets float64
- if netstat.IpExt.InOctets != nil {
- inOctets = *netstat.IpExt.InOctets
+ if netstat.InOctets != nil {
+ inOctets = *netstat.InOctets
}
- if netstat.IpExt.OutOctets != nil {
- outOctets = *netstat.IpExt.OutOctets
+ if netstat.OutOctets != nil {
+ outOctets = *netstat.OutOctets
}
ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index 356edb78..9332b024 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
labels := prometheus.Labels{}
- if !(code || method) {
+ if !code && !method {
return labels
}
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 2c808eec..487b4665 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
return false
}
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+ return m.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
return false
}
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+ return m.deleteByHashWithLabels(h, labels, m.curry)
}
// DeletePartialMatch deletes all metrics where the variable labels contain all of those
@@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
- return m.metricMap.deleteByLabels(labels, m.curry)
+ return m.deleteByLabels(labels, m.curry)
}
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+ return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+ return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
diff --git a/operator/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/operator/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 25da157f..2ed12850 100644
--- a/operator/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/operator/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
}
}
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that, is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can un-register the wrapped collector effectively un-registering the
+// metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ labels: labels,
+ }
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ prefix: prefix,
+ }
+}
+
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
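
A minimal sketch of the new WrapCollectorWith helper, following the signature added above: wrapping a collector stamps extra const labels onto everything it exposes, so otherwise identical collectors can coexist in one registry and be unregistered as single units (metric and label names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	wrapped := map[string]prometheus.Collector{}

	for _, tenant := range []string{"alpha", "beta"} {
		c := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "jobs_processed_total",
			Help: "Jobs processed by this tenant's worker.",
		})
		// Each wrapped collector carries its own tenant const label.
		w := prometheus.WrapCollectorWith(prometheus.Labels{"tenant": tenant}, c)
		reg.MustRegister(w)
		wrapped[tenant] = w
	}

	mfs, _ := reg.Gather()
	fmt.Println("metric families:", len(mfs)) // one family, one series per tenant

	// Dropping one tenant's wrapped collector removes its series in a single call.
	reg.Unregister(wrapped["alpha"])
}
```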
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/decode.go b/operator/vendor/github.com/prometheus/common/expfmt/decode.go
index 1448439b..7b762370 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -70,19 +70,34 @@ func ResponseFormat(h http.Header) Format {
return FmtUnknown
}
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
func NewDecoder(r io.Reader, format Format) Decoder {
+ scheme := model.LegacyValidation
+ if format.ToEscapingScheme() == model.NoEscaping {
+ scheme = model.UTF8Validation
+ }
switch format.FormatType() {
case TypeProtoDelim:
- return &protoDecoder{r: bufio.NewReader(r)}
+ return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+ case TypeProtoText, TypeProtoCompact:
+ return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
}
- return &textDecoder{r: r}
+ return &textDecoder{r: r, s: scheme}
}
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
r protodelim.Reader
+ s model.ValidationScheme
}
// Decode implements the Decoder interface.
@@ -93,7 +108,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ if !d.s.IsValidMetricName(v.GetName()) {
return fmt.Errorf("invalid metric name %q", v.GetName())
}
for _, m := range v.GetMetric() {
@@ -107,7 +122,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
if !model.LabelValue(l.GetValue()).IsValid() {
return fmt.Errorf("invalid label value %q", l.GetValue())
}
- if !model.LabelName(l.GetName()).IsValid() {
+ if !d.s.IsValidLabelName(l.GetName()) {
return fmt.Errorf("invalid label name %q", l.GetName())
}
}
@@ -115,10 +130,20 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
return nil
}
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+ err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+ return d.err
+}
+
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
fams map[string]*dto.MetricFamily
+ s model.ValidationScheme
err error
}
@@ -126,7 +151,7 @@ type textDecoder struct {
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
if d.err == nil {
// Read all metrics in one shot.
- var p TextParser
+ p := NewTextParser(d.s)
d.fams, d.err = p.TextToMetricFamilies(d.r)
// If we don't get an error, store io.EOF for the end.
if d.err == nil {
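
For context, a short sketch of the decoder entry point touched above; it only shows that the validation scheme is now derived from the Format. The sample payload is made up.

```go
package main

import (
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	payload := `# TYPE http_requests_total counter
http_requests_total{code="200"} 1027
`
	// The decoder picks its name-validation scheme from the Format; here the
	// classic text format is used.
	dec := expfmt.NewDecoder(strings.NewReader(payload), expfmt.NewFormat(expfmt.TypeTextPlain))

	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			break // io.EOF once the input is exhausted.
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}
```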
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/encode.go b/operator/vendor/github.com/prometheus/common/expfmt/encode.go
index d7f3d76f..73c24dfb 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,14 +18,12 @@ import (
"io"
"net/http"
+ "github.com/munnerz/goautoneg"
+ dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/encoding/protodelim"
"google.golang.org/protobuf/encoding/prototext"
"github.com/prometheus/common/model"
-
- "github.com/munnerz/goautoneg"
-
- dto "github.com/prometheus/client_model/go"
)
// Encoder types encode metric families into an underlying wire protocol.
@@ -61,7 +59,7 @@ func (ec encoderCloser) Close() error {
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
func Negotiate(h http.Header) Format {
escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
@@ -153,7 +151,7 @@ func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := protodelim.MarshalTo(w, v)
+ _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
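
A hedged sketch of the negotiate-then-encode path whose protobuf branch now runs metric families through EscapeMetricFamily. The Accept header and the metric family below are invented for illustration.

```go
package main

import (
	"net/http"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Pretend these headers came from a scrape request.
	h := http.Header{}
	h.Set("Accept", "text/plain;version=0.0.4")

	format := expfmt.Negotiate(h)
	enc := expfmt.NewEncoder(os.Stdout, format)

	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(1027)}},
		},
	}
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
}
```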
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/expfmt.go b/operator/vendor/github.com/prometheus/common/expfmt/expfmt.go
index b2688656..c34c7de4 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -36,9 +36,11 @@ const (
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ //nolint:revive // Allow for underscores.
OpenMetricsVersion_0_0_1 = "0.0.1"
+ //nolint:revive // Allow for underscores.
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols. Do not do direct
@@ -54,8 +56,10 @@ const (
// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
@@ -188,8 +192,8 @@ func (f Format) FormatType() FormatType {
// Format contains a escaping=allow-utf-8 term, it will select NoEscaping. If a valid
// "escaping" term exists, that will be used. Otherwise, the global default will
// be returned.
-func (format Format) ToEscapingScheme() model.EscapingScheme {
- for _, p := range strings.Split(string(format), ";") {
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(f), ";") {
toks := strings.Split(p, "=")
if len(toks) != 2 {
continue
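
The receiver rename above does not change behavior; for reference, a small sketch of how a Format's escaping term maps to a scheme. The content-type strings are examples only.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.Format("text/plain; version=0.0.4; escaping=allow-utf-8")
	if f.ToEscapingScheme() == model.NoEscaping {
		fmt.Println("names may contain arbitrary UTF-8; no escaping needed")
	}

	f = expfmt.Format("text/plain; version=0.0.4; escaping=underscores")
	fmt.Println(f.ToEscapingScheme()) // underscores
}
```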
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/fuzz.go b/operator/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dfac962a..0290f6ab 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -17,7 +17,11 @@
package expfmt
-import "bytes"
+import (
+ "bytes"
+
+ "github.com/prometheus/common/model"
+)
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
@@ -26,9 +30,8 @@ import "bytes"
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
- parser := TextParser{}
+ parser := NewTextParser(model.UTF8Validation)
_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
if err != nil {
return 0
}
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/operator/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index a21ed4ec..8dbf6d04 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,11 +22,10 @@ import (
"strconv"
"strings"
+ dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/common/model"
-
- dto "github.com/prometheus/client_model/go"
)
type encoderOption struct {
@@ -249,7 +248,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
// Finally the samples, one line for each.
if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
- compliantName = compliantName + "_total"
+ compliantName += "_total"
}
for _, metric := range in.Metric {
switch metricType {
@@ -477,7 +476,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(name) {
+ if !model.LegacyValidation.IsValidMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -641,11 +640,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- err = (*e).Timestamp.CheckValid()
+ err = e.Timestamp.CheckValid()
if err != nil {
return written, err
}
- ts := (*e).Timestamp.AsTime()
+ ts := e.Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/text_create.go b/operator/vendor/github.com/prometheus/common/expfmt/text_create.go
index 4b86434b..c4e9c1bb 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -22,9 +22,9 @@ import (
"strings"
"sync"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
)
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
- if !model.IsValidLegacyMetricName(name) {
+ if !model.LegacyValidation.IsValidMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(name) {
+ if model.LegacyValidation.IsValidMetricName(name) {
return w.WriteString(name)
}
var written int
diff --git a/operator/vendor/github.com/prometheus/common/expfmt/text_parse.go b/operator/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b4607fe4..8f2edde3 100644
--- a/operator/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/operator/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -78,6 +78,14 @@ type TextParser struct {
// These indicate if the metric name from the current line being parsed is inside
// braces and if that metric name was found respectively.
currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
+ // scheme sets the desired ValidationScheme for names. Defaults to the invalid
+ // UnsetValidation.
+ scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+ return TextParser{scheme: nameValidationScheme}
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -126,6 +134,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
func (p *TextParser) reset(in io.Reader) {
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ p.currentLabelPairs = nil
if p.buf == nil {
p.buf = bufio.NewReader(in)
} else {
@@ -216,6 +225,9 @@ func (p *TextParser) startComment() stateFn {
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -244,6 +256,9 @@ func (p *TextParser) readingMetricName() stateFn {
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
// Now is the time to fix the type if it hasn't happened yet.
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
@@ -311,6 +326,9 @@ func (p *TextParser) startLabelName() stateFn {
switch p.currentByte {
case ',':
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
@@ -319,6 +337,10 @@ func (p *TextParser) startLabelName() stateFn {
return p.startLabelName
case '}':
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ p.currentLabelPairs = nil
+ return nil
+ }
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
}
@@ -341,25 +363,30 @@ func (p *TextParser) startLabelName() stateFn {
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ p.currentLabelPairs = nil
+ return nil
+ }
+ if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+ p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+ p.currentLabelPairs = nil
return nil
}
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
// labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+ (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
for _, l := range p.currentLabelPairs {
lName := l.GetName()
- if _, exists := labels[lName]; !exists {
- labels[lName] = struct{}{}
- } else {
+ if _, exists := labels[lName]; exists {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
p.currentLabelPairs = nil
return nil
}
+ labels[lName] = struct{}{}
}
return p.startLabelValue
}
@@ -440,7 +467,8 @@ func (p *TextParser) readingValue() stateFn {
// When we are here, we have read all the labels, so for the
// special case of a summary/histogram, we can finally find out
// if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ switch p.currentMF.GetType() {
+ case dto.MetricType_SUMMARY:
signature := model.LabelsToSignature(p.currentLabels)
if summary := p.summaries[signature]; summary != nil {
p.currentMetric = summary
@@ -448,7 +476,7 @@ func (p *TextParser) readingValue() stateFn {
p.summaries[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ case dto.MetricType_HISTOGRAM:
signature := model.LabelsToSignature(p.currentLabels)
if histogram := p.histograms[signature]; histogram != nil {
p.currentMetric = histogram
@@ -456,7 +484,7 @@ func (p *TextParser) readingValue() stateFn {
p.histograms[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else {
+ default:
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
if p.readTokenUntilWhitespace(); p.err != nil {
@@ -805,6 +833,10 @@ func (p *TextParser) setOrCreateCurrentMF() {
p.currentIsHistogramCount = false
p.currentIsHistogramSum = false
name := p.currentToken.String()
+ if !p.scheme.IsValidMetricName(name) {
+ p.parseError(fmt.Sprintf("invalid metric name %q", name))
+ return
+ }
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
return
}
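
A minimal sketch of the new constructor-based parser setup; the sample exposition text is invented, and only the NewTextParser call reflects the change above.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	input := `# TYPE requests_total counter
requests_total{path="/"} 42
`
	// A zero-value TextParser now carries the invalid UnsetValidation, so
	// parsers should be built with NewTextParser and an explicit scheme.
	p := expfmt.NewTextParser(model.UTF8Validation)
	fams, err := p.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range fams {
		fmt.Println(name, mf.GetMetric()[0].GetCounter().GetValue())
	}
}
```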
diff --git a/operator/vendor/github.com/prometheus/common/model/alert.go b/operator/vendor/github.com/prometheus/common/model/alert.go
index bd3a39e3..460f554f 100644
--- a/operator/vendor/github.com/prometheus/common/model/alert.go
+++ b/operator/vendor/github.com/prometheus/common/model/alert.go
@@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
diff --git a/operator/vendor/github.com/prometheus/common/model/labels.go b/operator/vendor/github.com/prometheus/common/model/labels.go
index 73b7aa3e..dfeb34be 100644
--- a/operator/vendor/github.com/prometheus/common/model/labels.go
+++ b/operator/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@ import (
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -32,6 +32,12 @@ const (
// MetricNameLabel is the label name indicating the metric name of a
// timeseries.
MetricNameLabel = "__name__"
+ // MetricTypeLabel is the label name indicating the metric type of
+ // timeseries as per the PROM-39 proposal.
+ MetricTypeLabel = "__type__"
+ // MetricUnitLabel is the label name indicating the metric unit of
+ // timeseries as per the PROM-39 proposal.
+ MetricUnitLabel = "__unit__"
// SchemeLabel is the name of the label that holds the scheme on which to
// scrape a target.
@@ -100,33 +106,21 @@ type LabelName string
// IsValid returns true iff the name matches the pattern of LabelNameRE when
// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- switch NameValidationScheme {
- case LegacyValidation:
- return ln.IsValidLegacy()
- case UTF8Validation:
- return utf8.ValidString(string(ln))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
+ return NameValidationScheme.IsValidLabelName(string(ln))
}
// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
// legacy names. It does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
func (ln LabelName) IsValidLegacy() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
+ return LegacyValidation.IsValidLabelName(string(ln))
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
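
A small sketch of the replacement API the deprecation notes above point to; the label names used are illustrative.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Preferred over the deprecated LabelName.IsValid / IsValidLegacy.
	fmt.Println(model.LegacyValidation.IsValidLabelName("instance"))  // true
	fmt.Println(model.LegacyValidation.IsValidLabelName("host.name")) // false
	fmt.Println(model.UTF8Validation.IsValidLabelName("host.name"))   // true
}
```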
diff --git a/operator/vendor/github.com/prometheus/common/model/labelset.go b/operator/vendor/github.com/prometheus/common/model/labelset.go
index d0ad88da..9de47b25 100644
--- a/operator/vendor/github.com/prometheus/common/model/labelset.go
+++ b/operator/vendor/github.com/prometheus/common/model/labelset.go
@@ -114,10 +114,10 @@ func (ls LabelSet) Clone() LabelSet {
}
// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(ls))
- for k, v := range l {
+ for k, v := range ls {
result[k] = v
}
@@ -140,7 +140,7 @@ func (ls LabelSet) FastFingerprint() Fingerprint {
}
// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
var m map[LabelName]LabelValue
if err := json.Unmarshal(b, &m); err != nil {
return err
@@ -153,6 +153,6 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
return fmt.Errorf("%q is not a valid label name", ln)
}
}
- *l = LabelSet(m)
+ *ls = LabelSet(m)
return nil
}
diff --git a/operator/vendor/github.com/prometheus/common/model/metric.go b/operator/vendor/github.com/prometheus/common/model/metric.go
index 5766107c..3feebf32 100644
--- a/operator/vendor/github.com/prometheus/common/model/metric.go
+++ b/operator/vendor/github.com/prometheus/common/model/metric.go
@@ -14,6 +14,7 @@
package model
import (
+ "encoding/json"
"errors"
"fmt"
"regexp"
@@ -23,17 +24,30 @@ import (
"unicode/utf8"
dto "github.com/prometheus/client_model/go"
+ "go.yaml.in/yaml/v2"
"google.golang.org/protobuf/proto"
)
var (
- // NameValidationScheme determines the method of name validation to be used by
- // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8
- // mode in isolation from other components that don't support UTF-8 may result
- // in bugs or other undefined behavior. This value can be set to
- // LegacyValidation during startup if a binary is not UTF-8-aware binaries. To
- // avoid need for locking, this value should be set once, ideally in an
- // init(), before multiple goroutines are started.
+ // NameValidationScheme determines the global default method of name
+ // validation to be used by all calls to IsValidMetricName() and
+ // LabelName.IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to the legacy name validation use
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable is here as an escape hatch for emergency cases,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to delay UTF-8 migrations in time or aid in debugging unforeseen results of
+ // the change. In such a case, a temporary assignment to `LegacyValidation`
+ // in the `init()` function of your main.go could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate
+ // Labels structure or package might have been a better choice. Given the
+ // change was made and many upgraded the common already, we live this as-is
+ // with this warning and learning for the future.
NameValidationScheme = UTF8Validation
// NameEscapingScheme defines the default way that names will be escaped when
@@ -50,16 +64,151 @@ var (
type ValidationScheme int
const (
- // LegacyValidation is a setting that requirets that metric and label names
+ // UnsetValidation represents an undefined ValidationScheme.
+ // Should not be used in practice.
+ UnsetValidation ValidationScheme = iota
+
+ // LegacyValidation is a setting that requires that all metric and label names
// conform to the original Prometheus character requirements described by
// MetricNameRE and LabelNameRE.
- LegacyValidation ValidationScheme = iota
+ LegacyValidation
// UTF8Validation only requires that metric and label names be valid UTF-8
// strings.
UTF8Validation
)
+var _ interface {
+ yaml.Marshaler
+ yaml.Unmarshaler
+ json.Marshaler
+ json.Unmarshaler
+ fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+ switch s {
+ case UnsetValidation:
+ return "unset"
+ case LegacyValidation:
+ return "legacy"
+ case UTF8Validation:
+ return "utf8"
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+ switch s {
+ case UnsetValidation:
+ return "", nil
+ case LegacyValidation, UTF8Validation:
+ return s.String(), nil
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+ var scheme string
+ if err := unmarshal(&scheme); err != nil {
+ return err
+ }
+ return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+ switch s {
+ case UnsetValidation:
+ return json.Marshal("")
+ case UTF8Validation, LegacyValidation:
+ return json.Marshal(s.String())
+ default:
+ return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+ }
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+ var repr string
+ if err := json.Unmarshal(bytes, &repr); err != nil {
+ return err
+ }
+ return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+ switch text {
+ case "":
+ // Don't change the value.
+ case LegacyValidation.String():
+ *s = LegacyValidation
+ case UTF8Validation.String():
+ *s = UTF8Validation
+ default:
+ return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+ }
+ return nil
+}
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(metricName) == 0 {
+ return false
+ }
+ for i, b := range metricName {
+ if !isValidLegacyRune(b, i) {
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(metricName) == 0 {
+ return false
+ }
+ return utf8.ValidString(metricName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+ }
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(labelName) == 0 {
+ return false
+ }
+ for i, b := range labelName {
+ // TODO: Apply De Morgan's law. Make sure there are tests for this.
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(labelName) == 0 {
+ return false
+ }
+ return utf8.ValidString(labelName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+ }
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+ return "validationScheme"
+}
+
type EscapingScheme int
const (
@@ -89,7 +238,7 @@ const (
// Accept header, the default NameEscapingScheme will be used.
EscapingKey = "escaping"
- // Possible values for Escaping Key:
+ // Possible values for Escaping Key.
AllowUTF8 = "allow-utf-8" // No escaping required.
EscapeUnderscores = "underscores"
EscapeDots = "dots"
@@ -163,34 +312,22 @@ func (m Metric) FastFingerprint() Fingerprint {
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is
// selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
func IsValidMetricName(n LabelValue) bool {
- switch NameValidationScheme {
- case LegacyValidation:
- return IsValidLegacyMetricName(string(n))
- case UTF8Validation:
- if len(n) == 0 {
- return false
- }
- return utf8.ValidString(string(n))
- default:
- panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
- }
+ return NameValidationScheme.IsValidMetricName(string(n))
}
// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
func IsValidLegacyMetricName(n string) bool {
- if len(n) == 0 {
- return false
- }
- for i, b := range n {
- if !isValidLegacyRune(b, i) {
- return false
- }
- }
- return true
+ return LegacyValidation.IsValidMetricName(n)
}
// EscapeMetricFamily escapes the given metric names and labels with the given
@@ -298,13 +435,14 @@ func EscapeName(name string, scheme EscapingScheme) string {
case DotsEscaping:
// Do not early return for legacy valid names, we still escape underscores.
for i, b := range name {
- if b == '_' {
+ switch {
+ case b == '_':
escaped.WriteString("__")
- } else if b == '.' {
+ case b == '.':
escaped.WriteString("_dot_")
- } else if isValidLegacyRune(b, i) {
+ case isValidLegacyRune(b, i):
escaped.WriteRune(b)
- } else {
+ default:
escaped.WriteString("__")
}
}
@@ -315,13 +453,14 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
escaped.WriteString("U__")
for i, b := range name {
- if b == '_' {
+ switch {
+ case b == '_':
escaped.WriteString("__")
- } else if isValidLegacyRune(b, i) {
+ case isValidLegacyRune(b, i):
escaped.WriteRune(b)
- } else if !utf8.ValidRune(b) {
+ case !utf8.ValidRune(b):
escaped.WriteString("_FFFD_")
- } else {
+ default:
escaped.WriteRune('_')
escaped.WriteString(strconv.FormatInt(int64(b), 16))
escaped.WriteRune('_')
@@ -333,7 +472,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
}
-// lower function taken from strconv.atoi
+// lower function taken from strconv.atoi.
func lower(c byte) byte {
return c | ('x' - 'X')
}
@@ -397,11 +536,12 @@ func UnescapeName(name string, scheme EscapingScheme) string {
}
r := lower(escapedName[i])
utf8Val *= 16
- if r >= '0' && r <= '9' {
+ switch {
+ case r >= '0' && r <= '9':
utf8Val += uint(r) - '0'
- } else if r >= 'a' && r <= 'f' {
+ case r >= 'a' && r <= 'f':
utf8Val += uint(r) - 'a' + 10
- } else {
+ default:
return name
}
i++
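
A brief sketch of the new ValidationScheme surface introduced above (methods replacing the deprecated package-level helpers, plus String/Set and JSON round-tripping); the metric names are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Metric-name checks now live on the scheme itself.
	fmt.Println(model.LegacyValidation.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.LegacyValidation.IsValidMetricName("http.requests.total")) // false
	fmt.Println(model.UTF8Validation.IsValidMetricName("http.requests.total"))   // true

	// The scheme round-trips through JSON as "legacy"/"utf8".
	b, _ := json.Marshal(model.UTF8Validation)
	fmt.Println(string(b)) // "utf8"

	// Set implements the pflag.Value interface, so a scheme can back a flag.
	var s model.ValidationScheme
	if err := s.Set("legacy"); err != nil {
		panic(err)
	}
	fmt.Println(s) // legacy
}
```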
diff --git a/operator/vendor/github.com/prometheus/common/model/time.go b/operator/vendor/github.com/prometheus/common/model/time.go
index 5727452c..1730b0fd 100644
--- a/operator/vendor/github.com/prometheus/common/model/time.go
+++ b/operator/vendor/github.com/prometheus/common/model/time.go
@@ -126,14 +126,14 @@ func (t *Time) UnmarshalJSON(b []byte) error {
p := strings.Split(string(b), ".")
switch len(p) {
case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
*t = Time(v * second)
case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
@@ -143,7 +143,7 @@ func (t *Time) UnmarshalJSON(b []byte) error {
if prec < 0 {
p[1] = p[1][:dotPrecision]
} else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
+ p[1] += strings.Repeat("0", prec)
}
va, err := strconv.ParseInt(p[1], 10, 32)
@@ -170,15 +170,15 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
-// Set implements pflag/flag.Value
+// Set implements pflag/flag.Value.
func (d *Duration) Set(s string) error {
var err error
*d, err = ParseDuration(s)
return err
}
-// Type implements pflag.Value
-func (d *Duration) Type() string {
+// Type implements pflag.Value.
+func (*Duration) Type() string {
return "duration"
}
@@ -201,6 +201,7 @@ var unitMap = map[string]struct {
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
+// Negative durations are not supported.
func ParseDuration(s string) (Duration, error) {
switch s {
case "0":
@@ -253,18 +254,36 @@ func ParseDuration(s string) (Duration, error) {
return 0, errors.New("duration out of range")
}
}
+
return Duration(dur), nil
}
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+ if s == "" || s[0] != '-' {
+ return ParseDuration(s)
+ }
+
+ d, err := ParseDuration(s[1:])
+
+ return -d, err
+}
+
func (d Duration) String() string {
var (
- ms = int64(time.Duration(d) / time.Millisecond)
- r = ""
+ ms = int64(time.Duration(d) / time.Millisecond)
+ r = ""
+ sign = ""
)
+
if ms == 0 {
return "0s"
}
+ if ms < 0 {
+ sign, ms = "-", -ms
+ }
+
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
@@ -286,7 +305,7 @@ func (d Duration) String() string {
f("s", 1000, false)
f("ms", 1, false)
- return r
+ return sign + r
}
// MarshalJSON implements the json.Marshaler interface.
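
A short sketch of the negative-duration support added above; the duration string is an arbitrary example.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDurationAllowNegative("-1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // -1h30m

	// ParseDuration itself still rejects a leading minus sign.
	if _, err := model.ParseDuration("-1h30m"); err != nil {
		fmt.Println("negative durations need ParseDurationAllowNegative")
	}
}
```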
diff --git a/operator/vendor/github.com/prometheus/common/model/value.go b/operator/vendor/github.com/prometheus/common/model/value.go
index 8050637d..a9995a37 100644
--- a/operator/vendor/github.com/prometheus/common/model/value.go
+++ b/operator/vendor/github.com/prometheus/common/model/value.go
@@ -191,7 +191,8 @@ func (ss SampleStream) String() string {
}
func (ss SampleStream) MarshalJSON() ([]byte, error) {
- if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ switch {
+ case len(ss.Histograms) > 0 && len(ss.Values) > 0:
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
@@ -202,7 +203,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) {
Histograms: ss.Histograms,
}
return json.Marshal(&v)
- } else if len(ss.Histograms) > 0 {
+ case len(ss.Histograms) > 0:
v := struct {
Metric Metric `json:"metric"`
Histograms []SampleHistogramPair `json:"histograms"`
@@ -211,7 +212,7 @@ func (ss SampleStream) MarshalJSON() ([]byte, error) {
Histograms: ss.Histograms,
}
return json.Marshal(&v)
- } else {
+ default:
v := struct {
Metric Metric `json:"metric"`
Values []SamplePair `json:"values"`
@@ -258,7 +259,7 @@ func (s Scalar) String() string {
// MarshalJSON implements json.Marshaler.
func (s Scalar) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+ return json.Marshal([...]interface{}{s.Timestamp, v})
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -349,9 +350,9 @@ func (m Matrix) Len() int { return len(m) }
func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
+func (m Matrix) String() string {
+ matCp := make(Matrix, len(m))
+ copy(matCp, m)
sort.Sort(matCp)
strs := make([]string, len(matCp))
diff --git a/operator/vendor/github.com/prometheus/common/model/value_histogram.go b/operator/vendor/github.com/prometheus/common/model/value_histogram.go
index 895e6a3e..91ce5b7a 100644
--- a/operator/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/operator/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -86,22 +86,22 @@ func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
}
-func (b HistogramBucket) String() string {
+func (s HistogramBucket) String() string {
var sb strings.Builder
- lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
- upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+ upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
if lowerInclusive {
sb.WriteRune('[')
} else {
sb.WriteRune('(')
}
- fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
if upperInclusive {
sb.WriteRune(']')
} else {
sb.WriteRune(')')
}
- fmt.Fprintf(&sb, ":%v", b.Count)
+ fmt.Fprintf(&sb, ":%v", s.Count)
return sb.String()
}
diff --git a/operator/vendor/github.com/prometheus/common/model/value_type.go b/operator/vendor/github.com/prometheus/common/model/value_type.go
index 726c50ee..078910f4 100644
--- a/operator/vendor/github.com/prometheus/common/model/value_type.go
+++ b/operator/vendor/github.com/prometheus/common/model/value_type.go
@@ -66,8 +66,8 @@ func (et *ValueType) UnmarshalJSON(b []byte) error {
return nil
}
-func (e ValueType) String() string {
- switch e {
+func (et ValueType) String() string {
+ switch et {
case ValNone:
return ""
case ValScalar:
diff --git a/operator/vendor/github.com/prometheus/procfs/.golangci.yml b/operator/vendor/github.com/prometheus/procfs/.golangci.yml
index 126df9e6..3c3bf910 100644
--- a/operator/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/operator/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,22 +1,45 @@
----
+version: "2"
linters:
enable:
- - errcheck
- - godot
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - testifylint
- - unused
-
-linter-settings:
- godot:
- capital: true
- exclude:
- # Ignore "See: URL"
- - 'See:'
- misspell:
- locale: US
+ - forbidigo
+ - godot
+ - misspell
+ - revive
+ - testifylint
+ settings:
+ forbidigo:
+ forbid:
+ - pattern: ^fmt\.Print.*$
+ msg: Do not commit print statements.
+ godot:
+ exclude:
+ # Ignore "See: URL".
+ - 'See:'
+ capital: true
+ misspell:
+ locale: US
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ settings:
+ goimports:
+ local-prefixes:
+ - github.com/prometheus/procfs
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/operator/vendor/github.com/prometheus/procfs/Makefile.common b/operator/vendor/github.com/prometheus/procfs/Makefile.common
index 16172923..0ed55c2b 100644
--- a/operator/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/operator/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
PROMU := $(FIRST_GOPATH)/bin/promu
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.59.0
+GOLANGCI_LINT_VERSION ?= v2.0.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -275,3 +275,9 @@ $(1)_precheck:
exit 1; \
fi
endef
+
+govulncheck: install-govulncheck
+ govulncheck ./...
+
+install-govulncheck:
+ command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/operator/vendor/github.com/prometheus/procfs/README.md b/operator/vendor/github.com/prometheus/procfs/README.md
index 1224816c..0718239c 100644
--- a/operator/vendor/github.com/prometheus/procfs/README.md
+++ b/operator/vendor/github.com/prometheus/procfs/README.md
@@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`.
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash
rm -rf testdata/fixtures
make test
```
-Next, make the required changes to the extracted files in the `fixtures` directory. When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff testdata/fixtures.ttar`.
diff --git a/operator/vendor/github.com/prometheus/procfs/arp.go b/operator/vendor/github.com/prometheus/procfs/arp.go
index cdcc8a7c..2e533441 100644
--- a/operator/vendor/github.com/prometheus/procfs/arp.go
+++ b/operator/vendor/github.com/prometheus/procfs/arp.go
@@ -23,9 +23,9 @@ import (
// Learned from include/uapi/linux/if_arp.h.
const (
- // completed entry (ha valid).
+ // Completed entry (ha valid).
ATFComplete = 0x02
- // permanent entry.
+ // Permanent entry.
ATFPermanent = 0x04
// Publish entry.
ATFPublish = 0x08
diff --git a/operator/vendor/github.com/prometheus/procfs/fs.go b/operator/vendor/github.com/prometheus/procfs/fs.go
index 4980c875..9bdaccc7 100644
--- a/operator/vendor/github.com/prometheus/procfs/fs.go
+++ b/operator/vendor/github.com/prometheus/procfs/fs.go
@@ -24,8 +24,14 @@ type FS struct {
isReal bool
}
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+ // DefaultMountPoint is the common mount point of the proc filesystem.
+ DefaultMountPoint = fs.DefaultProcMountPoint
+
+ // SectorSize represents the size of a sector in bytes.
+ // It is specific to Linux block I/O operations.
+ SectorSize = 512
+)
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
diff --git a/operator/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/operator/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
index 134767d6..1b5bdbdf 100644
--- a/operator/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
+++ b/operator/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -17,7 +17,7 @@
package procfs
// isRealProc returns true on architectures that don't have a Type argument
-// in their Statfs_t struct
-func isRealProc(mountPoint string) (bool, error) {
+// in their Statfs_t struct.
+func isRealProc(_ string) (bool, error) {
return true, nil
}
diff --git a/operator/vendor/github.com/prometheus/procfs/fscache.go b/operator/vendor/github.com/prometheus/procfs/fscache.go
index cf2e3eaa..7db86330 100644
--- a/operator/vendor/github.com/prometheus/procfs/fscache.go
+++ b/operator/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@ type Fscacheinfo struct {
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
+ // Number of page stores canceled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@ type Fscacheinfo struct {
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
- // Number of async ops cancelled
+ // Number of async ops canceled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
- // Number of async ops initialised
+ // Number of async ops initialized
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
diff --git a/operator/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/operator/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 3c18c761..3a43e839 100644
--- a/operator/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/operator/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -28,6 +28,9 @@ const (
// DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+ // DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+ DefaultSelinuxMountPoint = "/sys/fs/selinux"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/operator/vendor/github.com/prometheus/procfs/internal/util/parse.go b/operator/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 14272dc7..5a7d2df0 100644
--- a/operator/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/operator/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,6 +14,7 @@
package util
import (
+ "errors"
"os"
"strconv"
"strings"
@@ -110,3 +111,16 @@ func ParseBool(b string) *bool {
}
return &truth
}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ hexString := strings.TrimSpace(string(data))
+ if !strings.HasPrefix(hexString, "0x") {
+ return 0, errors.New("invalid format: hex string does not start with '0x'")
+ }
+ return strconv.ParseUint(hexString[2:], 16, 64)
+}
diff --git a/operator/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/operator/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index 1ab875ce..d5404a6d 100644
--- a/operator/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/operator/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -20,6 +20,8 @@ package util
import (
"bytes"
"os"
+ "strconv"
+ "strings"
"syscall"
)
@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
return string(bytes.TrimSpace(b[:n])), nil
}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
diff --git a/operator/vendor/github.com/prometheus/procfs/mountstats.go b/operator/vendor/github.com/prometheus/procfs/mountstats.go
index 75a3b6c8..50caa732 100644
--- a/operator/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/operator/vendor/github.com/prometheus/procfs/mountstats.go
@@ -45,11 +45,11 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
- // kernel version >= 4.14 MaxLen
+ // Kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
- // kernel version <= 4.2 MinLen
+ // Kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
switch statVersion {
case statVersion10:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport10UDPLen
- } else {
+ default:
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
}
case statVersion11:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport11UDPLen
- } else if protocol == "rdma" {
+ case "rdma":
expectedLength = fieldTransport11RDMAMinLen
- } else {
+ default:
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// For the udp RPC transport there is no connection count, connect idle time,
// or idle time (fields #3, #4, and #5); all other fields are the same. So
// we set them to 0 here.
- if protocol == "udp" {
+ switch protocol {
+ case "udp":
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
- } else if protocol == "tcp" {
+ case "tcp":
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
- } else if protocol == "rdma" {
+ case "rdma":
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
diff --git a/operator/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/operator/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 00000000..f50b38e3
--- /dev/null
+++ b/operator/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<pid>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<pid>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+ netDevSNMP6 := make(NetDevSNMP6)
+
+ // The net/dev_snmp6 folders contain one file per interface
+ ifaceFiles, err := os.ReadDir(dir)
+ if err != nil {
+ // On systems with IPv6 disabled, this directory won't exist.
+ // Do nothing.
+ if errors.Is(err, os.ErrNotExist) {
+ return netDevSNMP6, err
+ }
+ return netDevSNMP6, err
+ }
+
+ for _, iFaceFile := range ifaceFiles {
+ f, err := os.Open(dir + "/" + iFaceFile.Name())
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ defer f.Close()
+
+ netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ }
+
+ return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+ m := make(map[string]uint64)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ key, val := stat[0], stat[1]
+
+ // Expect stat name to contain "6" or be "ifIndex"
+ if strings.Contains(key, "6") || key == "ifIndex" {
+ v, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ m[key] = v
+ }
+ }
+ return m, scanner.Err()
+}
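
A minimal sketch of the new NetDevSNMP6 accessor added in this file; the "Ip6InReceives" counter name is one example of the stat names exposed by the kernel, and availability depends on IPv6 being enabled.

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	// Per-interface IPv6 SNMP counters, keyed by interface and then stat name.
	stats, err := fs.NetDevSNMP6()
	if err != nil {
		panic(err) // e.g. the directory is absent when IPv6 is disabled
	}
	for iface, counters := range stats {
		fmt.Println(iface, counters["Ip6InReceives"])
	}
}
```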
diff --git a/operator/vendor/github.com/prometheus/procfs/net_ip_socket.go b/operator/vendor/github.com/prometheus/procfs/net_ip_socket.go
index b70f1fc7..19e3378f 100644
--- a/operator/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/operator/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@ import (
)
const (
- // readLimit is used by io.LimitReader while reading the content of the
+ // Maximum size limit used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -50,12 +50,12 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
- // Drops shows the total number of dropped packets of all UPD sockets.
+ // Drops shows the total number of dropped packets of all UDP sockets.
Drops *uint64
}
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // A single line parser for fields from /proc/net/{t,u}dp{,6}.
+ // Fields which are not used by IPSocket are skipped.
// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
diff --git a/operator/vendor/github.com/prometheus/procfs/net_protocols.go b/operator/vendor/github.com/prometheus/procfs/net_protocols.go
index b6c77b70..8d4b1ac0 100644
--- a/operator/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/operator/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
if err != nil {
return nil, err
}
- if fields[4] == enabled {
+ switch fields[4] {
+ case enabled:
line.Pressure = 1
- } else if fields[4] == disabled {
+ case disabled:
line.Pressure = 0
- } else {
+ default:
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
- if fields[6] == enabled {
+ switch fields[6] {
+ case enabled:
line.Slab = true
- } else if fields[6] == disabled {
+ case disabled:
line.Slab = false
- } else {
+ default:
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
}
for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
+ switch capabilities[i] {
+ case "y":
*capabilityFields[i] = true
- } else if capabilities[i] == "n" {
+ case "n":
*capabilityFields[i] = false
- } else {
+ default:
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
diff --git a/operator/vendor/github.com/prometheus/procfs/net_tcp.go b/operator/vendor/github.com/prometheus/procfs/net_tcp.go
index 52776295..0396d720 100644
--- a/operator/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/operator/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@ type (
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
diff --git a/operator/vendor/github.com/prometheus/procfs/net_unix.go b/operator/vendor/github.com/prometheus/procfs/net_unix.go
index d868cebd..d7e0cacb 100644
--- a/operator/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/operator/vendor/github.com/prometheus/procfs/net_unix.go
@@ -121,12 +121,12 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
return &nu, nil
}
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
- if l < min {
- return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
+ if l < minFields {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
}
// Field offsets are as follows:
@@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
}
// Path field is optional.
- if l > min {
+ if l > minFields {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
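Renaming the `min` parameter to `minFields` most likely avoids shadowing the predeclared `min` builtin introduced in Go 1.21 (the motivation is inferred; the rename itself is all the diff shows). A short illustration of the shadowing problem, assuming Go 1.21 or newer:

```go
package main

import "fmt"

// With a parameter named min, the predeclared min() builtin is shadowed
// inside the body and can no longer be called there.
func clampShadowed(v, min int) int {
	// return min(v, 10) // would not compile: min is an int here, not a function
	if v < min {
		return min
	}
	return v
}

// Renaming the parameter (as the vendored code does with minFields)
// keeps the builtin usable.
func clamp(v, minFields int) int {
	return min(max(v, minFields), 100)
}

func main() {
	fmt.Println(clampShadowed(3, 5), clamp(3, 5)) // 5 5
}
```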
diff --git a/operator/vendor/github.com/prometheus/procfs/proc.go b/operator/vendor/github.com/prometheus/procfs/proc.go
index 14279636..368187fa 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc.go
@@ -37,9 +37,9 @@ type Proc struct {
type Procs []Proc
var (
- ErrFileParse = errors.New("Error Parsing File")
- ErrFileRead = errors.New("Error Reading File")
- ErrMountPoint = errors.New("Error Accessing Mount point")
+ ErrFileParse = errors.New("error parsing file")
+ ErrFileRead = errors.New("error reading file")
+ ErrMountPoint = errors.New("error accessing mount point")
)
func (p Procs) Len() int { return len(p) }
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
if err != nil {
return Proc{}, err
}
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
if err != nil {
return Proc{}, err
}
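Two small cleanups in proc.go: the sentinel error strings are lower-cased to follow Go's convention that error text gets wrapped mid-sentence, and `strings.Replace(..., -1)` becomes the equivalent `strings.ReplaceAll`. Since callers are expected to match these sentinels with `errors.Is` rather than by string, the message change should not break matching; a minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// Lower-cased sentinel, mirroring the vendored rename.
var ErrFileParse = errors.New("error parsing file")

func parseSomething(raw string) error {
	if raw == "" {
		// Wrapping with %w keeps errors.Is(err, ErrFileParse) working
		// even though the message text changed.
		return fmt.Errorf("%w: empty input", ErrFileParse)
	}
	return nil
}

func main() {
	err := parseSomething("")
	fmt.Println(errors.Is(err, ErrFileParse)) // true
}
```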
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_cgroup.go b/operator/vendor/github.com/prometheus/procfs/proc_cgroup.go
index daeed7f5..4a64347c 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -24,7 +24,7 @@ import (
)
// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_io.go b/operator/vendor/github.com/prometheus/procfs/proc_io.go
index 776f3497..d15b66dd 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) {
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
+ "cancelled_write_bytes: %d\n" //nolint:misspell
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_netstat.go b/operator/vendor/github.com/prometheus/procfs/proc_netstat.go
index 8e3ff4d7..4248c171 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = &value
+ procNetstat.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = &value
+ procNetstat.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = &value
+ procNetstat.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = &value
+ procNetstat.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = &value
+ procNetstat.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = &value
+ procNetstat.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = &value
+ procNetstat.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = &value
+ procNetstat.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = &value
+ procNetstat.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = &value
+ procNetstat.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = &value
+ procNetstat.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = &value
+ procNetstat.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = &value
+ procNetstat.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = &value
+ procNetstat.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = &value
+ procNetstat.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = &value
+ procNetstat.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = &value
+ procNetstat.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = &value
+ procNetstat.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = &value
+ procNetstat.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = &value
+ procNetstat.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = &value
+ procNetstat.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = &value
+ procNetstat.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = &value
+ procNetstat.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = &value
+ procNetstat.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = &value
+ procNetstat.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = &value
+ procNetstat.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = &value
+ procNetstat.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = &value
+ procNetstat.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = &value
+ procNetstat.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = &value
+ procNetstat.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = &value
+ procNetstat.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = &value
+ procNetstat.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = &value
+ procNetstat.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = &value
+ procNetstat.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = &value
+ procNetstat.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = &value
+ procNetstat.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = &value
+ procNetstat.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = &value
+ procNetstat.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = &value
+ procNetstat.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = &value
+ procNetstat.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = &value
+ procNetstat.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = &value
+ procNetstat.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = &value
+ procNetstat.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = &value
+ procNetstat.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = &value
+ procNetstat.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = &value
+ procNetstat.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = &value
+ procNetstat.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = &value
+ procNetstat.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = &value
+ procNetstat.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = &value
+ procNetstat.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = &value
+ procNetstat.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = &value
+ procNetstat.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = &value
+ procNetstat.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = &value
+ procNetstat.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = &value
+ procNetstat.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = &value
+ procNetstat.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = &value
+ procNetstat.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = &value
+ procNetstat.TCPRcvCoalesce = &value
case "TCPRcvQDrop":
- procNetstat.TcpExt.TCPRcvQDrop = &value
+ procNetstat.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = &value
+ procNetstat.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = &value
+ procNetstat.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = &value
+ procNetstat.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = &value
+ procNetstat.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = &value
+ procNetstat.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = &value
+ procNetstat.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = &value
+ procNetstat.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = &value
+ procNetstat.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
+ procNetstat.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
+ procNetstat.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
+ procNetstat.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = &value
+ procNetstat.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
+ procNetstat.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = &value
+ procNetstat.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = &value
+ procNetstat.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
+ procNetstat.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = &value
+ procNetstat.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
+ procNetstat.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = &value
+ procNetstat.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = &value
+ procNetstat.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = &value
+ procNetstat.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = &value
+ procNetstat.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = &value
+ procNetstat.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = &value
+ procNetstat.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
+ procNetstat.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = &value
+ procNetstat.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = &value
+ procNetstat.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
+ procNetstat.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
+ procNetstat.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = &value
+ procNetstat.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = &value
+ procNetstat.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = &value
+ procNetstat.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = &value
+ procNetstat.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = &value
+ procNetstat.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = &value
+ procNetstat.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = &value
+ procNetstat.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = &value
+ procNetstat.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = &value
+ procNetstat.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = &value
+ procNetstat.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = &value
+ procNetstat.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = &value
+ procNetstat.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = &value
+ procNetstat.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = &value
+ procNetstat.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = &value
+ procNetstat.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = &value
+ procNetstat.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = &value
+ procNetstat.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = &value
+ procNetstat.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = &value
+ procNetstat.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = &value
+ procNetstat.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = &value
+ procNetstat.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = &value
+ procNetstat.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = &value
+ procNetstat.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = &value
+ procNetstat.ReasmOverlaps = &value
}
}
}
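The long runs of edits above only drop the intermediate `TcpExt`/`IpExt` selectors; they compile because `ProcNetstat` embeds those structs, so their fields are promoted onto the outer type. A sketch of the mechanism with hypothetical stand-in types (not the vendored definitions):

```go
package main

import "fmt"

// Hypothetical stand-ins for the vendored structs.
type TcpExt struct{ SyncookiesSent *float64 }
type IpExt struct{ InNoRoutes *float64 }

// Embedding (a field with no name) promotes the inner fields onto ProcNetstat.
type ProcNetstat struct {
	PID int
	TcpExt
	IpExt
}

func main() {
	v := 42.0
	var stats ProcNetstat
	stats.SyncookiesSent = &v // promoted: shorthand for stats.TcpExt.SyncookiesSent
	fmt.Println(*stats.TcpExt.SyncookiesSent, stats.InNoRoutes == nil)
}
```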
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_smaps.go b/operator/vendor/github.com/prometheus/procfs/proc_smaps.go
index 09060e82..9a297afc 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -19,7 +19,6 @@ package procfs
import (
"bufio"
"errors"
- "fmt"
"os"
"regexp"
"strconv"
@@ -29,7 +28,7 @@ import (
)
var (
- // match the header line before each mapped zone in `/proc/pid/smaps`.
+ // Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
@@ -117,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
- fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_snmp.go b/operator/vendor/github.com/prometheus/procfs/proc_snmp.go
index b9d2cf64..4bdc90b0 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = &value
+ procSnmp.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = &value
+ procSnmp.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = &value
+ procSnmp.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = &value
+ procSnmp.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = &value
+ procSnmp.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = &value
+ procSnmp.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = &value
+ procSnmp.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = &value
+ procSnmp.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = &value
+ procSnmp.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = &value
+ procSnmp.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = &value
+ procSnmp.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = &value
+ procSnmp.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = &value
+ procSnmp.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = &value
+ procSnmp.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = &value
+ procSnmp.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = &value
+ procSnmp.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = &value
+ procSnmp.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = &value
+ procSnmp.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = &value
+ procSnmp.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = &value
+ procSnmp.InMsgs = &value
case "InErrors":
procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = &value
+ procSnmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = &value
+ procSnmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = &value
+ procSnmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = &value
+ procSnmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = &value
+ procSnmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = &value
+ procSnmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = &value
+ procSnmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = &value
+ procSnmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = &value
+ procSnmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = &value
+ procSnmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = &value
+ procSnmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = &value
+ procSnmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = &value
+ procSnmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = &value
+ procSnmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = &value
+ procSnmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = &value
+ procSnmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = &value
+ procSnmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = &value
+ procSnmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = &value
+ procSnmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = &value
+ procSnmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = &value
+ procSnmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = &value
+ procSnmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = &value
+ procSnmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = &value
+ procSnmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = &value
+ procSnmp.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = &value
+ procSnmp.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = &value
+ procSnmp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = &value
+ procSnmp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = &value
+ procSnmp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = &value
+ procSnmp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = &value
+ procSnmp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = &value
+ procSnmp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = &value
+ procSnmp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = &value
+ procSnmp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = &value
+ procSnmp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = &value
+ procSnmp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = &value
+ procSnmp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = &value
+ procSnmp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = &value
+ procSnmp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = &value
+ procSnmp.OutRsts = &value
case "InCsumErrors":
procSnmp.Tcp.InCsumErrors = &value
}
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_snmp6.go b/operator/vendor/github.com/prometheus/procfs/proc_snmp6.go
index 3059cc6a..fb7fd399 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = &value
+ procSnmp6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = &value
+ procSnmp6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = &value
+ procSnmp6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = &value
+ procSnmp6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = &value
+ procSnmp6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = &value
+ procSnmp6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = &value
+ procSnmp6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = &value
+ procSnmp6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = &value
+ procSnmp6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = &value
+ procSnmp6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = &value
+ procSnmp6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = &value
+ procSnmp6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = &value
+ procSnmp6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = &value
+ procSnmp6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = &value
+ procSnmp6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = &value
+ procSnmp6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = &value
+ procSnmp6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = &value
+ procSnmp6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = &value
+ procSnmp6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = &value
+ procSnmp6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = &value
+ procSnmp6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = &value
+ procSnmp6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = &value
+ procSnmp6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = &value
+ procSnmp6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = &value
+ procSnmp6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = &value
+ procSnmp6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = &value
+ procSnmp6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = &value
+ procSnmp6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = &value
+ procSnmp6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = &value
+ procSnmp6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = &value
+ procSnmp6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = &value
+ procSnmp6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = &value
+ procSnmp6.InMsgs = &value
case "InErrors":
procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = &value
+ procSnmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = &value
+ procSnmp6.OutErrors = &value
case "InCsumErrors":
procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = &value
+ procSnmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = &value
+ procSnmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = &value
+ procSnmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = &value
+ procSnmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = &value
+ procSnmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = &value
+ procSnmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = &value
+ procSnmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = &value
+ procSnmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = &value
+ procSnmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = &value
+ procSnmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = &value
+ procSnmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = &value
+ procSnmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = &value
+ procSnmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = &value
+ procSnmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = &value
+ procSnmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = &value
+ procSnmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = &value
+ procSnmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = &value
+ procSnmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = &value
+ procSnmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = &value
+ procSnmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = &value
+ procSnmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = &value
+ procSnmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = &value
+ procSnmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = &value
+ procSnmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = &value
+ procSnmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = &value
+ procSnmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = &value
+ procSnmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = &value
+ procSnmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = &value
+ procSnmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = &value
+ procSnmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = &value
+ procSnmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = &value
+ procSnmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = &value
+ procSnmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = &value
+ procSnmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = &value
+ procSnmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = &value
+ procSnmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = &value
+ procSnmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = &value
+ procSnmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = &value
+ procSnmp6.OutType143 = &value
}
case "Udp6":
switch key {
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "InCsumErrors":
procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = &value
+ procSnmp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_status.go b/operator/vendor/github.com/prometheus/procfs/proc_status.go
index a055197c..dd8aa568 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_status.go
@@ -146,7 +146,11 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
}
}
case "NSpid":
- s.NSpids = calcNSPidsList(vString)
+ nspids, err := calcNSPidsList(vString)
+ if err != nil {
+ return err
+ }
+ s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -222,17 +226,17 @@ func calcCpusAllowedList(cpuString string) []uint64 {
return g
}
-func calcNSPidsList(nspidsString string) []uint64 {
- s := strings.Split(nspidsString, " ")
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+ s := strings.Split(nspidsString, "\t")
var nspids []uint64
for _, nspid := range s {
- nspid, _ := strconv.ParseUint(nspid, 10, 64)
- if nspid == 0 {
- continue
+ nspid, err := strconv.ParseUint(nspid, 10, 64)
+ if err != nil {
+ return nil, err
}
nspids = append(nspids, nspid)
}
- return nspids
+ return nspids, nil
}
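calcNSPidsList now splits on tabs, which is how the NSpid row in /proc/[pid]/status separates its values, and it propagates strconv failures instead of silently skipping them (previously a parse error produced a 0 that was then dropped). A self-contained sketch of the reworked parser:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// calcNSPidsList mirrors the reworked vendored helper: tab-separated
// NSpid values, with parse errors returned to the caller.
func calcNSPidsList(nspidsString string) ([]uint64, error) {
	var nspids []uint64
	for _, field := range strings.Split(nspidsString, "\t") {
		nspid, err := strconv.ParseUint(field, 10, 64)
		if err != nil {
			return nil, err
		}
		nspids = append(nspids, nspid)
	}
	return nspids, nil
}

func main() {
	fmt.Println(calcNSPidsList("2894\t1")) // [2894 1] <nil>
	fmt.Println(calcNSPidsList("2894 1"))  // nil plus a strconv error: spaces no longer split
}
```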
diff --git a/operator/vendor/github.com/prometheus/procfs/proc_sys.go b/operator/vendor/github.com/prometheus/procfs/proc_sys.go
index 5eefbe2e..3810d1ac 100644
--- a/operator/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/operator/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -21,7 +21,7 @@ import (
)
func sysctlToPath(sysctl string) string {
- return strings.Replace(sysctl, ".", "/", -1)
+ return strings.ReplaceAll(sysctl, ".", "/")
}
func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
diff --git a/operator/vendor/github.com/prometheus/procfs/softirqs.go b/operator/vendor/github.com/prometheus/procfs/softirqs.go
index 28708e07..403e6ae7 100644
--- a/operator/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/operator/vendor/github.com/prometheus/procfs/softirqs.go
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
if len(parts) < 2 {
continue
}
- switch {
- case parts[0] == "HI:":
+ switch parts[0] {
+ case "HI:":
perCPU := parts[1:]
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TIMER:":
+ case "TIMER:":
perCPU := parts[1:]
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_TX:":
+ case "NET_TX:":
perCPU := parts[1:]
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "NET_RX:":
+ case "NET_RX:":
perCPU := parts[1:]
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "BLOCK:":
+ case "BLOCK:":
perCPU := parts[1:]
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "IRQ_POLL:":
+ case "IRQ_POLL:":
perCPU := parts[1:]
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "TASKLET:":
+ case "TASKLET:":
perCPU := parts[1:]
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "SCHED:":
+ case "SCHED:":
perCPU := parts[1:]
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "HRTIMER:":
+ case "HRTIMER:":
perCPU := parts[1:]
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
- case parts[0] == "RCU:":
+ case "RCU:":
perCPU := parts[1:]
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
diff --git a/operator/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/operator/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 7e19eba0..ffb24e8e 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
}
// Less asserts that the first element is less than the second
@@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
@@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
}
// Positive asserts that the specified element is positive
@@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not positive", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// Negative asserts that the specified element is negative
@@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not negative", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
@@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
- return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ return Fail(t, failMessage, msgAndArgs...)
}
return true
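compareTwoValues no longer treats its failure message as a printf template; callers render the compared values into the message up front with fmt.Sprintf and pass an opaque string down. Behavior of passing assertions is unchanged, so existing tests keep working; a small usage sketch written as a test file:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestOrdering exercises the comparison helpers whose failure messages
// are now pre-rendered instead of being formatted inside compareTwoValues.
func TestOrdering(t *testing.T) {
	assert.Greater(t, 2, 1)
	assert.GreaterOrEqual(t, 2, 2)
	assert.Less(t, 1, 2)
	assert.LessOrEqual(t, 1, 1)
	assert.Positive(t, 3)
	assert.Negative(t, -3)
}
```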
diff --git a/operator/vendor/github.com/stretchr/testify/assert/assertion_format.go b/operator/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 19063416..c592f6ad 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Errorf(t, err, "error message %s", "formatted")
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
+}
+
// IsTypef asserts that the specified objects are of the same type.
+//
+// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
@@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
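assertion_format.go picks up the formatted variant of the new negative type assertion (IsNotTypef) and documents the map/slice mixing rules for Subset/NotSubset. A short usage sketch for the type assertions, whose signatures come straight from the generated code above:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type MyStruct struct{}
type NotMyStruct struct{}

// TestTypes pairs the existing positive type assertion with the newly
// added negative one; both compare dynamic types, not values.
func TestTypes(t *testing.T) {
	assert.IsType(t, &MyStruct{}, &MyStruct{})
	assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
	assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "unexpected type %T", &MyStruct{})
}
```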
diff --git a/operator/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/operator/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 21629087..58db9284 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
return ElementsMatchf(a.t, listA, listB, msg, args...)
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Error(err)
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Errorf(err, "error message %s", "formatted")
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
return IsNonIncreasingf(a.t, object, msg, args...)
}
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotTypef(a.t, theType, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
+//
+// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
}
// IsTypef asserts that the specified objects are of the same type.
+//
+// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
return NotElementsMatchf(a.t, listA, listB, msg, args...)
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
@@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
// if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1])
@@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
+// a.Subset([1, 2, 3], {1: "one", 2: "two"})
+// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
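The regenerated forwarders inherit the clarified Empty/NotEmpty documentation: zero values, zero-length slices/maps/channels, and nil pointers (or pointers to "empty" values) all count as empty. A brief sketch of those rules as assertions:

```go
package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestEmptiness follows the clarified Empty docs.
func TestEmptiness(t *testing.T) {
	var p *int
	assert.Empty(t, "")          // zero value
	assert.Empty(t, []int{})     // zero length
	assert.Empty(t, p)           // nil pointer
	assert.NotEmpty(t, []int{0}) // non-zero length, even though the element is zero
}
```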
diff --git a/operator/vendor/github.com/stretchr/testify/assert/assertion_order.go b/operator/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 1d2f7182..2fdf80fd 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
diff --git a/operator/vendor/github.com/stretchr/testify/assert/assertions.go b/operator/vendor/github.com/stretchr/testify/assert/assertions.go
index 4e91332b..de8de0cb 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/
// of each stack frame leading from the current test to the assert call that
// failed.
func CallerInfo() []string {
-
var pc uintptr
- var ok bool
var file string
var line int
var name string
+ const stackFrameBufferSize = 10
+ pcs := make([]uintptr, stackFrameBufferSize)
+
callers := []string{}
- for i := 0; ; i++ {
- pc, file, line, ok = runtime.Caller(i)
- if !ok {
- // The breaks below failed to terminate the loop, and we ran off the
- // end of the call stack.
- break
- }
+ offset := 1
- // This is a huge edge case, but it will panic if this is the case, see #180
- if file == "" {
- break
- }
+ for {
+ n := runtime.Callers(offset, pcs)
- f := runtime.FuncForPC(pc)
- if f == nil {
- break
- }
- name = f.Name()
-
- // testing.tRunner is the standard library function that calls
- // tests. Subtests are called directly by tRunner, without going through
- // the Test/Benchmark/Example function that contains the t.Run calls, so
- // with subtests we should break when we hit tRunner, without adding it
- // to the list of callers.
- if name == "testing.tRunner" {
+ if n == 0 {
break
}
- parts := strings.Split(file, "/")
- if len(parts) > 1 {
- filename := parts[len(parts)-1]
- dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ frames := runtime.CallersFrames(pcs[:n])
+
+ for {
+ frame, more := frames.Next()
+ pc = frame.PC
+ file = frame.File
+ line = frame.Line
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "" {
+ break
}
- }
- // Drop the package
- segments := strings.Split(name, ".")
- name = segments[len(segments)-1]
- if isTest(name, "Test") ||
- isTest(name, "Benchmark") ||
- isTest(name, "Example") {
- break
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ if len(parts) > 1 {
+ filename := parts[len(parts)-1]
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ dotPos := strings.LastIndexByte(name, '.')
+ name = name[dotPos+1:]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+
+ if !more {
+ break
+ }
}
+
+ // Next batch
+ offset += cap(pcs)
}
return callers
@@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
return true
}
+func isType(expectedType, object interface{}) bool {
+ return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
+}
+
// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+//
+// assert.IsType(t, &MyStruct{}, &MyStruct{})
+func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
+ if isType(expectedType, object) {
+ return true
+ }
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
+}
- if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
- return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isType(theType, object) {
+ return true
}
-
- return true
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
}
// Equal asserts that two objects are equal.
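// A brief usage sketch for the IsType/IsNotType pair added above: both assertions compare
// only the dynamic types of their operands. Assumes this vendored testify version; Widget
// and Gadget are hypothetical types used for illustration.
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type Widget struct{}
type Gadget struct{}

func TestWidgetType(t *testing.T) {
	var v interface{} = &Widget{}
	assert.IsType(t, &Widget{}, v)    // passes: both values are *Widget
	assert.IsNotType(t, &Gadget{}, v) // passes: *Gadget is a different concrete type
}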
@@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
}
return true
-
}
// validateEqualArgs checks whether provided arguments can be safely used in the
@@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
if !same {
// both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
- "expected: %p %#v\n"+
- "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ "expected: %p %#[1]v\n"+
+ "actual : %p %#[2]v",
+ expected, actual), msgAndArgs...)
}
return true
@@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
same, ok := samePointers(expected, actual)
if !ok {
- //fails when the arguments are not pointers
+ // fails when the arguments are not pointers
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
}
if same {
return Fail(t, fmt.Sprintf(
- "Expected and actual point to the same object: %p %#v",
- expected, expected), msgAndArgs...)
+ "Expected and actual point to the same object: %p %#[1]v",
+ expected), msgAndArgs...)
}
return true
}
@@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false, false //not both are pointers
+ return false, false // not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
@@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
}
return true
-
}
// EqualExportedValues asserts that the types of two objects are equal and their public
@@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
}
return Equal(t, expected, actual, msgAndArgs...)
-
}
// NotNil asserts that the specified object is not nil.
@@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool {
-
// get nil case out of the way
if object == nil {
return true
}
- objValue := reflect.ValueOf(object)
+ return isEmptyValue(reflect.ValueOf(object))
+}
+// isEmptyValue gets whether the specified reflect.Value is considered empty or not.
+func isEmptyValue(objValue reflect.Value) bool {
+ if objValue.IsZero() {
+ return true
+ }
+ // Special cases of non-zero values that we consider empty
switch objValue.Kind() {
// collection types are empty when they have no element
+ // Note: array types are empty when they match their zero-initialized state.
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
+ // non-nil pointers are empty if the value they point to is empty
case reflect.Ptr:
- if objValue.IsNil() {
- return true
- }
- deref := objValue.Elem().Interface()
- return isEmpty(deref)
- // for all other types, compare against the zero value
- // array types are empty when they match their zero-initialized state
- default:
- zero := reflect.Zero(objValue.Type())
- return reflect.DeepEqual(object, zero.Interface())
+ return isEmptyValue(objValue.Elem())
}
+ return false
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
+//
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
//
// assert.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object)
if !pass {
@@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
return pass
-
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
@@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
}
return pass
-
}
// getLen tries to get the length of an object.
@@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
}
return true
-
}
// False asserts that the specified value is false.
@@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
}
return true
-
}
// NotEqual asserts that the specified values are NOT equal.
@@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
}
return true
-
}
// NotEqualValues asserts that two objects are not equal even when converted to the same type
@@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (true, false) if element was not found.
// return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list)
if listType == nil {
@@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) {
}
}
return true, false
-
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
@@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
}
return true
-
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
@@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
return true
-
}
-// Subset asserts that the specified list(array, slice...) or map contains all
-// elements given in the specified subset list(array, slice...) or map.
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+// assert.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
}
subsetKind := reflect.TypeOf(subset).Kind()
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
@@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
}
subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
@@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true
}
-// NotSubset asserts that the specified list(array, slice...) or map does NOT
-// contain all elements given in the specified subset list(array, slice...) or
-// map.
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
// assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
}
subsetKind := reflect.TypeOf(subset).Kind()
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
@@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
}
subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return true
@@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Error(t, err)
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
if h, ok := t.(tHelper); ok {
@@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool {
default:
return r.MatchString(fmt.Sprint(v))
}
-
}
// Regexp asserts that a specified regexp matches a string.
@@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
}
return !match
-
}
// Zero asserts that i is the zero value for its type.
@@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return true
}
- tick = ticker.C
+ tickC = ticker.C
}
}
}
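// A brief sketch of Eventually after the polling rework above: the condition is evaluated
// once immediately and then on every tick until waitFor elapses. The atomic flag flipped
// by a timer is a hypothetical stand-in for real asynchronous work.
package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestBecomesReady(t *testing.T) {
	var ready atomic.Bool
	time.AfterFunc(50*time.Millisecond, func() { ready.Store(true) })
	assert.Eventually(t, ready.Load, time.Second, 10*time.Millisecond)
}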
@@ -1964,6 +2028,9 @@ type CollectT struct {
errors []error
}
+// Helper is like [testing.T.Helper] but does nothing.
+func (CollectT) Helper() {}
+
// Errorf collects the error.
func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...))
@@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
var lastFinishedTickErrs []error
ch := make(chan *CollectT, 1)
+ checkCond := func() {
+ collect := new(CollectT)
+ defer func() {
+ ch <- collect
+ }()
+ condition(collect)
+ }
+
timer := time.NewTimer(waitFor)
defer timer.Stop()
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err)
}
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() {
- collect := new(CollectT)
- defer func() {
- ch <- collect
- }()
- condition(collect)
- }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case collect := <-ch:
if !collect.failed() {
return true
}
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = collect.errors
- tick = ticker.C
+ tickC = ticker.C
}
}
}
@@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return true
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return Fail(t, "Condition satisfied", msgAndArgs...)
}
- tick = ticker.C
+ tickC = ticker.C
}
}
}
@@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
var expectedText string
if target != nil {
expectedText = target.Error()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
+ }
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+
@@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
expectedText = target.Error()
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
@@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
return true
}
- chain := buildErrorChainString(err)
+ expectedType := reflect.TypeOf(target).Elem().String()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
+ "expected: %s", expectedType), msgAndArgs...)
+ }
+
+ chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
- "expected: %q\n"+
- "in chain: %s", target, chain,
+ "expected: %s\n"+
+ "in chain: %s", expectedType, chain,
), msgAndArgs...)
}
@@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa
return true
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
- "found: %q\n"+
- "in chain: %s", target, chain,
+ "found: %s\n"+
+ "in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
), msgAndArgs...)
}
-func buildErrorChainString(err error) string {
+func unwrapAll(err error) (errs []error) {
+ errs = append(errs, err)
+ switch x := err.(type) {
+ case interface{ Unwrap() error }:
+ err = x.Unwrap()
+ if err == nil {
+ return
+ }
+ errs = append(errs, unwrapAll(err)...)
+ case interface{ Unwrap() []error }:
+ for _, err := range x.Unwrap() {
+ errs = append(errs, unwrapAll(err)...)
+ }
+ }
+ return
+}
+
+func buildErrorChainString(err error, withType bool) string {
if err == nil {
return ""
}
- e := errors.Unwrap(err)
- chain := fmt.Sprintf("%q", err.Error())
- for e != nil {
- chain += fmt.Sprintf("\n\t%q", e.Error())
- e = errors.Unwrap(e)
+ var chain string
+ errs := unwrapAll(err)
+ for i := range errs {
+ if i != 0 {
+ chain += "\n\t"
+ }
+ chain += fmt.Sprintf("%q", errs[i].Error())
+ if withType {
+ chain += fmt.Sprintf(" (%T)", errs[i])
+ }
}
return chain
}
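// A brief sketch of the widened Subset/NotSubset semantics documented in the changes
// above: when one operand is a map and the other is a slice or array, only the map keys
// are compared. Assumes this vendored testify version.
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetMixedOperands(t *testing.T) {
	assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"}) // keys 1 and 2 are in the slice
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, []string{"x"})      // "x" is a key of the map
	assert.NotSubset(t, []int{1, 3, 4}, map[int]string{2: "two"})        // key 2 is not in the slice
}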
diff --git a/operator/vendor/github.com/stretchr/testify/assert/doc.go b/operator/vendor/github.com/stretchr/testify/assert/doc.go
index 4953981d..a0b953aa 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/doc.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/doc.go
@@ -1,5 +1,9 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
+// # Note
+//
+// All functions in this package return a bool value indicating whether the assertion has passed.
+//
// # Example Usage
//
// The following is a complete example using assert in a standard test function:
diff --git a/operator/vendor/github.com/stretchr/testify/assert/http_assertions.go b/operator/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 861ed4b7..5a6bb75f 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return contains
@@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
+ Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return !contains
diff --git a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
index baa0cc7d..5a74c4f4 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -1,5 +1,4 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
-// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
//
diff --git a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
index b83c6cf6..0bae80e3 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -1,5 +1,4 @@
//go:build !testify_yaml_fail && !testify_yaml_custom
-// +build !testify_yaml_fail,!testify_yaml_custom
// Package yaml is just an indirection to handle YAML deserialization.
//
diff --git a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
index e78f7dfe..8041803f 100644
--- a/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
+++ b/operator/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -1,5 +1,4 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
-// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
// Package yaml is an implementation of YAML functions that always fail.
//
diff --git a/operator/vendor/github.com/stretchr/testify/mock/mock.go b/operator/vendor/github.com/stretchr/testify/mock/mock.go
index eb5682df..efc89def 100644
--- a/operator/vendor/github.com/stretchr/testify/mock/mock.go
+++ b/operator/vendor/github.com/stretchr/testify/mock/mock.go
@@ -208,9 +208,16 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call {
return c.Parent.On(methodName, arguments...)
}
-// Unset removes a mock handler from being called.
+// Unset removes all mock handlers that satisfy the call instance arguments from being
+// called. Only supported on call instances with static input arguments.
//
-// test.On("func", mock.Anything).Unset()
+// For example, the only handler remaining after the following would be "MyMethod(2, 2)":
+//
+// Mock.
+// On("MyMethod", 2, 2).Return(0).
+// On("MyMethod", 3, 3).Return(0).
+// On("MyMethod", Anything, Anything).Return(0)
+// Mock.On("MyMethod", 3, 3).Unset()
func (c *Call) Unset() *Call {
var unlockOnce sync.Once
@@ -331,7 +338,10 @@ func (m *Mock) TestData() objx.Map {
Setting expectations
*/
-// Test sets the test struct variable of the mock object
+// Test sets the [TestingT] on which errors will be reported, otherwise errors
+// will cause a panic.
+// Test should not be called on an object that is going to be used in a
+// goroutine other than the one running the test function.
func (m *Mock) Test(t TestingT) {
m.mutex.Lock()
defer m.mutex.Unlock()
@@ -494,7 +504,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
// expected call found, but it has already been called with repeatable times
if call != nil {
m.mutex.Unlock()
- m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo())
+ m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(%#v).Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo())
}
// we have to fail here - because we don't know what to do
// as the return arguments. This is because:
@@ -514,7 +524,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen
assert.CallerInfo(),
)
} else {
- m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())
+ m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(%#v).Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo())
}
}
@@ -661,7 +671,7 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls
actualCalls++
}
}
- return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
+ return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) of method %s does not match the actual number of calls (%d).", expectedCalls, methodName, actualCalls))
}
// AssertCalled asserts that the method was called.
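// A brief sketch of the Unset behaviour documented above: unsetting a call with static
// arguments removes every handler those arguments satisfy, including Anything matchers.
// MyMethod is a hypothetical method name.
package example

import "github.com/stretchr/testify/mock"

func unsetSketch() *mock.Mock {
	m := new(mock.Mock)
	m.On("MyMethod", 2, 2).Return(0)
	m.On("MyMethod", 3, 3).Return(0)
	m.On("MyMethod", mock.Anything, mock.Anything).Return(0)
	m.On("MyMethod", 3, 3).Unset() // only the MyMethod(2, 2) handler remains
	return m
}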
diff --git a/operator/vendor/golang.org/x/mod/semver/semver.go b/operator/vendor/golang.org/x/mod/semver/semver.go
index 9a2dfd33..628f8fd6 100644
--- a/operator/vendor/golang.org/x/mod/semver/semver.go
+++ b/operator/vendor/golang.org/x/mod/semver/semver.go
@@ -22,7 +22,10 @@
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
-import "sort"
+import (
+ "slices"
+ "strings"
+)
// parsed returns the parsed form of a semantic version string.
type parsed struct {
@@ -154,19 +157,22 @@ func Max(v, w string) string {
// ByVersion implements [sort.Interface] for sorting semantic version strings.
type ByVersion []string
-func (vs ByVersion) Len() int { return len(vs) }
-func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
-func (vs ByVersion) Less(i, j int) bool {
- cmp := Compare(vs[i], vs[j])
- if cmp != 0 {
- return cmp < 0
- }
- return vs[i] < vs[j]
-}
+func (vs ByVersion) Len() int { return len(vs) }
+func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
+func (vs ByVersion) Less(i, j int) bool { return compareVersion(vs[i], vs[j]) < 0 }
-// Sort sorts a list of semantic version strings using [ByVersion].
+// Sort sorts a list of semantic version strings using [Compare] and falls back
+// to use [strings.Compare] if both versions are considered equal.
func Sort(list []string) {
- sort.Sort(ByVersion(list))
+ slices.SortFunc(list, compareVersion)
+}
+
+func compareVersion(a, b string) int {
+ cmp := Compare(a, b)
+ if cmp != 0 {
+ return cmp
+ }
+ return strings.Compare(a, b)
}
func parse(v string) (p parsed, ok bool) {
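// A brief sketch of the Sort behaviour preserved by the slices.SortFunc rewrite above:
// ordering follows semantic-version precedence, with a plain string comparison breaking
// ties between versions that Compare treats as equal (e.g. ones differing only in build
// metadata).
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	vs := []string{"v1.10.0", "v1.2.0+build", "v1.2.0", "v0.9.0"}
	semver.Sort(vs)
	fmt.Println(vs) // [v0.9.0 v1.2.0 v1.2.0+build v1.10.0]
}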
diff --git a/operator/vendor/golang.org/x/net/html/escape.go b/operator/vendor/golang.org/x/net/html/escape.go
index 04c6bec2..12f22737 100644
--- a/operator/vendor/golang.org/x/net/html/escape.go
+++ b/operator/vendor/golang.org/x/net/html/escape.go
@@ -299,7 +299,7 @@ func escape(w writer, s string) error {
case '\r':
esc = "&#13;"
default:
- panic("unrecognized escape character")
+ panic("html: unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {
diff --git a/operator/vendor/golang.org/x/net/html/parse.go b/operator/vendor/golang.org/x/net/html/parse.go
index 518ee4c9..88fc0056 100644
--- a/operator/vendor/golang.org/x/net/html/parse.go
+++ b/operator/vendor/golang.org/x/net/html/parse.go
@@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
return -1
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s))
}
}
switch s {
@@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) {
return
}
default:
- panic("unreachable")
+ panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s))
}
}
}
@@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) {
}
if n.Type == ElementNode {
- p.oe = append(p.oe, n)
+ p.insertOpenElement(n)
+ }
+}
+
+func (p *parser) insertOpenElement(n *Node) {
+ p.oe = append(p.oe, n)
+ if len(p.oe) > 512 {
+ panic("html: open stack of elements exceeds 512 nodes")
}
}
@@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool {
p.im = inFramesetIM
return true
case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
- p.oe = append(p.oe, p.head)
+ p.insertOpenElement(p.head)
defer p.oe.remove(p.head)
return inHeadIM(p)
case a.Head:
@@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool {
return inTableIM(p)
}
-// Section 12.2.6.4.14.
+// Section 13.2.6.4.14.
func inRowIM(p *parser) bool {
switch p.tok.Type {
case StartTagToken:
@@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool {
p.im = inCellIM
return true
case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
@@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool {
case EndTagToken:
switch p.tok.DataAtom {
case a.Tr:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return true
}
// Ignore the token.
return true
case a.Table:
- if p.popUntil(tableScope, a.Tr) {
+ if p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
p.im = inTableBodyIM
return false
}
// Ignore the token.
return true
case a.Tbody, a.Tfoot, a.Thead:
- if p.elementInScope(tableScope, p.tok.DataAtom) {
- p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
+ if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) {
+ p.clearStackToContext(tableRowScope)
+ p.oe.pop()
+ p.im = inTableBodyIM
return false
}
// Ignore the token.
@@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool {
p.acknowledgeSelfClosingTag()
}
case EndTagToken:
+ if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) {
+ p.oe = p.oe[:len(p.oe)-1]
+ return true
+ }
for i := len(p.oe) - 1; i >= 0; i-- {
- if p.oe[i].Namespace == "" {
- return p.im(p)
- }
if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
p.oe = p.oe[:i]
+ return true
+ }
+ if i > 0 && p.oe[i-1].Namespace == "" {
break
}
}
- return true
+ return p.im(p)
default:
// Ignore the token.
}
@@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() {
}
}
-func (p *parser) parse() error {
+func (p *parser) parse() (err error) {
+ defer func() {
+ if panicErr := recover(); panicErr != nil {
+ err = fmt.Errorf("%s", panicErr)
+ }
+ }()
// Iterate until EOF. Any other error will cause an early return.
- var err error
for err != io.EOF {
// CDATA sections are allowed only in foreign content.
n := p.oe.top()
@@ -2343,6 +2366,8 @@ func (p *parser) parse() error {
// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
// with no corresponding node in the resulting tree.
//
+// Parse will reject HTML that is nested deeper than 512 elements.
+//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r)
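// A brief sketch of the new depth guard: parse() now recovers the panic raised by
// insertOpenElement and returns it as an error, so pathologically nested input fails
// instead of growing the open-element stack without bound.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	deep := strings.Repeat("<div>", 1000) // nests well past the 512-element limit
	if _, err := html.Parse(strings.NewReader(deep)); err != nil {
		fmt.Println("rejected:", err)
	}
}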
diff --git a/operator/vendor/golang.org/x/net/html/render.go b/operator/vendor/golang.org/x/net/html/render.go
index e8c12334..0157d89e 100644
--- a/operator/vendor/golang.org/x/net/html/render.go
+++ b/operator/vendor/golang.org/x/net/html/render.go
@@ -184,7 +184,7 @@ func render1(w writer, n *Node) error {
return err
}
- // Add initial newline where there is danger of a newline beging ignored.
+ // Add initial newline where there is danger of a newline being ignored.
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":
diff --git a/operator/vendor/golang.org/x/net/http2/config.go b/operator/vendor/golang.org/x/net/http2/config.go
index ca645d9a..8a7a89d0 100644
--- a/operator/vendor/golang.org/x/net/http2/config.go
+++ b/operator/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
+ StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
- fillNetHTTPServerConfig(&conf, h1)
+ fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true)
return conf
}
@@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
- MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
- MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
- MaxReadFrameSize: h2.MaxReadFrameSize,
- SendPingTimeout: h2.ReadIdleTimeout,
- PingTimeout: h2.PingTimeout,
- WriteByteTimeout: h2.WriteByteTimeout,
+ StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config {
}
if h2.t1 != nil {
- fillNetHTTPTransportConfig(&conf, h2.t1)
+ fillNetHTTPConfig(&conf, h2.t1.HTTP2)
}
setConfigDefaults(&conf, false)
return conf
@@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if http2ConfigStrictMaxConcurrentRequests(h2) {
+ conf.StrictMaxConcurrentRequests = true
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
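// A brief sketch of what fillNetHTTPConfig above consumes: on Go 1.24+ the standard
// library's http.Server exposes an HTTP2 field, and ConfigureServer copies its non-zero
// values into the http2 configuration. The values shown are illustrative only.
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 256,
			MaxReadFrameSize:     1 << 20,
			PingTimeout:          15 * time.Second,
		},
	}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
}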
diff --git a/operator/vendor/golang.org/x/net/http2/config_go124.go b/operator/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c55..00000000
--- a/operator/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
- fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
- fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
- if h2 == nil {
- return
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxEncoderHeaderTableSize != 0 {
- conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
- }
- if h2.MaxDecoderHeaderTableSize != 0 {
- conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxReadFrameSize != 0 {
- conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
- }
- if h2.MaxReceiveBufferPerConnection != 0 {
- conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
- }
- if h2.MaxReceiveBufferPerStream != 0 {
- conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
- }
- if h2.SendPingTimeout != 0 {
- conf.SendPingTimeout = h2.SendPingTimeout
- }
- if h2.PingTimeout != 0 {
- conf.PingTimeout = h2.PingTimeout
- }
- if h2.WriteByteTimeout != 0 {
- conf.WriteByteTimeout = h2.WriteByteTimeout
- }
- if h2.PermitProhibitedCipherSuites {
- conf.PermitProhibitedCipherSuites = true
- }
- if h2.CountError != nil {
- conf.CountError = h2.CountError
- }
-}
diff --git a/operator/vendor/golang.org/x/net/http2/config_go125.go b/operator/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 00000000..b4373fe3
--- /dev/null
+++ b/operator/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return false
+}
diff --git a/operator/vendor/golang.org/x/net/http2/config_go126.go b/operator/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 00000000..6b071c14
--- /dev/null
+++ b/operator/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return h2.StrictMaxConcurrentRequests
+}
diff --git a/operator/vendor/golang.org/x/net/http2/config_pre_go124.go b/operator/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c6..00000000
--- a/operator/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/operator/vendor/golang.org/x/net/http2/frame.go b/operator/vendor/golang.org/x/net/http2/frame.go
index 97bd8b06..9a4bd123 100644
--- a/operator/vendor/golang.org/x/net/http2/frame.go
+++ b/operator/vendor/golang.org/x/net/http2/frame.go
@@ -39,7 +39,7 @@ const (
FrameContinuation FrameType = 0x9
)
-var frameName = map[FrameType]string{
+var frameNames = [...]string{
FrameData: "DATA",
FrameHeaders: "HEADERS",
FramePriority: "PRIORITY",
@@ -53,10 +53,10 @@ var frameName = map[FrameType]string{
}
func (t FrameType) String() string {
- if s, ok := frameName[t]; ok {
- return s
+ if int(t) < len(frameNames) {
+ return frameNames[t]
}
- return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
}
// Flags is a bitmask of HTTP/2 flags.
@@ -124,7 +124,7 @@ var flagName = map[FrameType]map[Flags]string{
// might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
-var frameParsers = map[FrameType]frameParser{
+var frameParsers = [...]frameParser{
FrameData: parseDataFrame,
FrameHeaders: parseHeadersFrame,
FramePriority: parsePriorityFrame,
@@ -138,8 +138,8 @@ var frameParsers = map[FrameType]frameParser{
}
func typeFrameParser(t FrameType) frameParser {
- if f := frameParsers[t]; f != nil {
- return f
+ if int(t) < len(frameParsers) {
+ return frameParsers[t]
}
return parseUnknownFrame
}
@@ -280,6 +280,8 @@ type Framer struct {
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
+ // lastFrameType holds the type of the last frame for verifying frame order.
+ lastFrameType FrameType
maxReadSize uint32
headerBuf [frameHeaderLen]byte
@@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
+ 0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool {
return err != nil
}
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
+// ReadFrameHeader reads the header of the next frame.
+// It reads the 9-byte fixed frame header, and does not read any portion of the
+// frame payload. The caller is responsible for consuming the payload, either
+// with ReadFrameForHeader or directly from the Framer's io.Reader.
//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
+// If the frame is larger than previously set with SetMaxReadFrameSize, it
+// returns the frame header and ErrFrameTooLarge.
//
-// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
-// indicates the stream responsible for the error.
-func (fr *Framer) ReadFrame() (Frame, error) {
+// If the returned FrameHeader.StreamID is non-zero, it indicates the stream
+// responsible for the error.
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) {
fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil {
- return nil, err
+ return fh, err
}
if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() {
- return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
}
- return nil, ErrFrameTooLarge
+ return fh, ErrFrameTooLarge
+ }
+ if err := fr.checkFrameOrder(fh); err != nil {
+ return fh, err
+ }
+ return fh, nil
+}
+
+// ReadFrameForHeader reads the payload for the frame with the given FrameHeader.
+//
+// It behaves identically to ReadFrame, other than not checking the maximum
+// frame size.
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
return nil, err
}
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
+ fr.lastFrame = f
if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return f, nil
}
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fh, err := fr.ReadFrameHeader()
+ if err != nil {
+ return nil, err
+ }
+ return fr.ReadFrameForHeader(fh)
+}
+
// connError returns ConnectionError(code) but first
// stashes away a public reason to the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+ lastType := fr.lastFrameType
+ fr.lastFrameType = fh.Type
if fr.AllowIllegalReads {
return nil
}
- fh := f.Header()
if fr.lastHeaderStream != 0 {
if fh.Type != FrameContinuation {
return fr.connError(ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
+ lastType, fr.lastHeaderStream))
}
if fh.StreamID != fr.lastHeaderStream {
return fr.connError(ErrCodeProtocol,
@@ -1152,7 +1180,16 @@ type PriorityFrame struct {
PriorityParam
}
-// PriorityParam are the stream prioritzation parameters.
+var defaultRFC9218Priority = PriorityParam{
+ incremental: 0,
+ urgency: 3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
+// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no
@@ -1167,6 +1204,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
+
+ // "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+ // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+ // priority. The default is 3."
+ urgency uint8
+
+ // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+ // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+ // incrementally, i.e., provide some meaningful output as chunks of the
+ // response arrive."
+ //
+ // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+ // avoid unnecessary type conversions and because either type takes 1 byte.
+ incremental uint8
}
func (p PriorityParam) IsZero() bool {
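// A brief sketch of the split read API introduced above: a caller can inspect the 9-byte
// frame header first and decide whether to consume the payload. readOne is a hypothetical
// helper; ReadFrame remains the equivalent one-shot call.
package framing

import "golang.org/x/net/http2"

func readOne(fr *http2.Framer) (http2.Frame, error) {
	fh, err := fr.ReadFrameHeader()
	if err != nil {
		return nil, err // e.g. ErrFrameTooLarge when the advertised length exceeds the limit
	}
	return fr.ReadFrameForHeader(fh) // the returned Frame is only valid until the next read
}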
diff --git a/operator/vendor/golang.org/x/net/http2/gotrack.go b/operator/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f8..9921ca09 100644
--- a/operator/vendor/golang.org/x/net/http2/gotrack.go
+++ b/operator/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@ import (
"runtime"
"strconv"
"sync"
+ "sync/atomic"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
}
func (g goroutineLock) checkNotOn() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() == uint64(g) {
diff --git a/operator/vendor/golang.org/x/net/http2/http2.go b/operator/vendor/golang.org/x/net/http2/http2.go
index 6c18ea23..105fe12f 100644
--- a/operator/vendor/golang.org/x/net/http2/http2.go
+++ b/operator/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
package http2 // import "golang.org/x/net/http2"
import (
"bufio"
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -37,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
- inTests bool
// Enabling extended CONNECT by default causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -257,15 +253,13 @@ func (cw closeWaiter) Wait() {
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
- group synctestGroupInterface // immutable
- conn net.Conn // immutable
- bw *bufio.Writer // non-nil when data is buffered
- byteTimeout time.Duration // immutable, WriteByteTimeout
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
- group: group,
conn: conn,
byteTimeout: timeout,
}
@@ -316,24 +310,18 @@ func (w *bufferedWriter) Flush() error {
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
- return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+ return writeWithByteTimeout(w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
- var now time.Time
- if group == nil {
- now = time.Now()
- } else {
- now = group.Now()
- }
- conn.SetWriteDeadline(now.Add(timeout))
+ conn.SetWriteDeadline(time.Now().Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -419,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) {
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
- Join()
- Now() time.Time
- NewTimer(d time.Duration) timer
- AfterFunc(d time.Duration, f func()) timer
- ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
diff --git a/operator/vendor/golang.org/x/net/http2/server.go b/operator/vendor/golang.org/x/net/http2/server.go
index 51fca38f..bdc5520e 100644
--- a/operator/vendor/golang.org/x/net/http2/server.go
+++ b/operator/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@ type Server struct {
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler {
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +636,11 @@ type serverConn struct {
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@ type stream struct {
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@ type readFrameResult struct {
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +878,6 @@ type frameWriteResult struct {
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) {
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error {
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() {
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
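
The server.go hunks above replace the package-level errChanPool with a per-Server pool (so channels are never shared across synctest bubbles), keeping a global pool only as a fallback for a Server that was never run through ConfigureServer. Below is a minimal sketch of that nil-receiver-with-fallback pattern; names are illustrative, not the vendored ones.

```go
package main

import (
	"fmt"
	"sync"
)

// Global fallback pool, used only when the owner was never initialized.
var fallbackPool = sync.Pool{New: func() any { return make(chan error, 1) }}

// state mirrors the role of serverInternalState: it owns a per-instance pool.
type state struct {
	pool sync.Pool
}

func newState() *state {
	return &state{pool: sync.Pool{New: func() any { return make(chan error, 1) }}}
}

// get tolerates a nil receiver, falling back to the global pool.
func (s *state) get() chan error {
	if s == nil {
		return fallbackPool.Get().(chan error)
	}
	return s.pool.Get().(chan error)
}

// put returns a channel to whichever pool it came from.
func (s *state) put(ch chan error) {
	if s == nil {
		fallbackPool.Put(ch)
		return
	}
	s.pool.Put(ch)
}

func main() {
	var uninitialized *state // e.g. a server used without its setup hook
	ch := uninitialized.get()
	uninitialized.put(ch)

	st := newState()
	st.put(st.get())
	fmt.Println("both paths work")
}
```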
diff --git a/operator/vendor/golang.org/x/net/http2/timer.go b/operator/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b8..00000000
--- a/operator/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
- C() <-chan time.Time
- Reset(d time.Duration) bool
- Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
- *time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
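
Deleting timer.go removes the injectable timer abstraction; server and transport now use *time.Timer directly. Consistent with the synctest-bubble comments in server.go above, tests can instead rely on the testing/synctest package, where ordinary timers already run on a fake clock inside a bubble. A hedged sketch of that style of test, assuming Go 1.25's synctest.Test/Wait API (this test is not part of the vendored code):

```go
package timers_test

import (
	"testing"
	"testing/synctest"
	"time"
)

// Inside a synctest bubble, time.AfterFunc uses the bubble's fake clock,
// so timeout paths are deterministic without timer injection.
func TestAfterFuncInBubble(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		fired := false
		time.AfterFunc(5*time.Minute, func() { fired = true })

		time.Sleep(5 * time.Minute) // fake time advances instantly
		synctest.Wait()             // let the timer's goroutine finish
		if !fired {
			t.Fatal("timer did not fire")
		}
	})
}
```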
diff --git a/operator/vendor/golang.org/x/net/http2/transport.go b/operator/vendor/golang.org/x/net/http2/transport.go
index f26356b9..1965913e 100644
--- a/operator/vendor/golang.org/x/net/http2/transport.go
+++ b/operator/vendor/golang.org/x/net/http2/transport.go
@@ -9,6 +9,7 @@ package http2
import (
"bufio"
"bytes"
+ "compress/flate"
"compress/gzip"
"context"
"crypto/rand"
@@ -193,50 +194,6 @@ type Transport struct {
type transportTestHooks struct {
newclientconn func(*ClientConn)
- group synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
- if t != nil && t.transportTestHooks != nil {
- t.transportTestHooks.group.Join()
- }
-}
-
-func (t *Transport) now() time.Time {
- if t != nil && t.transportTestHooks != nil {
- return t.transportTestHooks.group.Now()
- }
- return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
- if t != nil && t.transportTestHooks != nil {
- return t.now().Sub(when)
- }
- return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +323,7 @@ type ClientConn struct {
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer timer
+ idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -399,6 +356,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
+ strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() {
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
go func() {
- cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
}()
}
type stickyErrWriter struct {
- group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
@@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- tm := t.newTimer(d)
+ tm := time.NewTimer(d)
select {
- case <-tm.C():
+ case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
@@ -699,6 +655,7 @@ var (
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- lastActive: t.now(),
+ lastActive: time.Now(),
}
- var group synctestGroupInterface
if t.transportTestHooks != nil {
- t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
- group: group,
conn: c,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
@@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
go cc.readLoop()
@@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() {
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
+ if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
@@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
done := make(chan struct{})
cancelled := false // guarded by cc.mu
go func() {
- cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) {
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- cc.closeForError(err)
+ cc.closeForError(errClientConnForceClosed)
return nil
}
@@ -1427,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
- cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1558,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.t.newTimer(d)
+ timer := time.NewTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C()
+ respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
// Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
@@ -2092,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = cc.t.now()
+ cc.lastIdle = time.Now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2072,6 @@ type clientConnReadLoop struct {
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2188,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() {
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
- idleTime := cc.t.now().Sub(cc.lastActive)
+ idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
- cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
} else {
@@ -2250,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.readIdleTimeout
- var t timer
+ var t *time.Timer
if readIdleTimeout != 0 {
- t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2998,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
var pingError error
errc := make(chan struct{})
go func() {
- cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3128,35 +3077,102 @@ type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body")
+
// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
+// get gzip.Reader from the pool on the first call to Read.
+// After Close is called it puts gzip.Reader to the pool immediately
+// if there is no Read in progress or later when Read completes.
type gzipReader struct {
_ incomparable
body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
+ mu sync.Mutex // guards zr and zerr
+ zr *gzip.Reader // stores gzip reader from the pool between reads
+ zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close
}
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
+type eofReader struct{}
+
+func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
+func (eofReader) ReadByte() (byte, error) { return 0, io.EOF }
+
+var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }}
+
+// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r.
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) {
+ zr := gzipPool.Get().(*gzip.Reader)
+ if err := zr.Reset(r); err != nil {
+ gzipPoolPut(zr)
+ return nil, err
+ }
+ return zr, nil
+}
+
+// gzipPoolPut puts a gzip.Reader back into the pool.
+func gzipPoolPut(zr *gzip.Reader) {
+ // Reset will allocate bufio.Reader if we pass it anything
+ // other than a flate.Reader, so ensure that it's getting one.
+ var r flate.Reader = eofReader{}
+ zr.Reset(r)
+ gzipPool.Put(zr)
+}
+
+// acquire returns a gzip.Reader for reading response body.
+// The reader must be released after use.
+func (gz *gzipReader) acquire() (*gzip.Reader, error) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
if gz.zerr != nil {
- return 0, gz.zerr
+ return nil, gz.zerr
}
if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
+ gz.zr, gz.zerr = gzipPoolGet(gz.body)
+ if gz.zerr != nil {
+ return nil, gz.zerr
}
}
- return gz.zr.Read(p)
+ ret := gz.zr
+ gz.zr, gz.zerr = nil, errConcurrentReadOnResBody
+ return ret, nil
}
-func (gz *gzipReader) Close() error {
- if err := gz.body.Close(); err != nil {
- return err
+// release returns the gzip.Reader to the pool if Close was called during Read.
+func (gz *gzipReader) release(zr *gzip.Reader) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == errConcurrentReadOnResBody {
+ gz.zr, gz.zerr = zr, nil
+ } else { // fs.ErrClosed
+ gzipPoolPut(zr)
+ }
+}
+
+// close returns the gzip.Reader to the pool immediately or
+// signals release to do so after Read completes.
+func (gz *gzipReader) close() {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == nil && gz.zr != nil {
+ gzipPoolPut(gz.zr)
+ gz.zr = nil
}
gz.zerr = fs.ErrClosed
- return nil
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ zr, err := gz.acquire()
+ if err != nil {
+ return 0, err
+ }
+ defer gz.release(zr)
+
+ return zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ gz.close()
+
+ return gz.body.Close()
}
type errorReader struct{ err error }
@@ -3228,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
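
Among the transport.go changes above, gzipReader now pools its gzip.Reader and guards it with a mutex plus sentinel errors, so a Close racing with an in-flight Read is detected and the reader still ends up back in the pool. Below is a small sketch of that acquire/release/close guard, with illustrative names and a generic payload standing in for *gzip.Reader.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"sync"
)

var errConcurrentRead = errors.New("concurrent read")

// guarded holds a value that must not be used concurrently and must be
// reclaimable even if close races with a use in progress.
type guarded[T any] struct {
	mu  sync.Mutex
	val *T    // nil while a caller has it checked out
	err error // sentinel marking "checked out" or "closed"
}

func (g *guarded[T]) acquire() (*T, error) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.err != nil {
		return nil, g.err // concurrent use or already closed
	}
	v := g.val
	g.val, g.err = nil, errConcurrentRead // mark as checked out
	return v, nil
}

func (g *guarded[T]) release(v *T) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if errors.Is(g.err, errConcurrentRead) {
		g.val, g.err = v, nil // still open: hand the value back
		return
	}
	// Closed while checked out: a real implementation would return v
	// to its pool here.
}

func (g *guarded[T]) close() {
	g.mu.Lock()
	defer g.mu.Unlock()
	g.val = nil // a real implementation would pool the value here
	g.err = fs.ErrClosed
}

func main() {
	v := 42
	g := &guarded[int]{val: &v}

	p, _ := g.acquire()
	fmt.Println(*p) // 42
	g.release(p)

	g.close()
	if _, err := g.acquire(); err != nil {
		fmt.Println("after close:", err) // fs.ErrClosed
	}
}
```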
diff --git a/operator/vendor/golang.org/x/net/http2/writesched.go b/operator/vendor/golang.org/x/net/http2/writesched.go
index cc893adc..7de27be5 100644
--- a/operator/vendor/golang.org/x/net/http2/writesched.go
+++ b/operator/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
+ // priority is used to set the priority of the newly opened stream.
+ priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.
@@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
}
// writeQueue is used by implementations of WriteScheduler.
+//
+// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
+// FrameWriteRequests associated with a given stream. This is implemented as a
+// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
+// by incrementing currPos of currQueue. Adding an item is done by appending it
+// to the nextQueue. If currQueue is empty when trying to remove an item, we
+// can swap currQueue and nextQueue to remedy the situation.
+// This two-stage queue is analogous to the use of two lists in Okasaki's
+// purely functional queue but without the overhead of reversing the list when
+// swapping stages.
+//
+// writeQueue also contains prev and next, this can be used by implementations
+// of WriteScheduler to construct data structures that represent the order of
+// writing between different streams (e.g. circular linked list).
type writeQueue struct {
- s []FrameWriteRequest
+ currQueue []FrameWriteRequest
+ nextQueue []FrameWriteRequest
+ currPos int
+
prev, next *writeQueue
}
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+func (q *writeQueue) empty() bool {
+ return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
+}
func (q *writeQueue) push(wr FrameWriteRequest) {
- q.s = append(q.s, wr)
+ q.nextQueue = append(q.nextQueue, wr)
}
func (q *writeQueue) shift() FrameWriteRequest {
- if len(q.s) == 0 {
+ if q.empty() {
panic("invalid use of queue")
}
- wr := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = FrameWriteRequest{}
- q.s = q.s[:len(q.s)-1]
+ if q.currPos >= len(q.currQueue) {
+ q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
+ }
+ wr := q.currQueue[q.currPos]
+ q.currQueue[q.currPos] = FrameWriteRequest{}
+ q.currPos++
return wr
}
+func (q *writeQueue) peek() *FrameWriteRequest {
+ if q.currPos < len(q.currQueue) {
+ return &q.currQueue[q.currPos]
+ }
+ if len(q.nextQueue) > 0 {
+ return &q.nextQueue[0]
+ }
+ return nil
+}
+
// consume consumes up to n bytes from q.s[0]. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
- if len(q.s) == 0 {
+ if q.empty() {
return FrameWriteRequest{}, false
}
- consumed, rest, numresult := q.s[0].Consume(n)
+ consumed, rest, numresult := q.peek().Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
- q.s[0] = rest
+ *q.peek() = rest
}
return consumed, true
}
@@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
- for i := range q.s {
- q.s[i] = FrameWriteRequest{}
+ for i := range q.currQueue {
+ q.currQueue[i] = FrameWriteRequest{}
+ }
+ for i := range q.nextQueue {
+ q.nextQueue[i] = FrameWriteRequest{}
}
- q.s = q.s[:0]
+ q.currQueue = q.currQueue[:0]
+ q.nextQueue = q.nextQueue[:0]
+ q.currPos = 0
*p = append(*p, q)
}
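
writeQueue above becomes a two-stage queue: shift pops from currQueue[currPos:], push appends to nextQueue, and the two slices are swapped once the current stage drains, avoiding the old copy-on-every-shift. A self-contained sketch of the same idea, using an illustrative generic type rather than the vendored one:

```go
package main

import "fmt"

// twoStageQueue is a FIFO: pop from curr[pos:], push onto next, and swap the
// two slices when the current stage drains.
type twoStageQueue[T any] struct {
	curr, next []T
	pos        int
}

func (q *twoStageQueue[T]) empty() bool {
	return len(q.curr)-q.pos+len(q.next) == 0
}

func (q *twoStageQueue[T]) push(v T) { q.next = append(q.next, v) }

func (q *twoStageQueue[T]) shift() T {
	if q.empty() {
		panic("shift on empty queue")
	}
	if q.pos >= len(q.curr) {
		// Current stage drained: promote next, reuse the old backing array.
		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.pos]
	var zero T
	q.curr[q.pos] = zero // drop the reference so it can be collected
	q.pos++
	return v
}

func main() {
	var q twoStageQueue[int]
	q.push(1)
	q.push(2)
	fmt.Println(q.shift(), q.shift(), q.empty()) // 1 2 true
}
```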
diff --git a/operator/vendor/golang.org/x/net/http2/writesched_priority.go b/operator/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
similarity index 77%
rename from operator/vendor/golang.org/x/net/http2/writesched_priority.go
rename to operator/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index f6783339..4e33c29a 100644
--- a/operator/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/operator/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
+ ws := &priorityWriteSchedulerRFC7540{
+ nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
-type priorityNodeState int
+type priorityNodeStateRFC7540 int
const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
+ priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+ priorityNodeClosedRFC7540
+ priorityNodeIdleRFC7540
)
-// priorityNode is a node in an HTTP/2 priority tree.
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+type priorityNodeRFC7540 struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeStateRFC7540 // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
+ parent *priorityNodeRFC7540
+ kids *priorityNodeRFC7540 // start of the kids list
+ prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
-func (n *priorityNode) setParent(parent *priorityNode) {
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
-func (n *priorityNode) addBytes(b int64) {
+func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
+ openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
- sort.Sort(sortPriorityNodeSiblings(*tmp))
+ sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
-type sortPriorityNodeSiblings []*priorityNode
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
-func (z sortPriorityNodeSiblings) Len() int { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
-type priorityWriteScheduler struct {
+type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
- root priorityNode
+ root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*priorityNode
+ nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*priorityNode
+ closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*priorityNode
+ tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != priorityNodeIdle {
+ if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
- curr.state = priorityNodeOpen
+ curr.state = priorityNodeOpenRFC7540
return
}
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
- n := &priorityNode{
+ n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeOpen,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
- if ws.nodes[streamID].state != priorityNodeOpen {
+ if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
- n.state = priorityNodeClosed
+ n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
ws.queuePool.put(&q)
- n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
@@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
- n = &priorityNode{
+ n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeIdle,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
- n.weight = priorityDefaultWeight
+ n.weight = priorityDefaultWeightRFC7540
return
}
@@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
- var n *priorityNode
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+ var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}
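
Beyond the RFC 7540 renames, the one behavioral change in this file is in Less: weight is a uint8, so the old float64(z[i].weight+1) wrapped to 0 when weight == 255, while the new float64(z[i].weight)+1 yields the intended 256. A tiny illustration:

```go
package main

import "fmt"

func main() {
	var weight uint8 = 255
	fmt.Println(float64(weight + 1)) // 0: the uint8 addition wraps before conversion
	fmt.Println(float64(weight) + 1) // 256: convert first, then add
}
```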
diff --git a/operator/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/operator/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
new file mode 100644
index 00000000..cb4cadc3
--- /dev/null
+++ b/operator/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type streamMetadata struct {
+ location *writeQueue
+ priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+ // heads contain the head of a circular list of streams.
+ // We put these heads within a nested array that represents urgency and
+ // incremental, as defined in
+ // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+ // 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
+ heads [8][2]*writeQueue
+
+ // streams contains a mapping between each stream ID and their metadata, so
+ // we can quickly locate them when needing to, for example, adjust their
+ // priority.
+ streams map[uint32]streamMetadata
+
+ // queuePool are empty queues for reuse.
+ queuePool writeQueuePool
+
+ // prioritizeIncremental is used to determine whether we should prioritize
+ // incremental streams or not, when urgency is the same in a given Pop()
+ // call.
+ prioritizeIncremental bool
+}
+
+func newPriorityWriteSchedulerRFC9218() WriteScheduler {
+ ws := &priorityWriteSchedulerRFC9218{
+ streams: make(map[uint32]streamMetadata),
+ }
+ return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+ if ws.streams[streamID].location != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: opt.priority,
+ }
+
+ u, i := opt.priority.urgency, opt.priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+
+ // Remove stream from current location.
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+
+ // Insert stream to the new queue.
+ u, i = priority.urgency, priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+ // Insert the new stream before ws.head, putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+
+ // Update the metadata.
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: priority,
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()].location
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+
+ // On the next Pop(), we want to prioritize incremental if we prioritized
+ // non-incremental request of the same urgency this time. Vice-versa.
+ // i.e. when there are incremental and non-incremental requests at the same
+ // priority, we give 50% of our bandwidth to the incremental ones in
+ // aggregate and 50% to the first non-incremental one (since
+ // non-incremental streams do not use round-robin writes).
+ ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+ // Always prioritize lowest u (i.e. highest urgency level).
+ for u := range ws.heads {
+ for i := range ws.heads[u] {
+ // When we want to prioritize incremental, we try to pop i=true
+ // first before i=false when u is the same.
+ if ws.prioritizeIncremental {
+ i = (i + 1) % 2
+ }
+ q := ws.heads[u][i]
+ if q == nil {
+ continue
+ }
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if i == 1 {
+ // For incremental streams, we update head to q.next so
+ // we can round-robin between multiple streams that can
+ // immediately benefit from partial writes.
+ ws.heads[u][i] = q.next
+ } else {
+ // For non-incremental streams, we try to finish one to
+ // completion rather than doing round-robin. However,
+ // we update head here so that if q.consume() is !ok
+ // (e.g. the stream has no more frame to consume), head
+ // is updated to the next q that has frames to consume
+ // on future iterations. This way, we do not prioritize
+ // writing to unavailable stream on next Pop() calls,
+ // preventing head-of-line blocking.
+ ws.heads[u][i] = q
+ }
+ return wr, true
+ }
+ q = q.next
+ if q == ws.heads[u][i] {
+ break
+ }
+ }
+
+ }
+ }
+ return FrameWriteRequest{}, false
+}
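
The new RFC 9218 scheduler keeps one circular doubly-linked ring of writeQueues per (urgency, incremental) bucket: OpenStream links the queue in just before the head (appending to the ring) and CloseStream unlinks it in O(1). A stripped-down sketch of those two ring operations, with an illustrative node type in place of *writeQueue:

```go
package main

import "fmt"

// node stands in for a *writeQueue linked through prev/next.
type node struct {
	id         uint32
	prev, next *node
}

// insert appends n to the ring whose head is *head (nil means an empty ring),
// by linking it in just before the head.
func insert(head **node, n *node) {
	if *head == nil {
		*head = n
		n.prev, n.next = n, n
		return
	}
	n.prev = (*head).prev
	n.next = *head
	n.prev.next = n
	n.next.prev = n
}

// remove unlinks n in O(1), clearing or advancing *head as needed.
func remove(head **node, n *node) {
	if n.next == n {
		*head = nil // n was the only element
		return
	}
	n.prev.next = n.next
	n.next.prev = n.prev
	if *head == n {
		*head = n.next
	}
}

func main() {
	var head *node
	a, b := &node{id: 1}, &node{id: 3}
	insert(&head, a)
	insert(&head, b)
	remove(&head, a)
	fmt.Println(head.id, head.next.id) // 3 3
}
```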
diff --git a/operator/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/operator/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe8632..737cff9e 100644
--- a/operator/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/operator/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
diff --git a/operator/vendor/golang.org/x/net/internal/httpcommon/request.go b/operator/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b705531..1e10f89e 100644
--- a/operator/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/operator/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
- // It might be a bit odd to return errors this way rather than returing an error,
+ // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
diff --git a/operator/vendor/golang.org/x/oauth2/internal/doc.go b/operator/vendor/golang.org/x/oauth2/internal/doc.go
index 03265e88..8c7c475f 100644
--- a/operator/vendor/golang.org/x/oauth2/internal/doc.go
+++ b/operator/vendor/golang.org/x/oauth2/internal/doc.go
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package internal contains support packages for oauth2 package.
+// Package internal contains support packages for [golang.org/x/oauth2].
package internal
diff --git a/operator/vendor/golang.org/x/oauth2/internal/oauth2.go b/operator/vendor/golang.org/x/oauth2/internal/oauth2.go
index 14989bea..71ea6ad1 100644
--- a/operator/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/operator/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -13,7 +13,7 @@ import (
)
// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
+// to an [*rsa.PrivateKey]. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
diff --git a/operator/vendor/golang.org/x/oauth2/internal/token.go b/operator/vendor/golang.org/x/oauth2/internal/token.go
index e83ddeef..8389f246 100644
--- a/operator/vendor/golang.org/x/oauth2/internal/token.go
+++ b/operator/vendor/golang.org/x/oauth2/internal/token.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"math"
"mime"
"net/http"
@@ -26,9 +25,9 @@ import (
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
-// This type is a mirror of oauth2.Token and exists to break
+// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break
// an otherwise-circular dependency. Other internal packages
-// should convert this Token into an oauth2.Token before use.
+// should convert this Token into an [golang.org/x/oauth2.Token] before use.
type Token struct {
// AccessToken is the token that authorizes and authenticates
// the requests.
@@ -50,9 +49,16 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// Raw optionally contains extra metadata from the server
// when updating a token.
- Raw interface{}
+ Raw any
}
// tokenJSON is the struct representing the HTTP response from OAuth2
@@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
return nil
}
-// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
-//
-// Deprecated: this function no longer does anything. Caller code that
-// wants to avoid potential extra HTTP requests made during
-// auto-probing of the provider's auth style should set
-// Endpoint.AuthStyle.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
-
// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
type AuthStyle int
@@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
return c
}
+type authStyleCacheKey struct {
+ url string
+ clientID string
+}
+
// AuthStyleCache is the set of tokenURLs we've successfully used via
// RetrieveToken and which style auth we ended up using.
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
@@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
// small.
type AuthStyleCache struct {
mu sync.Mutex
- m map[string]AuthStyle // keyed by tokenURL
+ m map[authStyleCacheKey]AuthStyle
}
// lookupAuthStyle reports which auth style we last used with tokenURL
// when calling RetrieveToken and whether we have ever done so.
-func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
- style, ok = c.m[tokenURL]
+ style, ok = c.m[authStyleCacheKey{tokenURL, clientID}]
return
}
// setAuthStyle adds an entry to authStyleCache, documented above.
-func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
+func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) {
c.mu.Lock()
defer c.mu.Unlock()
if c.m == nil {
- c.m = make(map[string]AuthStyle)
+ c.m = make(map[authStyleCacheKey]AuthStyle)
}
- c.m[tokenURL] = v
+ c.m[authStyleCacheKey{tokenURL, clientID}] = v
}
// newTokenRequest returns a new *http.Request to retrieve a new token
@@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values {
}
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) {
- needsAuthStyleProbe := authStyle == 0
+ needsAuthStyleProbe := authStyle == AuthStyleUnknown
if needsAuthStyleProbe {
- if style, ok := styleCache.lookupAuthStyle(tokenURL); ok {
+ if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok {
authStyle = style
needsAuthStyleProbe = false
} else {
@@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
token, err = doTokenRoundTrip(ctx, req)
}
if needsAuthStyleProbe && err == nil {
- styleCache.setAuthStyle(tokenURL, authStyle)
+ styleCache.setAuthStyle(tokenURL, clientID, authStyle)
}
// Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request.
@@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
if err != nil {
return nil, err
}
- body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20))
r.Body.Close()
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
@@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
TokenType: tj.TokenType,
RefreshToken: tj.RefreshToken,
Expiry: tj.expiry(),
- Raw: make(map[string]interface{}),
+ ExpiresIn: int64(tj.ExpiresIn),
+ Raw: make(map[string]any),
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
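
The token.go change above keys AuthStyleCache by (tokenURL, clientID) instead of tokenURL alone, so two clients hitting the same endpoint no longer share a probed auth style. A minimal sketch of that composite-key cache, with illustrative names and an int standing in for AuthStyle:

```go
package main

import (
	"fmt"
	"sync"
)

type styleKey struct {
	tokenURL string
	clientID string
}

// styleCache mirrors the shape of the updated cache: a mutex-guarded map
// keyed by both endpoint and client ID.
type styleCache struct {
	mu sync.Mutex
	m  map[styleKey]int
}

func (c *styleCache) lookup(tokenURL, clientID string) (int, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.m[styleKey{tokenURL, clientID}]
	return v, ok
}

func (c *styleCache) set(tokenURL, clientID string, v int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[styleKey]int)
	}
	c.m[styleKey{tokenURL, clientID}] = v
}

func main() {
	var c styleCache
	c.set("https://auth.example/token", "client-a", 1)
	_, shared := c.lookup("https://auth.example/token", "client-b")
	fmt.Println(shared) // false: a different client ID gets its own probe
}
```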
diff --git a/operator/vendor/golang.org/x/oauth2/internal/transport.go b/operator/vendor/golang.org/x/oauth2/internal/transport.go
index b9db01dd..afc0aeb2 100644
--- a/operator/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/operator/vendor/golang.org/x/oauth2/internal/transport.go
@@ -9,8 +9,8 @@ import (
"net/http"
)
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
+// HTTPClient is the context key to use with [context.WithValue]
+// to associate an [*http.Client] value with a context.
var HTTPClient ContextKey
// ContextKey is just an empty struct. It exists so HTTPClient can be
diff --git a/operator/vendor/golang.org/x/oauth2/oauth2.go b/operator/vendor/golang.org/x/oauth2/oauth2.go
index 74f052aa..de34feb8 100644
--- a/operator/vendor/golang.org/x/oauth2/oauth2.go
+++ b/operator/vendor/golang.org/x/oauth2/oauth2.go
@@ -22,9 +22,9 @@ import (
)
// NoContext is the default context you should supply if not using
-// your own context.Context (see https://golang.org/x/net/context).
+// your own [context.Context].
//
-// Deprecated: Use context.Background() or context.TODO() instead.
+// Deprecated: Use [context.Background] or [context.TODO] instead.
var NoContext = context.TODO()
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
@@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
-// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
-// package (https://golang.org/x/oauth2/clientcredentials).
+// For the client credentials 2-legged OAuth2 flow, see the
+// [golang.org/x/oauth2/clientcredentials] package.
type Config struct {
// ClientID is the application's ID.
ClientID string
@@ -46,7 +46,7 @@ type Config struct {
// ClientSecret is the application's secret.
ClientSecret string
- // Endpoint contains the resource server's token endpoint
+ // Endpoint contains the authorization server's token endpoint
// URLs. These are constants specific to each server and are
// often available via site-specific packages, such as
// google.Endpoint or github.Endpoint.
@@ -135,7 +135,7 @@ type setParam struct{ k, v string }
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
-// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters
// to a provider's authorization endpoint.
func SetAuthURLParam(key, value string) AuthCodeOption {
return setParam{key, value}
@@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// request and callback. The authorization server includes this value when
// redirecting the user agent back to the client.
//
-// Opts may include AccessTypeOnline or AccessTypeOffline, as well
-// as ApprovalForce.
+// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well
+// as [ApprovalForce].
//
// To protect against CSRF attacks, opts should include a PKCE challenge
// (S256ChallengeOption). Not all servers support PKCE. An alternative is to
@@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
v := url.Values{
"grant_type": {"password"},
@@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
-// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
//
-// The code will be in the *http.Request.FormValue("code"). Before
-// calling Exchange, be sure to validate FormValue("state") if you are
+// The code will be in the [http.Request.FormValue]("code"). Before
+// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are
// using it to protect against CSRF attacks.
//
// If using PKCE to protect against CSRF attacks, opts should include a
@@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
return NewClient(ctx, c.TokenSource(ctx, t))
}
-// TokenSource returns a TokenSource that returns t until t expires,
+// TokenSource returns a [TokenSource] that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
//
-// Most users will use Config.Client instead.
+// Most users will use [Config.Client] instead.
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
tkr := &tokenRefresher{
ctx: ctx,
@@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
}
}
-// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// tokenRefresher is a TokenSource that makes "grant_type=refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
ctx context.Context // used to get HTTP requests
@@ -288,7 +288,7 @@ func (tf *tokenRefresher) Token() (*Token, error) {
if tf.refreshToken != tk.RefreshToken {
tf.refreshToken = tk.RefreshToken
}
- return tk, err
+ return tk, nil
}
// reuseTokenSource is a TokenSource that holds a single token in memory
@@ -305,8 +305,7 @@ type reuseTokenSource struct {
}
// Token returns the current token if it's still valid, else will
-// refresh the current token (using r.Context for HTTP client
-// information) and return the new one.
+// refresh the current token and return the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
return t, nil
}
-// StaticTokenSource returns a TokenSource that always returns the same token.
+// StaticTokenSource returns a [TokenSource] that always returns the same token.
// Because the provided token t is never refreshed, StaticTokenSource is only
// useful for tokens that never expire.
func StaticTokenSource(t *Token) TokenSource {
@@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) {
return s.t, nil
}
-// HTTPClient is the context key to use with golang.org/x/net/context's
-// WithValue function to associate an *http.Client value with a context.
+// HTTPClient is the context key to use with [context.WithValue]
+// to associate a [*http.Client] value with a context.
var HTTPClient internal.ContextKey
-// NewClient creates an *http.Client from a Context and TokenSource.
+// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource].
// The returned client is not valid beyond the lifetime of the context.
//
-// Note that if a custom *http.Client is provided via the Context it
+// Note that if a custom [*http.Client] is provided via the [context.Context] it
// is used only for token acquisition and is not used to configure the
-// *http.Client returned from NewClient.
+// [*http.Client] returned from NewClient.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
@@ -356,15 +355,19 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
return internal.ContextClient(ctx)
}
+ cc := internal.ContextClient(ctx)
return &http.Client{
Transport: &Transport{
- Base: internal.ContextClient(ctx).Transport,
+ Base: cc.Transport,
Source: ReuseTokenSource(nil, src),
},
+ CheckRedirect: cc.CheckRedirect,
+ Jar: cc.Jar,
+ Timeout: cc.Timeout,
}
}
-// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// ReuseTokenSource returns a [TokenSource] which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
@@ -372,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
-// The initial token t may be nil, in which case the TokenSource is
+// The initial token t may be nil, in which case the [TokenSource] is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
-// TokenSource without adverse effects.
+// [TokenSource] without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
// Don't wrap a reuseTokenSource in itself. That would work,
// but cause an unnecessary number of mutex operations.
@@ -393,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
}
}
-// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
-// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the
+// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
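
With the NewClient change above, the base client supplied via the context (oauth2.HTTPClient) now contributes its CheckRedirect, Jar, and Timeout to the returned client, not just its Transport. A minimal sketch of how that surfaces to callers:

package example

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// newAuthedClient returns an OAuth2-aware client that, as of this update,
// also inherits the base client's Timeout (and Jar/CheckRedirect if set).
func newAuthedClient(ctx context.Context, ts oauth2.TokenSource) *http.Client {
	base := &http.Client{Timeout: 15 * time.Second}
	ctx = context.WithValue(ctx, oauth2.HTTPClient, base)
	return oauth2.NewClient(ctx, ts)
}
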
diff --git a/operator/vendor/golang.org/x/oauth2/pkce.go b/operator/vendor/golang.org/x/oauth2/pkce.go
index 6a95da97..cea8374d 100644
--- a/operator/vendor/golang.org/x/oauth2/pkce.go
+++ b/operator/vendor/golang.org/x/oauth2/pkce.go
@@ -1,6 +1,7 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package oauth2
import (
@@ -20,9 +21,9 @@ const (
// This follows recommendations in RFC 7636.
//
// A fresh verifier should be generated for each authorization.
-// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
-// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
-// (or Config.DeviceAccessToken).
+// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
+// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken]
+// with [VerifierOption].
func GenerateVerifier() string {
// "RECOMMENDED that the output of a suitable random number generator be
// used to create a 32-octet sequence. The octet sequence is then
@@ -36,22 +37,22 @@ func GenerateVerifier() string {
return base64.RawURLEncoding.EncodeToString(data)
}
-// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
-// passed to Config.Exchange or Config.DeviceAccessToken only.
+// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be
+// passed to [Config.Exchange] or [Config.DeviceAccessToken].
func VerifierOption(verifier string) AuthCodeOption {
return setParam{k: codeVerifierKey, v: verifier}
}
// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
//
-// Prefer to use S256ChallengeOption where possible.
+// Prefer to use [S256ChallengeOption] where possible.
func S256ChallengeFromVerifier(verifier string) string {
sha := sha256.Sum256([]byte(verifier))
return base64.RawURLEncoding.EncodeToString(sha[:])
}
// S256ChallengeOption derives a PKCE code challenge derived from verifier with
-// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
+// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
return challengeOption{
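
The reworded PKCE comments describe the intended call sequence: generate a fresh verifier per authorization, pass its S256 challenge to AuthCodeURL, and pass the verifier itself to Exchange. A short sketch of that flow, where conf, state, and code are assumed to come from the surrounding application:

package example

import (
	"context"

	"golang.org/x/oauth2"
)

func pkceExchange(ctx context.Context, conf *oauth2.Config, state, code string) (*oauth2.Token, error) {
	verifier := oauth2.GenerateVerifier()
	// In a real flow the user is first redirected to this URL; the callback
	// later supplies state and code.
	_ = conf.AuthCodeURL(state, oauth2.S256ChallengeOption(verifier))
	return conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
}
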
diff --git a/operator/vendor/golang.org/x/oauth2/token.go b/operator/vendor/golang.org/x/oauth2/token.go
index 109997d7..239ec329 100644
--- a/operator/vendor/golang.org/x/oauth2/token.go
+++ b/operator/vendor/golang.org/x/oauth2/token.go
@@ -44,7 +44,7 @@ type Token struct {
// Expiry is the optional expiration time of the access token.
//
- // If zero, TokenSource implementations will reuse the same
+ // If zero, [TokenSource] implementations will reuse the same
// token forever and RefreshToken or equivalent
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
@@ -58,7 +58,7 @@ type Token struct {
// raw optionally contains extra metadata from the server
// when updating a token.
- raw interface{}
+ raw any
// expiryDelta is used to calculate when a token is considered
// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
@@ -86,16 +86,16 @@ func (t *Token) Type() string {
// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
-// This method is unnecessary when using Transport or an HTTP Client
+// This method is unnecessary when using [Transport] or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}
-// WithExtra returns a new Token that's a clone of t, but using the
+// WithExtra returns a new [Token] that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
-func (t *Token) WithExtra(extra interface{}) *Token {
+func (t *Token) WithExtra(extra any) *Token {
t2 := new(Token)
*t2 = *t
t2.raw = extra
@@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token {
// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
-func (t *Token) Extra(key string) interface{} {
- if raw, ok := t.raw.(map[string]interface{}); ok {
+func (t *Token) Extra(key string) any {
+ if raw, ok := t.raw.(map[string]any); ok {
return raw[key]
}
@@ -163,13 +163,14 @@ func tokenFromInternal(t *internal.Token) *Token {
TokenType: t.TokenType,
RefreshToken: t.RefreshToken,
Expiry: t.Expiry,
+ ExpiresIn: t.ExpiresIn,
raw: t.Raw,
}
}
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
-// with an error..
+// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
if err != nil {
diff --git a/operator/vendor/golang.org/x/oauth2/transport.go b/operator/vendor/golang.org/x/oauth2/transport.go
index 90657915..8bbebbac 100644
--- a/operator/vendor/golang.org/x/oauth2/transport.go
+++ b/operator/vendor/golang.org/x/oauth2/transport.go
@@ -11,12 +11,12 @@ import (
"sync"
)
-// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
-// wrapping a base RoundTripper and adding an Authorization header
-// with a token from the supplied Sources.
+// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests,
+// wrapping a base [http.RoundTripper] and adding an Authorization header
+// with a token from the supplied [TokenSource].
//
// Transport is a low-level mechanism. Most code will use the
-// higher-level Config.Client method instead.
+// higher-level [Config.Client] method instead.
type Transport struct {
// Source supplies the token to add to outgoing requests'
// Authorization headers.
@@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, err
}
- req2 := cloneRequest(req) // per RoundTripper contract
+ req2 := req.Clone(req.Context())
token.SetAuthHeader(req2)
// req.Body is assumed to be closed by the base RoundTripper.
@@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper {
}
return http.DefaultTransport
}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
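
The removed cloneRequest helper above is superseded by the standard library's Request.Clone, which already deep-copies the header map and so satisfies the RoundTripper rule that a transport must not mutate the caller's request. A tiny sketch of the equivalent pattern:

package example

import "net/http"

// withBearer returns a copy of req carrying an Authorization header; the
// original request's headers stay untouched because Clone deep-copies them.
func withBearer(req *http.Request, token string) *http.Request {
	req2 := req.Clone(req.Context())
	req2.Header.Set("Authorization", "Bearer "+token)
	return req2
}
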
diff --git a/operator/vendor/golang.org/x/sync/errgroup/errgroup.go b/operator/vendor/golang.org/x/sync/errgroup/errgroup.go
index a4ea5d14..2f45dbc8 100644
--- a/operator/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/operator/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
-// cancelation for groups of goroutines working on subtasks of a common task.
+// cancellation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
@@ -18,7 +18,7 @@ import (
type token struct{}
// A Group is a collection of goroutines working on subtasks that are part of
-// the same overall task.
+// the same overall task. A Group should not be reused for different tasks.
//
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
@@ -61,11 +61,14 @@ func (g *Group) Wait() error {
}
// Go calls the given function in a new goroutine.
+//
+// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of
-// active goroutines in the group exceeding the configured limit.
+// goroutines in the group exceeding the configured limit.
//
-// The first call to return a non-nil error cancels the group's context, if the
-// group was created by calling WithContext. The error will be returned by Wait.
+// The first goroutine in the group that returns a non-nil error will
+// cancel the associated Context, if any. The error will be returned
+// by Wait.
func (g *Group) Go(f func() error) {
if g.sem != nil {
g.sem <- token{}
@@ -75,6 +78,18 @@ func (g *Group) Go(f func() error) {
go func() {
defer g.done()
+ // It is tempting to propagate panics from f()
+ // up to the goroutine that calls Wait, but
+ // it creates more problems than it solves:
+ // - it delays panics arbitrarily,
+ // making bugs harder to detect;
+ // - it turns f's panic stack into a mere value,
+ // hiding it from crash-monitoring tools;
+ // - it risks deadlocks that hide the panic entirely,
+ // if f's panic leaves the program in a state
+ // that prevents the Wait call from being reached.
+ // See #53757, #74275, #74304, #74306.
+
if err := f(); err != nil {
g.errOnce.Do(func() {
g.err = err
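
The clarified errgroup comments spell out the usage contract: every Go call happens before Wait, the limit set with SetLimit bounds in-flight goroutines, and the first non-nil error cancels the group's context. A small sketch of that pattern; the fetch targets are placeholders:

package example

import (
	"context"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func fetchAll(ctx context.Context, urls []string) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(4) // at most four fetches in flight
	for _, u := range urls {
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // cancels ctx for the remaining fetches
			}
			return resp.Body.Close()
		})
	}
	return g.Wait() // returns the first error, after all goroutines finish
}
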
diff --git a/operator/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/operator/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
deleted file mode 100644
index 73687de7..00000000
--- a/operator/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.5
-
-package plan9
-
-import "syscall"
-
-func fixwd() {
- syscall.Fixwd()
-}
-
-func Getwd() (wd string, err error) {
- return syscall.Getwd()
-}
-
-func Chdir(path string) error {
- return syscall.Chdir(path)
-}
diff --git a/operator/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/operator/vendor/golang.org/x/sys/plan9/pwd_plan9.go
index fb945821..7a76489d 100644
--- a/operator/vendor/golang.org/x/sys/plan9/pwd_plan9.go
+++ b/operator/vendor/golang.org/x/sys/plan9/pwd_plan9.go
@@ -2,22 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !go1.5
-
package plan9
+import "syscall"
+
func fixwd() {
+ syscall.Fixwd()
}
func Getwd() (wd string, err error) {
- fd, err := open(".", O_RDONLY)
- if err != nil {
- return "", err
- }
- defer Close(fd)
- return Fd2path(fd)
+ return syscall.Getwd()
}
func Chdir(path string) error {
- return chdir(path)
+ return syscall.Chdir(path)
}
diff --git a/operator/vendor/golang.org/x/sys/unix/affinity_linux.go b/operator/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81ac..3ea47038 100644
--- a/operator/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error {
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
+ clear(s[:])
+}
+
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet] so this is an
+// efficient way of resetting the CPU affinity of a process.
+func (s *CPUSet) Fill() {
for i := range s {
- s[i] = 0
+ s[i] = ^cpuMask(0)
}
}
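
The new CPUSet.Fill above sets every bit in the mask; combined with SchedSetaffinity, which silently drops bits for CPUs that do not exist, it gives a cheap way to reset a process's affinity. An illustrative Linux-only sketch:

//go:build linux

package example

import "golang.org/x/sys/unix"

// resetAffinity allows pid to run on every online CPU again.
func resetAffinity(pid int) error {
	var set unix.CPUSet
	set.Fill() // all possible CPU bits; invalid ones are ignored by the kernel
	return unix.SchedSetaffinity(pid, &set)
}
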
diff --git a/operator/vendor/golang.org/x/sys/unix/fdset.go b/operator/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18c..62ed1264 100644
--- a/operator/vendor/golang.org/x/sys/unix/fdset.go
+++ b/operator/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
+ clear(fds.Bits[:])
}
diff --git a/operator/vendor/golang.org/x/sys/unix/ifreq_linux.go b/operator/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840ae..309f5a2b 100644
--- a/operator/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
+ clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/operator/vendor/golang.org/x/sys/unix/mkall.sh b/operator/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d37..d0ed6119 100644
--- a/operator/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/operator/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
diff --git a/operator/vendor/golang.org/x/sys/unix/mkerrors.sh b/operator/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6c..42517077 100644
--- a/operator/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/operator/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -226,6 +226,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -349,6 +350,9 @@ struct ltchars {
#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
'
includes_NetBSD='
@@ -526,6 +530,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
diff --git a/operator/vendor/golang.org/x/sys/unix/syscall_darwin.go b/operator/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 099867de..7838ca5d 100644
--- a/operator/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/operator/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,6 +602,95 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return
}
+const minIovec = 8
+
+func Readv(fd int, iovs [][]byte) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = readv(fd, iovecs)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = preadv(fd, iovecs, offset)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Writev(fd int, iovs [][]byte) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = writev(fd, iovecs)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = pwritev(fd, iovecs, offset)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+ for _, b := range bs {
+ var v Iovec
+ v.SetLen(len(b))
+ if len(b) > 0 {
+ v.Base = &b[0]
+ } else {
+ v.Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ vecs = append(vecs, v)
+ }
+ return vecs
+}
+
+func writevRacedetect(iovecs []Iovec, n int) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+}
+
+func readvRacedetect(iovecs []Iovec, n int, err error) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+}
+
//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
@@ -705,3 +794,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readv(fd int, iovecs []Iovec) (n int, err error)
+//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error)
+//sys writev(fd int, iovecs []Iovec) (n int, err error)
+//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error)
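
The block above ports the vectored I/O helpers (Readv, Preadv, Writev, Pwritev) to darwin, matching the Linux API already exposed by this package. A minimal sketch of gathering two buffers into a single write:

package example

import "golang.org/x/sys/unix"

// writeHeaderAndBody writes both buffers to fd with one writev call.
func writeHeaderAndBody(fd int, header, body []byte) (int, error) {
	return unix.Writev(fd, [][]byte{header, body})
}
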
diff --git a/operator/vendor/golang.org/x/sys/unix/syscall_linux.go b/operator/vendor/golang.org/x/sys/unix/syscall_linux.go
index 230a9454..06c0eea6 100644
--- a/operator/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@ package unix
import (
"encoding/binary"
+ "slices"
"strconv"
"syscall"
"time"
@@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
return nil, 0, EINVAL
}
sa.raw.Family = AF_UNIX
- for i := 0; i < n; i++ {
+ for i := range n {
sa.raw.Path[i] = int8(name[i])
}
// length is family (uint16), name, NUL.
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
psm[0] = byte(sa.PSM)
psm[1] = byte(sa.PSM >> 8)
- for i := 0; i < len(sa.Addr); i++ {
+ for i := range len(sa.Addr) {
sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
}
cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
@@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i] = rx[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+4] = tx[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
@@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
n := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Addr[i] = n[i]
}
p := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+8] = p[i]
}
sa.raw.Addr[12] = sa.Addr
@@ -800,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
- for i := 14; i < 14+IFNAMSIZ; i++ {
- sa.raw[i] = 0
- }
+ clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
@@ -911,7 +910,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) {
// These are EBCDIC encoded by the kernel, but we still need to pad them
// with blanks. Initializing with blanks allows the caller to feed in either
// a padded or an unpadded string.
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Nodeid[i] = ' '
sa.raw.User_id[i] = ' '
sa.raw.Name[i] = ' '
@@ -1148,7 +1147,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
var user [8]byte
var name [8]byte
- for i := 0; i < 8; i++ {
+ for i := range 8 {
user[i] = byte(pp.User_id[i])
name[i] = byte(pp.Name[i])
}
@@ -1173,11 +1172,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
name := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
name[i] = pp.Addr[i]
}
pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
pgn[i] = pp.Addr[i+8]
}
addr := (*[1]byte)(unsafe.Pointer(&sa.Addr))
@@ -1188,11 +1187,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
rx[i] = pp.Addr[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
tx[i] = pp.Addr[i+4]
}
return sa, nil
@@ -2216,10 +2215,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2270,10 +2266,7 @@ func writevRacedetect(iovecs []Iovec, n int) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2320,12 +2313,7 @@ func isGroupMember(gid int) bool {
return false
}
- for _, g := range groups {
- if g == gid {
- return true
- }
- }
- return false
+ return slices.Contains(groups, gid)
}
func isCapDacOverrideSet() bool {
@@ -2655,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
+
+//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
+
+func SetMemPolicy(mode int, mask *CPUSet) error {
+ return setMemPolicy(mode, mask, _CPU_SETSIZE)
+}
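
SetMemPolicy above wraps set_mempolicy(2), reusing CPUSet as the node bitmask. A hypothetical Linux-only sketch that binds the calling thread's allocations to NUMA node 0; it assumes MPOL_BIND is among the package's generated constants:

//go:build linux

package example

import "golang.org/x/sys/unix"

// bindToNode0 restricts this thread's future allocations to NUMA node 0.
// MPOL_BIND is assumed to exist among the generated zerrors constants.
func bindToNode0() error {
	var nodes unix.CPUSet // the wrapper reuses CPUSet as the node mask
	nodes.Set(0)
	return unix.SetMemPolicy(unix.MPOL_BIND, &nodes)
}
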
diff --git a/operator/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/operator/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 88162099..34a46769 100644
--- a/operator/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/operator/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ )
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
/*
* Exposed directly
*/
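
Getvfsstat above mirrors NetBSD's getvfsstat(2): with an empty slice only the number of mounted filesystems is reported, so the usual pattern is count-then-fill. A NetBSD-only sketch, assuming that two-call convention:

//go:build netbsd

package example

import "golang.org/x/sys/unix"

// mountedFilesystems returns statistics for every mounted filesystem.
func mountedFilesystems() ([]unix.Statvfs_t, error) {
	n, err := unix.Getvfsstat(nil, unix.ST_WAIT) // nil buffer: just the count
	if err != nil {
		return nil, err
	}
	buf := make([]unix.Statvfs_t, n)
	if _, err := unix.Getvfsstat(buf, unix.ST_WAIT); err != nil {
		return nil, err
	}
	return buf, nil
}
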
diff --git a/operator/vendor/golang.org/x/sys/unix/syscall_solaris.go b/operator/vendor/golang.org/x/sys/unix/syscall_solaris.go
index abc39554..18a3d9bd 100644
--- a/operator/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/operator/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
-//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
+//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error)
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 4f432bfe..d0a75da5 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -319,6 +319,7 @@ const (
AUDIT_INTEGRITY_POLICY_RULE = 0x70f
AUDIT_INTEGRITY_RULE = 0x70d
AUDIT_INTEGRITY_STATUS = 0x70a
+ AUDIT_INTEGRITY_USERSPACE = 0x710
AUDIT_IPC = 0x517
AUDIT_IPC_SET_PERM = 0x51f
AUDIT_IPE_ACCESS = 0x58c
@@ -327,6 +328,8 @@ const (
AUDIT_KERNEL = 0x7d0
AUDIT_KERNEL_OTHER = 0x524
AUDIT_KERN_MODULE = 0x532
+ AUDIT_LANDLOCK_ACCESS = 0x58f
+ AUDIT_LANDLOCK_DOMAIN = 0x590
AUDIT_LAST_FEATURE = 0x1
AUDIT_LAST_KERN_ANOM_MSG = 0x707
AUDIT_LAST_USER_MSG = 0x4af
@@ -491,6 +494,7 @@ const (
BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20
BPF_F_NETFILTER_IP_DEFRAG = 0x1
+ BPF_F_PREORDER = 0x40
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REDIRECT_FLAGS = 0x19
BPF_F_REPLACE = 0x4
@@ -527,6 +531,7 @@ const (
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LL_OFF = -0x200000
+ BPF_LOAD_ACQ = 0x100
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000
@@ -554,6 +559,7 @@ const (
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
+ BPF_STORE_REL = 0x110
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAG_SIZE = 0x8
@@ -843,24 +849,90 @@ const (
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
- DM_VERSION_EXTRA = "-ioctl (2023-03-01)"
+ DM_VERSION_EXTRA = "-ioctl (2025-04-28)"
DM_VERSION_MAJOR = 0x4
- DM_VERSION_MINOR = 0x30
+ DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
+ DT_ADDRRNGHI = 0x6ffffeff
+ DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6
DT_CHR = 0x2
+ DT_DEBUG = 0x15
DT_DIR = 0x4
+ DT_ENCODING = 0x20
DT_FIFO = 0x1
+ DT_FINI = 0xd
+ DT_FLAGS_1 = 0x6ffffffb
+ DT_GNU_HASH = 0x6ffffef5
+ DT_HASH = 0x4
+ DT_HIOS = 0x6ffff000
+ DT_HIPROC = 0x7fffffff
+ DT_INIT = 0xc
+ DT_JMPREL = 0x17
DT_LNK = 0xa
+ DT_LOOS = 0x6000000d
+ DT_LOPROC = 0x70000000
+ DT_NEEDED = 0x1
+ DT_NULL = 0x0
+ DT_PLTGOT = 0x3
+ DT_PLTREL = 0x14
+ DT_PLTRELSZ = 0x2
DT_REG = 0x8
+ DT_REL = 0x11
+ DT_RELA = 0x7
+ DT_RELACOUNT = 0x6ffffff9
+ DT_RELAENT = 0x9
+ DT_RELASZ = 0x8
+ DT_RELCOUNT = 0x6ffffffa
+ DT_RELENT = 0x13
+ DT_RELSZ = 0x12
+ DT_RPATH = 0xf
DT_SOCK = 0xc
+ DT_SONAME = 0xe
+ DT_STRSZ = 0xa
+ DT_STRTAB = 0x5
+ DT_SYMBOLIC = 0x10
+ DT_SYMENT = 0xb
+ DT_SYMTAB = 0x6
+ DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0
+ DT_VALRNGHI = 0x6ffffdff
+ DT_VALRNGLO = 0x6ffffd00
+ DT_VERDEF = 0x6ffffffc
+ DT_VERDEFNUM = 0x6ffffffd
+ DT_VERNEED = 0x6ffffffe
+ DT_VERNEEDNUM = 0x6fffffff
+ DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe
ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53
+ EI_CLASS = 0x4
+ EI_DATA = 0x5
+ EI_MAG0 = 0x0
+ EI_MAG1 = 0x1
+ EI_MAG2 = 0x2
+ EI_MAG3 = 0x3
+ EI_NIDENT = 0x10
+ EI_OSABI = 0x7
+ EI_PAD = 0x8
+ EI_VERSION = 0x6
+ ELFCLASS32 = 0x1
+ ELFCLASS64 = 0x2
+ ELFCLASSNONE = 0x0
+ ELFCLASSNUM = 0x3
+ ELFDATA2LSB = 0x1
+ ELFDATA2MSB = 0x2
+ ELFDATANONE = 0x0
+ ELFMAG = "\177ELF"
+ ELFMAG0 = 0x7f
+ ELFMAG1 = 'E'
+ ELFMAG2 = 'L'
+ ELFMAG3 = 'F'
+ ELFOSABI_LINUX = 0x3
+ ELFOSABI_NONE = 0x0
EM_386 = 0x3
EM_486 = 0x6
EM_68K = 0x4
@@ -936,11 +1008,10 @@ const (
EPOLL_CTL_MOD = 0x3
EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
- ESP_V4_FLOW = 0xa
- ESP_V6_FLOW = 0xc
- ETHER_FLOW = 0x12
ETHTOOL_BUSINFO_LEN = 0x20
ETHTOOL_EROMVERS_LEN = 0x20
+ ETHTOOL_FAMILY_NAME = "ethtool"
+ ETHTOOL_FAMILY_VERSION = 0x1
ETHTOOL_FEC_AUTO = 0x2
ETHTOOL_FEC_BASER = 0x10
ETHTOOL_FEC_LLRS = 0x20
@@ -1147,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
+ ET_CORE = 0x4
+ ET_DYN = 0x3
+ ET_EXEC = 0x2
+ ET_HIPROC = 0xffff
+ ET_LOPROC = 0xff00
+ ET_NONE = 0x0
+ ET_REL = 0x1
EV_ABS = 0x3
EV_CNT = 0x20
+ EV_CURRENT = 0x1
EV_FF = 0x15
EV_FF_STATUS = 0x17
EV_KEY = 0x1
EV_LED = 0x11
EV_MAX = 0x1f
EV_MSC = 0x4
+ EV_NONE = 0x0
+ EV_NUM = 0x2
EV_PWR = 0x16
EV_REL = 0x2
EV_REP = 0x14
@@ -1203,13 +1284,18 @@ const (
FAN_DENY = 0x2
FAN_ENABLE_AUDIT = 0x40
FAN_EPIDFD = -0x2
+ FAN_ERRNO_BITS = 0x8
+ FAN_ERRNO_MASK = 0xff
+ FAN_ERRNO_SHIFT = 0x18
FAN_EVENT_INFO_TYPE_DFID = 0x3
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_INFO_TYPE_MNT = 0x7
FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc
FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa
FAN_EVENT_INFO_TYPE_PIDFD = 0x4
+ FAN_EVENT_INFO_TYPE_RANGE = 0x6
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
FAN_FS_ERROR = 0x8000
@@ -1224,9 +1310,12 @@ const (
FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0
+ FAN_MARK_MNTNS = 0x110
FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8
FAN_MARK_REMOVE = 0x2
+ FAN_MNT_ATTACH = 0x1000000
+ FAN_MNT_DETACH = 0x2000000
FAN_MODIFY = 0x2
FAN_MOVE = 0xc0
FAN_MOVED_FROM = 0x40
@@ -1240,6 +1329,7 @@ const (
FAN_OPEN_EXEC = 0x1000
FAN_OPEN_EXEC_PERM = 0x40000
FAN_OPEN_PERM = 0x10000
+ FAN_PRE_ACCESS = 0x100000
FAN_Q_OVERFLOW = 0x4000
FAN_RENAME = 0x10000000
FAN_REPORT_DFID_NAME = 0xc00
@@ -1247,6 +1337,7 @@ const (
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
+ FAN_REPORT_MNT = 0x4000
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
FAN_REPORT_TARGET_FID = 0x1000
@@ -1266,6 +1357,7 @@ const (
FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436
+ FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"
FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8
@@ -1574,7 +1666,6 @@ const (
IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b
- IPV6_FLOW = 0x11
IPV6_FREEBIND = 0x4e
IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34
@@ -1625,7 +1716,6 @@ const (
IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c
- IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
@@ -1687,7 +1777,6 @@ const (
IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32
- IP_USER_FLOW = 0xd
IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20
@@ -1809,7 +1898,11 @@ const (
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
+ LANDLOCK_CREATE_RULESET_ERRATA = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2
+ LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1
LANDLOCK_SCOPE_SIGNAL = 0x2
LINUX_REBOOT_CMD_CAD_OFF = 0x0
@@ -2259,7 +2352,167 @@ const (
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
+ NN_386_IOPERM = "LINUX"
+ NN_386_TLS = "LINUX"
+ NN_ARC_V2 = "LINUX"
+ NN_ARM_FPMR = "LINUX"
+ NN_ARM_GCS = "LINUX"
+ NN_ARM_HW_BREAK = "LINUX"
+ NN_ARM_HW_WATCH = "LINUX"
+ NN_ARM_PACA_KEYS = "LINUX"
+ NN_ARM_PACG_KEYS = "LINUX"
+ NN_ARM_PAC_ENABLED_KEYS = "LINUX"
+ NN_ARM_PAC_MASK = "LINUX"
+ NN_ARM_POE = "LINUX"
+ NN_ARM_SSVE = "LINUX"
+ NN_ARM_SVE = "LINUX"
+ NN_ARM_SYSTEM_CALL = "LINUX"
+ NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
+ NN_ARM_TLS = "LINUX"
+ NN_ARM_VFP = "LINUX"
+ NN_ARM_ZA = "LINUX"
+ NN_ARM_ZT = "LINUX"
+ NN_AUXV = "CORE"
+ NN_FILE = "CORE"
+ NN_GNU_PROPERTY_TYPE_0 = "GNU"
+ NN_LOONGARCH_CPUCFG = "LINUX"
+ NN_LOONGARCH_CSR = "LINUX"
+ NN_LOONGARCH_HW_BREAK = "LINUX"
+ NN_LOONGARCH_HW_WATCH = "LINUX"
+ NN_LOONGARCH_LASX = "LINUX"
+ NN_LOONGARCH_LBT = "LINUX"
+ NN_LOONGARCH_LSX = "LINUX"
+ NN_MIPS_DSP = "LINUX"
+ NN_MIPS_FP_MODE = "LINUX"
+ NN_MIPS_MSA = "LINUX"
+ NN_PPC_DEXCR = "LINUX"
+ NN_PPC_DSCR = "LINUX"
+ NN_PPC_EBB = "LINUX"
+ NN_PPC_HASHKEYR = "LINUX"
+ NN_PPC_PKEY = "LINUX"
+ NN_PPC_PMU = "LINUX"
+ NN_PPC_PPR = "LINUX"
+ NN_PPC_SPE = "LINUX"
+ NN_PPC_TAR = "LINUX"
+ NN_PPC_TM_CDSCR = "LINUX"
+ NN_PPC_TM_CFPR = "LINUX"
+ NN_PPC_TM_CGPR = "LINUX"
+ NN_PPC_TM_CPPR = "LINUX"
+ NN_PPC_TM_CTAR = "LINUX"
+ NN_PPC_TM_CVMX = "LINUX"
+ NN_PPC_TM_CVSX = "LINUX"
+ NN_PPC_TM_SPR = "LINUX"
+ NN_PPC_VMX = "LINUX"
+ NN_PPC_VSX = "LINUX"
+ NN_PRFPREG = "CORE"
+ NN_PRPSINFO = "CORE"
+ NN_PRSTATUS = "CORE"
+ NN_PRXFPREG = "LINUX"
+ NN_RISCV_CSR = "LINUX"
+ NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
+ NN_RISCV_VECTOR = "LINUX"
+ NN_S390_CTRS = "LINUX"
+ NN_S390_GS_BC = "LINUX"
+ NN_S390_GS_CB = "LINUX"
+ NN_S390_HIGH_GPRS = "LINUX"
+ NN_S390_LAST_BREAK = "LINUX"
+ NN_S390_PREFIX = "LINUX"
+ NN_S390_PV_CPU_DATA = "LINUX"
+ NN_S390_RI_CB = "LINUX"
+ NN_S390_SYSTEM_CALL = "LINUX"
+ NN_S390_TDB = "LINUX"
+ NN_S390_TIMER = "LINUX"
+ NN_S390_TODCMP = "LINUX"
+ NN_S390_TODPREG = "LINUX"
+ NN_S390_VXRS_HIGH = "LINUX"
+ NN_S390_VXRS_LOW = "LINUX"
+ NN_SIGINFO = "CORE"
+ NN_TASKSTRUCT = "CORE"
+ NN_VMCOREDD = "LINUX"
+ NN_X86_SHSTK = "LINUX"
+ NN_X86_XSAVE_LAYOUT = "LINUX"
+ NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673
+ NT_386_IOPERM = 0x201
+ NT_386_TLS = 0x200
+ NT_ARC_V2 = 0x600
+ NT_ARM_FPMR = 0x40e
+ NT_ARM_GCS = 0x410
+ NT_ARM_HW_BREAK = 0x402
+ NT_ARM_HW_WATCH = 0x403
+ NT_ARM_PACA_KEYS = 0x407
+ NT_ARM_PACG_KEYS = 0x408
+ NT_ARM_PAC_ENABLED_KEYS = 0x40a
+ NT_ARM_PAC_MASK = 0x406
+ NT_ARM_POE = 0x40f
+ NT_ARM_SSVE = 0x40b
+ NT_ARM_SVE = 0x405
+ NT_ARM_SYSTEM_CALL = 0x404
+ NT_ARM_TAGGED_ADDR_CTRL = 0x409
+ NT_ARM_TLS = 0x401
+ NT_ARM_VFP = 0x400
+ NT_ARM_ZA = 0x40c
+ NT_ARM_ZT = 0x40d
+ NT_AUXV = 0x6
+ NT_FILE = 0x46494c45
+ NT_GNU_PROPERTY_TYPE_0 = 0x5
+ NT_LOONGARCH_CPUCFG = 0xa00
+ NT_LOONGARCH_CSR = 0xa01
+ NT_LOONGARCH_HW_BREAK = 0xa05
+ NT_LOONGARCH_HW_WATCH = 0xa06
+ NT_LOONGARCH_LASX = 0xa03
+ NT_LOONGARCH_LBT = 0xa04
+ NT_LOONGARCH_LSX = 0xa02
+ NT_MIPS_DSP = 0x800
+ NT_MIPS_FP_MODE = 0x801
+ NT_MIPS_MSA = 0x802
+ NT_PPC_DEXCR = 0x111
+ NT_PPC_DSCR = 0x105
+ NT_PPC_EBB = 0x106
+ NT_PPC_HASHKEYR = 0x112
+ NT_PPC_PKEY = 0x110
+ NT_PPC_PMU = 0x107
+ NT_PPC_PPR = 0x104
+ NT_PPC_SPE = 0x101
+ NT_PPC_TAR = 0x103
+ NT_PPC_TM_CDSCR = 0x10f
+ NT_PPC_TM_CFPR = 0x109
+ NT_PPC_TM_CGPR = 0x108
+ NT_PPC_TM_CPPR = 0x10e
+ NT_PPC_TM_CTAR = 0x10d
+ NT_PPC_TM_CVMX = 0x10a
+ NT_PPC_TM_CVSX = 0x10b
+ NT_PPC_TM_SPR = 0x10c
+ NT_PPC_VMX = 0x100
+ NT_PPC_VSX = 0x102
+ NT_PRFPREG = 0x2
+ NT_PRPSINFO = 0x3
+ NT_PRSTATUS = 0x1
+ NT_PRXFPREG = 0x46e62b7f
+ NT_RISCV_CSR = 0x900
+ NT_RISCV_TAGGED_ADDR_CTRL = 0x902
+ NT_RISCV_VECTOR = 0x901
+ NT_S390_CTRS = 0x304
+ NT_S390_GS_BC = 0x30c
+ NT_S390_GS_CB = 0x30b
+ NT_S390_HIGH_GPRS = 0x300
+ NT_S390_LAST_BREAK = 0x306
+ NT_S390_PREFIX = 0x305
+ NT_S390_PV_CPU_DATA = 0x30e
+ NT_S390_RI_CB = 0x30d
+ NT_S390_SYSTEM_CALL = 0x307
+ NT_S390_TDB = 0x308
+ NT_S390_TIMER = 0x301
+ NT_S390_TODCMP = 0x302
+ NT_S390_TODPREG = 0x303
+ NT_S390_VXRS_HIGH = 0x30a
+ NT_S390_VXRS_LOW = 0x309
+ NT_SIGINFO = 0x53494749
+ NT_TASKSTRUCT = 0x4
+ NT_VMCOREDD = 0x700
+ NT_X86_SHSTK = 0x204
+ NT_X86_XSAVE_LAYOUT = 0x205
+ NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8
OFDEL = 0x80
@@ -2446,6 +2699,59 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
+ PF_ALG = 0x26
+ PF_APPLETALK = 0x5
+ PF_ASH = 0x12
+ PF_ATMPVC = 0x8
+ PF_ATMSVC = 0x14
+ PF_AX25 = 0x3
+ PF_BLUETOOTH = 0x1f
+ PF_BRIDGE = 0x7
+ PF_CAIF = 0x25
+ PF_CAN = 0x1d
+ PF_DECnet = 0xc
+ PF_ECONET = 0x13
+ PF_FILE = 0x1
+ PF_IB = 0x1b
+ PF_IEEE802154 = 0x24
+ PF_INET = 0x2
+ PF_INET6 = 0xa
+ PF_IPX = 0x4
+ PF_IRDA = 0x17
+ PF_ISDN = 0x22
+ PF_IUCV = 0x20
+ PF_KCM = 0x29
+ PF_KEY = 0xf
+ PF_LLC = 0x1a
+ PF_LOCAL = 0x1
+ PF_MAX = 0x2e
+ PF_MCTP = 0x2d
+ PF_MPLS = 0x1c
+ PF_NETBEUI = 0xd
+ PF_NETLINK = 0x10
+ PF_NETROM = 0x6
+ PF_NFC = 0x27
+ PF_PACKET = 0x11
+ PF_PHONET = 0x23
+ PF_PPPOX = 0x18
+ PF_QIPCRTR = 0x2a
+ PF_R = 0x4
+ PF_RDS = 0x15
+ PF_ROSE = 0xb
+ PF_ROUTE = 0x10
+ PF_RXRPC = 0x21
+ PF_SECURITY = 0xe
+ PF_SMC = 0x2b
+ PF_SNA = 0x16
+ PF_TIPC = 0x1e
+ PF_UNIX = 0x1
+ PF_UNSPEC = 0x0
+ PF_VSOCK = 0x28
+ PF_W = 0x2
+ PF_WANPIPE = 0x19
+ PF_X = 0x1
+ PF_X25 = 0x9
+ PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
@@ -2485,6 +2791,10 @@ const (
PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2
+ PR_FUTEX_HASH = 0x4e
+ PR_FUTEX_HASH_GET_IMMUTABLE = 0x3
+ PR_FUTEX_HASH_GET_SLOTS = 0x2
+ PR_FUTEX_HASH_SET_SLOTS = 0x1
PR_GET_AUXV = 0x41555856
PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3
@@ -2644,6 +2954,10 @@ const (
PR_TAGGED_ADDR_ENABLE = 0x1
PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMER_CREATE_RESTORE_IDS = 0x4d
+ PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2
+ PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0
+ PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1
PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1
@@ -2724,6 +3038,7 @@ const (
PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SET_SYSCALL_INFO = 0x4212
PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
@@ -2732,6 +3047,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0
+ PT_AARCH64_MEMTAG_MTE = 0x70000002
+ PT_DYNAMIC = 0x2
+ PT_GNU_EH_FRAME = 0x6474e550
+ PT_GNU_PROPERTY = 0x6474e553
+ PT_GNU_RELRO = 0x6474e552
+ PT_GNU_STACK = 0x6474e551
+ PT_HIOS = 0x6fffffff
+ PT_HIPROC = 0x7fffffff
+ PT_INTERP = 0x3
+ PT_LOAD = 0x1
+ PT_LOOS = 0x60000000
+ PT_LOPROC = 0x70000000
+ PT_NOTE = 0x4
+ PT_NULL = 0x0
+ PT_PHDR = 0x6
+ PT_SHLIB = 0x5
+ PT_TLS = 0x7
P_ALL = 0x0
P_PGID = 0x2
P_PID = 0x1
@@ -2787,7 +3119,7 @@ const (
RTAX_UNSPEC = 0x0
RTAX_WINDOW = 0x3
RTA_ALIGNTO = 0x4
- RTA_MAX = 0x1e
+ RTA_MAX = 0x1f
RTCF_DIRECTSRC = 0x4000000
RTCF_DOREDIRECT = 0x1000000
RTCF_LOG = 0x2000000
@@ -2864,10 +3196,12 @@ const (
RTM_DELACTION = 0x31
RTM_DELADDR = 0x15
RTM_DELADDRLABEL = 0x49
+ RTM_DELANYCAST = 0x3d
RTM_DELCHAIN = 0x65
RTM_DELLINK = 0x11
RTM_DELLINKPROP = 0x6d
RTM_DELMDB = 0x55
+ RTM_DELMULTICAST = 0x39
RTM_DELNEIGH = 0x1d
RTM_DELNETCONF = 0x51
RTM_DELNEXTHOP = 0x69
@@ -2917,11 +3251,13 @@ const (
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
+ RTM_NEWANYCAST = 0x3c
RTM_NEWCACHEREPORT = 0x60
RTM_NEWCHAIN = 0x64
RTM_NEWLINK = 0x10
RTM_NEWLINKPROP = 0x6c
RTM_NEWMDB = 0x54
+ RTM_NEWMULTICAST = 0x38
RTM_NEWNDUSEROPT = 0x44
RTM_NEWNEIGH = 0x1c
RTM_NEWNEIGHTBL = 0x40
@@ -2970,6 +3306,7 @@ const (
RTPROT_NTK = 0xf
RTPROT_OPENR = 0x63
RTPROT_OSPF = 0xbc
+ RTPROT_OVN = 0x54
RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1
RTPROT_RIP = 0xbd
@@ -2987,11 +3324,12 @@ const (
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
RWF_ATOMIC = 0x40
+ RWF_DONTCACHE = 0x80
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x7f
+ RWF_SUPPORTED = 0xff
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3059,6 +3397,47 @@ const (
SEEK_MAX = 0x4
SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c
+ SHF_ALLOC = 0x2
+ SHF_EXCLUDE = 0x8000000
+ SHF_EXECINSTR = 0x4
+ SHF_GROUP = 0x200
+ SHF_INFO_LINK = 0x40
+ SHF_LINK_ORDER = 0x80
+ SHF_MASKOS = 0xff00000
+ SHF_MASKPROC = 0xf0000000
+ SHF_MERGE = 0x10
+ SHF_ORDERED = 0x4000000
+ SHF_OS_NONCONFORMING = 0x100
+ SHF_RELA_LIVEPATCH = 0x100000
+ SHF_RO_AFTER_INIT = 0x200000
+ SHF_STRINGS = 0x20
+ SHF_TLS = 0x400
+ SHF_WRITE = 0x1
+ SHN_ABS = 0xfff1
+ SHN_COMMON = 0xfff2
+ SHN_HIPROC = 0xff1f
+ SHN_HIRESERVE = 0xffff
+ SHN_LIVEPATCH = 0xff20
+ SHN_LOPROC = 0xff00
+ SHN_LORESERVE = 0xff00
+ SHN_UNDEF = 0x0
+ SHT_DYNAMIC = 0x6
+ SHT_DYNSYM = 0xb
+ SHT_HASH = 0x5
+ SHT_HIPROC = 0x7fffffff
+ SHT_HIUSER = 0xffffffff
+ SHT_LOPROC = 0x70000000
+ SHT_LOUSER = 0x80000000
+ SHT_NOBITS = 0x8
+ SHT_NOTE = 0x7
+ SHT_NULL = 0x0
+ SHT_NUM = 0xc
+ SHT_PROGBITS = 0x1
+ SHT_REL = 0x9
+ SHT_RELA = 0x4
+ SHT_SHLIB = 0xa
+ SHT_STRTAB = 0x3
+ SHT_SYMTAB = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -3271,6 +3650,7 @@ const (
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_DIOALIGN = 0x2000
+ STATX_DIO_READ_ALIGN = 0x20000
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MNT_ID = 0x1000
@@ -3284,6 +3664,16 @@ const (
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
+ STB_GLOBAL = 0x1
+ STB_LOCAL = 0x0
+ STB_WEAK = 0x2
+ STT_COMMON = 0x5
+ STT_FILE = 0x4
+ STT_FUNC = 0x2
+ STT_NOTYPE = 0x0
+ STT_OBJECT = 0x1
+ STT_SECTION = 0x3
+ STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2
@@ -3322,7 +3712,7 @@ const (
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0xe
+ TASKSTATS_VERSION = 0x10
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -3392,8 +3782,6 @@ const (
TCP_TX_DELAY = 0x25
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
- TCP_V4_FLOW = 0x1
- TCP_V6_FLOW = 0x5
TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23
TFD_TIMER_ABSTIME = 0x1
@@ -3503,6 +3891,7 @@ const (
TP_STATUS_WRONG_FORMAT = 0x4
TRACEFS_MAGIC = 0x74726163
TS_COMM_LEN = 0x20
+ UBI_IOCECNFO = 0xc01c6f06
UDF_SUPER_MAGIC = 0x15013346
UDP_CORK = 0x1
UDP_ENCAP = 0x64
@@ -3515,14 +3904,14 @@ const (
UDP_NO_CHECK6_RX = 0x66
UDP_NO_CHECK6_TX = 0x65
UDP_SEGMENT = 0x67
- UDP_V4_FLOW = 0x2
- UDP_V6_FLOW = 0x6
UMOUNT_NOFOLLOW = 0x8
USBDEVICE_SUPER_MAGIC = 0x9fa2
UTIME_NOW = 0x3fffffff
UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997
VERASE = 0x2
+ VER_FLG_BASE = 0x1
+ VER_FLG_WEAK = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
@@ -3559,7 +3948,7 @@ const (
WDIOS_TEMPPANIC = 0x4
WDIOS_UNKNOWN = -0x1
WEXITED = 0x4
- WGALLOWEDIP_A_MAX = 0x3
+ WGALLOWEDIP_A_MAX = 0x4
WGDEVICE_A_MAX = 0x8
WGPEER_A_MAX = 0xa
WG_CMD_MAX = 0x1
@@ -3673,6 +4062,7 @@ const (
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TXMD_FLAGS_CHECKSUM = 0x2
+ XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4
XDP_TXMD_FLAGS_TIMESTAMP = 0x1
XDP_TX_METADATA = 0x2
XDP_TX_RING = 0x3
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 75207613..1c37f9fb 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -360,6 +361,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -372,6 +374,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c68acda5..6f54d34a 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -361,6 +362,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -373,6 +375,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index a8c607ab..783ec5c1 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -366,6 +367,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -378,6 +380,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 18563dd8..ca83d3ba 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 22912cda..607e611c 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -353,6 +354,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -365,6 +367,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 29344eb3..b9cb5bd3 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 20d51fb9..65b078a6 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 321b6090..5298a303 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 9bacdf1e..7bc557c8 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index c2242726..152399bb 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -414,6 +415,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -426,6 +428,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 6270c8ee..1a1ce240 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 9966c194..4231a1fb 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 848e5fcc..21c0e952 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -350,6 +351,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -362,6 +364,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 669b2adb..f00d1cd7 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -68,6 +68,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -422,6 +423,7 @@ const (
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -434,6 +436,7 @@ const (
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 4834e575..bc8d539e 100644
--- a/operator/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -71,6 +71,7 @@ const (
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -461,6 +462,7 @@ const (
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x2
SO_PASSPIDFD = 0x55
+ SO_PASSRIGHTS = 0x5c
SO_PASSSEC = 0x1f
SO_PEEK_OFF = 0x26
SO_PEERCRED = 0x40
@@ -473,6 +475,7 @@ const (
SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800
SO_RCVMARK = 0x54
+ SO_RCVPRIORITY = 0x5b
SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 24b346e1..813c05b6 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index ebd21310..fda32858 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 824b9c2d..e6f58f3c 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4f178a22..7f8998b9 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/operator/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 5cc1e8eb..8935d10a 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
+ _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/operator/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/operator/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index c6545413..b4609c20 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -72,7 +72,7 @@ import (
//go:cgo_import_dynamic libc_kill kill "libc.so"
//go:cgo_import_dynamic libc_lchown lchown "libc.so"
//go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so"
+//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
//go:cgo_import_dynamic libc_lstat lstat "libc.so"
//go:cgo_import_dynamic libc_madvise madvise "libc.so"
//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@@ -221,7 +221,7 @@ import (
//go:linkname procKill libc_kill
//go:linkname procLchown libc_lchown
//go:linkname procLink libc_link
-//go:linkname proc__xnet_llisten libc___xnet_llisten
+//go:linkname proc__xnet_listen libc___xnet_listen
//go:linkname procLstat libc_lstat
//go:linkname procMadvise libc_madvise
//go:linkname procMkdir libc_mkdir
@@ -371,7 +371,7 @@ var (
procKill,
procLchown,
procLink,
- proc__xnet_llisten,
+ proc__xnet_listen,
procLstat,
procMadvise,
procMkdir,
@@ -1178,7 +1178,7 @@ func Link(path string, link string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index c79aaff3..aca56ee4 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -462,4 +462,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 5eb45069..2ea1ef58 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -385,4 +385,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 05e50297..d22c8af3 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -426,4 +426,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 38c53ec5..5ee264ae 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -329,4 +329,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 31d2e71a..f9f03ebf 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -325,4 +325,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index f4184a33..87c2118e 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 05b99622..391ad102 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 43a256e9..56561577 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -376,4 +376,5 @@ const (
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index eea5ddfc..0482b52e 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -446,4 +446,5 @@ const (
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index 0d777bfb..71806f08 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -453,4 +453,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index b4463650..e35a7105 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 0c7d21c1..2aea4767 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -425,4 +425,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 84053916..6c9bb4e5 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -330,4 +330,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index fcf1b790..680bc991 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -391,4 +391,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 52d15b5f..620f2710 100644
--- a/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -404,4 +404,5 @@ const (
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux.go
index a46abe64..c1a46701 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -114,8 +114,10 @@ type Statx_t struct {
Atomic_write_unit_min uint32
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
+ Dio_read_offset_align uint32
+ Atomic_write_unit_max_opt uint32
_ [1]uint32
- _ [9]uint64
+ _ [8]uint64
}
type Fsid struct {
@@ -199,7 +201,8 @@ type FscryptAddKeyArg struct {
Key_spec FscryptKeySpecifier
Raw_size uint32
Key_id uint32
- _ [8]uint32
+ Flags uint32
+ _ [7]uint32
}
type FscryptRemoveKeyArg struct {
@@ -629,6 +632,8 @@ const (
IFA_FLAGS = 0x8
IFA_RT_PRIORITY = 0x9
IFA_TARGET_NETNSID = 0xa
+ IFAL_LABEL = 0x2
+ IFAL_ADDRESS = 0x1
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
@@ -686,6 +691,7 @@ const (
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
+ SizeofIfAddrlblmsg = 0xc
SizeofIfaCacheinfo = 0x10
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
@@ -737,6 +743,15 @@ type IfAddrmsg struct {
Index uint32
}
+type IfAddrlblmsg struct {
+ Family uint8
+ _ uint8
+ Prefixlen uint8
+ Flags uint8
+ Index uint32
+ Seq uint32
+}
+
type IfaCacheinfo struct {
Prefered uint32
Valid uint32
@@ -2226,8 +2241,11 @@ const (
NFT_PAYLOAD_LL_HEADER = 0x0
NFT_PAYLOAD_NETWORK_HEADER = 0x1
NFT_PAYLOAD_TRANSPORT_HEADER = 0x2
+ NFT_PAYLOAD_INNER_HEADER = 0x3
+ NFT_PAYLOAD_TUN_HEADER = 0x4
NFT_PAYLOAD_CSUM_NONE = 0x0
NFT_PAYLOAD_CSUM_INET = 0x1
+ NFT_PAYLOAD_CSUM_SCTP = 0x2
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1
NFTA_PAYLOAD_UNSPEC = 0x0
NFTA_PAYLOAD_DREG = 0x1
@@ -2314,6 +2332,11 @@ const (
NFT_CT_AVGPKT = 0x10
NFT_CT_ZONE = 0x11
NFT_CT_EVENTMASK = 0x12
+ NFT_CT_SRC_IP = 0x13
+ NFT_CT_DST_IP = 0x14
+ NFT_CT_SRC_IP6 = 0x15
+ NFT_CT_DST_IP6 = 0x16
+ NFT_CT_ID = 0x17
NFTA_CT_UNSPEC = 0x0
NFTA_CT_DREG = 0x1
NFTA_CT_KEY = 0x2
@@ -2594,8 +2617,8 @@ const (
SOF_TIMESTAMPING_BIND_PHC = 0x8000
SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000
- SOF_TIMESTAMPING_LAST = 0x20000
- SOF_TIMESTAMPING_MASK = 0x3ffff
+ SOF_TIMESTAMPING_LAST = 0x40000
+ SOF_TIMESTAMPING_MASK = 0x7ffff
SCM_TSTAMP_SND = 0x0
SCM_TSTAMP_SCHED = 0x1
@@ -3041,6 +3064,23 @@ const (
)
const (
+ TCA_UNSPEC = 0x0
+ TCA_KIND = 0x1
+ TCA_OPTIONS = 0x2
+ TCA_STATS = 0x3
+ TCA_XSTATS = 0x4
+ TCA_RATE = 0x5
+ TCA_FCNT = 0x6
+ TCA_STATS2 = 0x7
+ TCA_STAB = 0x8
+ TCA_PAD = 0x9
+ TCA_DUMP_INVISIBLE = 0xa
+ TCA_CHAIN = 0xb
+ TCA_HW_OFFLOAD = 0xc
+ TCA_INGRESS_BLOCK = 0xd
+ TCA_EGRESS_BLOCK = 0xe
+ TCA_DUMP_FLAGS = 0xf
+ TCA_EXT_WARN_MSG = 0x10
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
@@ -3075,6 +3115,18 @@ const (
RTNLGRP_IPV6_MROUTE_R = 0x1f
RTNLGRP_NEXTHOP = 0x20
RTNLGRP_BRVLAN = 0x21
+ RTNLGRP_MCTP_IFADDR = 0x22
+ RTNLGRP_TUNNEL = 0x23
+ RTNLGRP_STATS = 0x24
+ RTNLGRP_IPV4_MCADDR = 0x25
+ RTNLGRP_IPV6_MCADDR = 0x26
+ RTNLGRP_IPV6_ACADDR = 0x27
+ TCA_ROOT_UNSPEC = 0x0
+ TCA_ROOT_TAB = 0x1
+ TCA_ROOT_FLAGS = 0x2
+ TCA_ROOT_COUNT = 0x3
+ TCA_ROOT_TIME_DELTA = 0x4
+ TCA_ROOT_EXT_WARN_MSG = 0x5
)
type CapUserHeader struct {
@@ -3538,6 +3590,8 @@ type Nhmsg struct {
Flags uint32
}
+const SizeofNhmsg = 0x8
+
type NexthopGrp struct {
Id uint32
Weight uint8
@@ -3545,6 +3599,8 @@ type NexthopGrp struct {
Resvd2 uint16
}
+const SizeofNexthopGrp = 0x8
+
const (
NHA_UNSPEC = 0x0
NHA_ID = 0x1
@@ -3802,7 +3858,16 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2d
+ ETHTOOL_MSG_PLCA_GET_CFG = 0x27
+ ETHTOOL_MSG_PLCA_SET_CFG = 0x28
+ ETHTOOL_MSG_PLCA_GET_STATUS = 0x29
+ ETHTOOL_MSG_MM_GET = 0x2a
+ ETHTOOL_MSG_MM_SET = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c
+ ETHTOOL_MSG_PHY_GET = 0x2d
+ ETHTOOL_MSG_TSCONFIG_GET = 0x2e
+ ETHTOOL_MSG_TSCONFIG_SET = 0x2f
+ ETHTOOL_MSG_USER_MAX = 0x2f
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3842,7 +3907,17 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2e
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28
+ ETHTOOL_MSG_PLCA_NTF = 0x29
+ ETHTOOL_MSG_MM_GET_REPLY = 0x2a
+ ETHTOOL_MSG_MM_NTF = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c
+ ETHTOOL_MSG_PHY_GET_REPLY = 0x2d
+ ETHTOOL_MSG_PHY_NTF = 0x2e
+ ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f
+ ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30
+ ETHTOOL_MSG_KERNEL_MAX = 0x30
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@@ -3949,7 +4024,12 @@ const (
ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb
ETHTOOL_A_RINGS_CQE_SIZE = 0xc
ETHTOOL_A_RINGS_TX_PUSH = 0xd
- ETHTOOL_A_RINGS_MAX = 0x10
+ ETHTOOL_A_RINGS_RX_PUSH = 0xe
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10
+ ETHTOOL_A_RINGS_HDS_THRESH = 0x11
+ ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12
+ ETHTOOL_A_RINGS_MAX = 0x12
ETHTOOL_A_CHANNELS_UNSPEC = 0x0
ETHTOOL_A_CHANNELS_HEADER = 0x1
ETHTOOL_A_CHANNELS_RX_MAX = 0x2
@@ -4015,7 +4095,9 @@ const (
ETHTOOL_A_TSINFO_TX_TYPES = 0x3
ETHTOOL_A_TSINFO_RX_FILTERS = 0x4
ETHTOOL_A_TSINFO_PHC_INDEX = 0x5
- ETHTOOL_A_TSINFO_MAX = 0x6
+ ETHTOOL_A_TSINFO_STATS = 0x6
+ ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7
+ ETHTOOL_A_TSINFO_MAX = 0x9
ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0
ETHTOOL_A_CABLE_TEST_HEADER = 0x1
ETHTOOL_A_CABLE_TEST_MAX = 0x1
@@ -4101,6 +4183,19 @@ const (
ETHTOOL_A_TUNNEL_INFO_MAX = 0x2
)
+const (
+ TCP_V4_FLOW = 0x1
+ UDP_V4_FLOW = 0x2
+ TCP_V6_FLOW = 0x5
+ UDP_V6_FLOW = 0x6
+ ESP_V4_FLOW = 0xa
+ ESP_V6_FLOW = 0xc
+ IP_USER_FLOW = 0xd
+ IPV6_USER_FLOW = 0xe
+ IPV6_FLOW = 0x11
+ ETHER_FLOW = 0x12
+)
+
const SPEED_UNKNOWN = -0x1
type EthtoolDrvinfo struct {
@@ -4613,6 +4708,7 @@ const (
NL80211_ATTR_AKM_SUITES = 0x4c
NL80211_ATTR_AP_ISOLATE = 0x60
NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135
+ NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a
NL80211_ATTR_AUTH_DATA = 0x9c
NL80211_ATTR_AUTH_TYPE = 0x35
NL80211_ATTR_BANDS = 0xef
@@ -4623,6 +4719,7 @@ const (
NL80211_ATTR_BSS_BASIC_RATES = 0x24
NL80211_ATTR_BSS = 0x2f
NL80211_ATTR_BSS_CTS_PROT = 0x1c
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147
NL80211_ATTR_BSS_HT_OPMODE = 0x6d
NL80211_ATTR_BSSID = 0xf5
NL80211_ATTR_BSS_SELECT = 0xe3
@@ -4682,6 +4779,7 @@ const (
NL80211_ATTR_DTIM_PERIOD = 0xd
NL80211_ATTR_DURATION = 0x57
NL80211_ATTR_EHT_CAPABILITY = 0x136
+ NL80211_ATTR_EMA_RNR_ELEMS = 0x145
NL80211_ATTR_EML_CAPABILITY = 0x13d
NL80211_ATTR_EXT_CAPA = 0xa9
NL80211_ATTR_EXT_CAPA_MASK = 0xaa
@@ -4717,6 +4815,7 @@ const (
NL80211_ATTR_HIDDEN_SSID = 0x7e
NL80211_ATTR_HT_CAPABILITY = 0x1f
NL80211_ATTR_HT_CAPABILITY_MASK = 0x94
+ NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144
NL80211_ATTR_IE_ASSOC_RESP = 0x80
NL80211_ATTR_IE = 0x2a
NL80211_ATTR_IE_PROBE_RESP = 0x7f
@@ -4747,9 +4846,10 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14d
+ NL80211_ATTR_MAX = 0x151
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
+ NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143
NL80211_ATTR_MAX_MATCH_SETS = 0x85
NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c
NL80211_ATTR_MAX_NUM_PMKIDS = 0x56
@@ -4774,9 +4874,12 @@ const (
NL80211_ATTR_MGMT_SUBTYPE = 0x29
NL80211_ATTR_MLD_ADDR = 0x13a
NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e
+ NL80211_ATTR_MLO_LINK_DISABLED = 0x146
NL80211_ATTR_MLO_LINK_ID = 0x139
NL80211_ATTR_MLO_LINKS = 0x138
NL80211_ATTR_MLO_SUPPORT = 0x13b
+ NL80211_ATTR_MLO_TTLM_DLINK = 0x148
+ NL80211_ATTR_MLO_TTLM_ULINK = 0x149
NL80211_ATTR_MNTR_FLAGS = 0x17
NL80211_ATTR_MPATH_INFO = 0x1b
NL80211_ATTR_MPATH_NEXT_HOP = 0x1a
@@ -4809,12 +4912,14 @@ const (
NL80211_ATTR_PORT_AUTHORIZED = 0x103
NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5
NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6
+ NL80211_ATTR_POWER_RULE_PSD = 0x8
NL80211_ATTR_PREV_BSSID = 0x4f
NL80211_ATTR_PRIVACY = 0x46
NL80211_ATTR_PROBE_RESP = 0x91
NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90
NL80211_ATTR_PROTOCOL_FEATURES = 0xad
NL80211_ATTR_PS_STATE = 0x5d
+ NL80211_ATTR_PUNCT_BITMAP = 0x142
NL80211_ATTR_QOS_MAP = 0xc7
NL80211_ATTR_RADAR_BACKGROUND = 0x134
NL80211_ATTR_RADAR_EVENT = 0xa8
@@ -4943,7 +5048,9 @@ const (
NL80211_ATTR_WIPHY_FREQ = 0x26
NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9
NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c
NL80211_ATTR_WIPHY_NAME = 0x2
+ NL80211_ATTR_WIPHY_RADIOS = 0x14b
NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e
NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d
NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40
@@ -4978,6 +5085,8 @@ const (
NL80211_BAND_ATTR_IFTYPE_DATA = 0x9
NL80211_BAND_ATTR_MAX = 0xd
NL80211_BAND_ATTR_RATES = 0x2
+ NL80211_BAND_ATTR_S1G_CAPA = 0xd
+ NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc
NL80211_BAND_ATTR_VHT_CAPA = 0x8
NL80211_BAND_ATTR_VHT_MCS_SET = 0x7
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8
@@ -5001,6 +5110,10 @@ const (
NL80211_BSS_BEACON_INTERVAL = 0x4
NL80211_BSS_BEACON_TSF = 0xd
NL80211_BSS_BSSID = 0x1
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1
+ NL80211_BSS_CANNOT_USE_REASONS = 0x18
+ NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2
NL80211_BSS_CAPABILITY = 0x5
NL80211_BSS_CHAIN_SIGNAL = 0x13
NL80211_BSS_CHAN_WIDTH_10 = 0x1
@@ -5032,6 +5145,9 @@ const (
NL80211_BSS_STATUS = 0x9
NL80211_BSS_STATUS_IBSS_JOINED = 0x2
NL80211_BSS_TSF = 0x3
+ NL80211_BSS_USE_FOR = 0x17
+ NL80211_BSS_USE_FOR_MLD_LINK = 0x2
+ NL80211_BSS_USE_FOR_NORMAL = 0x1
NL80211_CHAN_HT20 = 0x1
NL80211_CHAN_HT40MINUS = 0x2
NL80211_CHAN_HT40PLUS = 0x3
@@ -5117,7 +5233,8 @@ const (
NL80211_CMD_LEAVE_IBSS = 0x2c
NL80211_CMD_LEAVE_MESH = 0x45
NL80211_CMD_LEAVE_OCB = 0x6d
- NL80211_CMD_MAX = 0x9b
+ NL80211_CMD_LINKS_REMOVED = 0x9a
+ NL80211_CMD_MAX = 0x9d
NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29
NL80211_CMD_MODIFY_LINK_STA = 0x97
NL80211_CMD_NAN_MATCH = 0x78
@@ -5161,6 +5278,7 @@ const (
NL80211_CMD_SET_COALESCE = 0x65
NL80211_CMD_SET_CQM = 0x3f
NL80211_CMD_SET_FILS_AAD = 0x92
+ NL80211_CMD_SET_HW_TIMESTAMP = 0x99
NL80211_CMD_SET_INTERFACE = 0x6
NL80211_CMD_SET_KEY = 0xa
NL80211_CMD_SET_MAC_ACL = 0x5d
@@ -5180,6 +5298,7 @@ const (
NL80211_CMD_SET_SAR_SPECS = 0x8c
NL80211_CMD_SET_STATION = 0x12
NL80211_CMD_SET_TID_CONFIG = 0x89
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b
NL80211_CMD_SET_TX_BITRATE_MASK = 0x39
NL80211_CMD_SET_WDS_PEER = 0x42
NL80211_CMD_SET_WIPHY = 0x2
@@ -5247,6 +5366,7 @@ const (
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21
NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22
NL80211_EXT_FEATURE_AQL = 0x28
+ NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e
NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29
NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36
@@ -5262,6 +5382,7 @@ const (
NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b
NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c
+ NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43
NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20
NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24
@@ -5281,9 +5402,12 @@ const (
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31
+ NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42
+ NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d
NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b
NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39
+ NL80211_EXT_FEATURE_PUNCT = 0x3e
NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c
NL80211_EXT_FEATURE_RRM = 0x1
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33
@@ -5295,8 +5419,10 @@ const (
NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc
NL80211_EXT_FEATURE_SECURE_LTF = 0x37
+ NL80211_EXT_FEATURE_SECURE_NAN = 0x3f
NL80211_EXT_FEATURE_SECURE_RTT = 0x38
NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44
NL80211_EXT_FEATURE_STA_TX_PWR = 0x25
NL80211_EXT_FEATURE_TXQS = 0x1c
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35
@@ -5343,7 +5469,10 @@ const (
NL80211_FREQUENCY_ATTR_2MHZ = 0x16
NL80211_FREQUENCY_ATTR_4MHZ = 0x17
NL80211_FREQUENCY_ATTR_8MHZ = 0x18
+ NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d
NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7
NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8
NL80211_FREQUENCY_ATTR_DISABLED = 0x2
@@ -5351,12 +5480,14 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x21
+ NL80211_FREQUENCY_ATTR_MAX = 0x22
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10
NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb
NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b
NL80211_FREQUENCY_ATTR_NO_HE = 0x13
@@ -5364,8 +5495,11 @@ const (
NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa
NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3
NL80211_FREQUENCY_ATTR_NO_IR = 0x3
+ NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_OFFSET = 0x14
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3
+ NL80211_FREQUENCY_ATTR_PSD = 0x1c
NL80211_FREQUENCY_ATTR_RADAR = 0x5
NL80211_FREQUENCY_ATTR_WMM = 0x12
NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3
@@ -5430,6 +5564,7 @@ const (
NL80211_IFTYPE_STATION = 0x2
NL80211_IFTYPE_UNSPECIFIED = 0x0
NL80211_IFTYPE_WDS = 0x5
+ NL80211_KCK_EXT_LEN_32 = 0x20
NL80211_KCK_EXT_LEN = 0x18
NL80211_KCK_LEN = 0x10
NL80211_KEK_EXT_LEN = 0x20
@@ -5458,9 +5593,10 @@ const (
NL80211_MAX_SUPP_HT_RATES = 0x4d
NL80211_MAX_SUPP_RATES = 0x20
NL80211_MAX_SUPP_REG_RULES = 0x80
+ NL80211_MAX_SUPP_SELECTORS = 0x80
NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5
NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3
- NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5
+ NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2
NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1
NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4
@@ -5703,11 +5839,16 @@ const (
NL80211_RADAR_PRE_CAC_EXPIRED = 0x4
NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb
NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa
+ NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d
+ NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19
+ NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a
NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12
NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3
+ NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b
NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc
NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8
NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9
+ NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c
NL80211_RATE_INFO_BITRATE32 = 0x5
NL80211_RATE_INFO_BITRATE = 0x1
NL80211_RATE_INFO_EHT_GI_0_8 = 0x0
@@ -5753,6 +5894,8 @@ const (
NL80211_RATE_INFO_HE_RU_ALLOC = 0x11
NL80211_RATE_INFO_MAX = 0x1d
NL80211_RATE_INFO_MCS = 0x2
+ NL80211_RATE_INFO_S1G_MCS = 0x17
+ NL80211_RATE_INFO_S1G_NSS = 0x18
NL80211_RATE_INFO_SHORT_GI = 0x4
NL80211_RATE_INFO_VHT_MCS = 0x6
NL80211_RATE_INFO_VHT_NSS = 0x7
@@ -5770,14 +5913,19 @@ const (
NL80211_REKEY_DATA_KEK = 0x1
NL80211_REKEY_DATA_REPLAY_CTR = 0x3
NL80211_REPLAY_CTR_LEN = 0x8
+ NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000
NL80211_RRF_AUTO_BW = 0x800
NL80211_RRF_DFS = 0x10
+ NL80211_RRF_DFS_CONCURRENT = 0x200000
NL80211_RRF_GO_CONCURRENT = 0x1000
NL80211_RRF_IR_CONCURRENT = 0x1000
NL80211_RRF_NO_160MHZ = 0x10000
NL80211_RRF_NO_320MHZ = 0x40000
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000
NL80211_RRF_NO_80MHZ = 0x8000
NL80211_RRF_NO_CCK = 0x2
+ NL80211_RRF_NO_EHT = 0x80000
NL80211_RRF_NO_HE = 0x20000
NL80211_RRF_NO_HT40 = 0x6000
NL80211_RRF_NO_HT40MINUS = 0x2000
@@ -5788,7 +5936,10 @@ const (
NL80211_RRF_NO_IR = 0x80
NL80211_RRF_NO_OFDM = 0x1
NL80211_RRF_NO_OUTDOOR = 0x8
+ NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000
NL80211_RRF_PASSIVE_SCAN = 0x80
+ NL80211_RRF_PSD = 0x100000
NL80211_RRF_PTMP_ONLY = 0x40
NL80211_RRF_PTP_ONLY = 0x20
NL80211_RXMGMT_FLAG_ANSWERED = 0x1
@@ -5849,6 +6000,7 @@ const (
NL80211_STA_FLAG_MAX_OLD_API = 0x6
NL80211_STA_FLAG_MFP = 0x4
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
+ NL80211_STA_FLAG_SPP_AMSDU = 0x8
NL80211_STA_FLAG_TDLS_PEER = 0x6
NL80211_STA_FLAG_WME = 0x3
NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23
@@ -6007,6 +6159,13 @@ const (
NL80211_VHT_CAPABILITY_LEN = 0xc
NL80211_VHT_NSS_MAX = 0x8
NL80211_WIPHY_NAME_MAXLEN = 0x40
+ NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2
+ NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3
+ NL80211_WIPHY_RADIO_ATTR_MAX = 0x4
+ NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1
NL80211_WMMR_AIFSN = 0x3
NL80211_WMMR_CW_MAX = 0x2
NL80211_WMMR_CW_MIN = 0x1
@@ -6038,6 +6197,7 @@ const (
NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4
NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9
NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb
NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc
@@ -6176,3 +6336,30 @@ type SockDiagReq struct {
}
const RTM_NEWNVLAN = 0x70
+
+const (
+ MPOL_BIND = 0x2
+ MPOL_DEFAULT = 0x0
+ MPOL_F_ADDR = 0x2
+ MPOL_F_MEMS_ALLOWED = 0x4
+ MPOL_F_MOF = 0x8
+ MPOL_F_MORON = 0x10
+ MPOL_F_NODE = 0x1
+ MPOL_F_NUMA_BALANCING = 0x2000
+ MPOL_F_RELATIVE_NODES = 0x4000
+ MPOL_F_SHARED = 0x1
+ MPOL_F_STATIC_NODES = 0x8000
+ MPOL_INTERLEAVE = 0x3
+ MPOL_LOCAL = 0x4
+ MPOL_MAX = 0x7
+ MPOL_MF_INTERNAL = 0x10
+ MPOL_MF_LAZY = 0x8
+ MPOL_MF_MOVE_ALL = 0x4
+ MPOL_MF_MOVE = 0x2
+ MPOL_MF_STRICT = 0x1
+ MPOL_MF_VALID = 0x7
+ MPOL_MODE_FLAGS = 0xe000
+ MPOL_PREFERRED = 0x1
+ MPOL_PREFERRED_MANY = 0x5
+ MPOL_WEIGHTED_INTERLEAVE = 0x6
+)
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fd402da4..485f2d3a 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,7 +282,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -338,6 +338,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index eb7a5e18..ecbd1ad8 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -351,6 +351,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d78ac108..02f0463a 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -273,7 +273,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -329,6 +329,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index cd06d47f..6f4d400d 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -330,6 +330,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe26..cd532cfa 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac2..41336208 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d453..eaa37eb7 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea18..98ae6a1e 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420c..cae19615 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 83597287..6ce3b4e0 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@ type Stat_t struct {
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -285,7 +285,7 @@ type Taskstats struct {
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -341,6 +341,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5c..c7429c6a 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb6..4bf4baf4 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51a..e9709d70 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce900..fb44268c 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b5673..9c38265c 100644
--- a/operator/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/operator/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@ type Taskstats struct {
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/operator/vendor/golang.org/x/sys/windows/security_windows.go b/operator/vendor/golang.org/x/sys/windows/security_windows.go
index b6e1ab76..a8b0364c 100644
--- a/operator/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/operator/vendor/golang.org/x/sys/windows/security_windows.go
@@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
return nil, err
}
if absoluteSDSize > 0 {
- absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0]))
+ absoluteSD = new(SECURITY_DESCRIPTOR)
+ if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
+ panic("sizeof(SECURITY_DESCRIPTOR) too small")
+ }
}
var (
dacl *ACL
@@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
group *SID
)
if daclSize > 0 {
- dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0]))
+ dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
}
if saclSize > 0 {
- sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0]))
+ sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
}
if ownerSize > 0 {
- owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0]))
+ owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
}
if groupSize > 0 {
- group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0]))
+ group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
}
+ // We call into Windows via makeAbsoluteSD, which sets up
+ // pointers within absoluteSD that point to other chunks of memory
+ // we pass into makeAbsoluteSD, and that happens outside the view of the GC.
+ // We therefore take some care here to then verify the pointers are as we expect
+ // and set them explicitly in view of the GC. See https://go.dev/issue/73199.
+ // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
+ if err != nil {
+ // Don't return absoluteSD, which might be partially initialized.
+ return nil, err
+ }
+ // Before using any fields, verify absoluteSD is in the format we expect according to Windows.
+ // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
+ absControl, _, err := absoluteSD.Control()
+ if err != nil {
+ panic("absoluteSD: " + err.Error())
+ }
+ if absControl&SE_SELF_RELATIVE != 0 {
+ panic("absoluteSD not in absolute format")
+ }
+ if absoluteSD.dacl != dacl {
+ panic("dacl pointer mismatch")
+ }
+ if absoluteSD.sacl != sacl {
+ panic("sacl pointer mismatch")
+ }
+ if absoluteSD.owner != owner {
+ panic("owner pointer mismatch")
+ }
+ if absoluteSD.group != group {
+ panic("group pointer mismatch")
+ }
+ absoluteSD.dacl = dacl
+ absoluteSD.sacl = sacl
+ absoluteSD.owner = owner
+ absoluteSD.group = group
+
return
}
diff --git a/operator/vendor/golang.org/x/sys/windows/syscall_windows.go b/operator/vendor/golang.org/x/sys/windows/syscall_windows.go
index 4a325438..69439df2 100644
--- a/operator/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/operator/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
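The two new kernel32 wrappers declared above, GetNumberOfConsoleInputEvents and FlushConsoleInputBuffer, can be exercised as in the following sketch. This is illustrative only, not part of the vendored change, and it assumes a real console is attached to standard input:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Count pending console input events on stdin, then discard them.
	h, err := windows.GetStdHandle(windows.STD_INPUT_HANDLE)
	if err != nil {
		panic(err)
	}
	var n uint32
	if err := windows.GetNumberOfConsoleInputEvents(h, &n); err != nil {
		panic(err)
	}
	fmt.Println("pending console events:", n)
	if err := windows.FlushConsoleInputBuffer(h); err != nil {
		panic(err)
	}
}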
@@ -870,6 +872,7 @@ const socket_error = uintptr(^uint32(0))
//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
+//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
@@ -889,8 +892,12 @@ const socket_error = uintptr(^uint32(0))
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2
+//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
@@ -913,6 +920,17 @@ type RawSockaddrInet6 struct {
Scope_id uint32
}
+// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See
+// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet.
+//
+// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using
+// unsafe, depending on the address family.
+type RawSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Data [6]uint32
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -1698,8 +1716,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
func (s *NTUnicodeString) Slice() []uint16 {
- slice := unsafe.Slice(s.Buffer, s.MaximumLength)
- return slice[:s.Length]
+ // Note: this rounds the length down, if it happens
+ // to (incorrectly) be odd. Probably safer than rounding up.
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
}
func (s *NTUnicodeString) String() string {
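The Slice fix above treats Length and MaximumLength as byte counts and halves them to get UTF-16 code units, rounding an odd length down. A small round-trip sketch, illustrative only and assuming the vendored x/sys/windows API:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Round-trip a Go string through NTUnicodeString to exercise Slice,
	// whose length handling is adjusted in the hunk above.
	nts, err := windows.NewNTUnicodeString("C:\\Windows\\System32")
	if err != nil {
		panic(err)
	}
	u16 := nts.Slice() // UTF-16 code units; Length/MaximumLength are in bytes
	fmt.Println(len(u16), windows.UTF16ToString(u16))
}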
diff --git a/operator/vendor/golang.org/x/sys/windows/types_windows.go b/operator/vendor/golang.org/x/sys/windows/types_windows.go
index 9d138de5..6e4f50eb 100644
--- a/operator/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/operator/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+ O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+ O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+ O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+ O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+ O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+ O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+ O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+ O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+ O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+ O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+ O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+)
+
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
@@ -1074,6 +1090,7 @@ const (
IP_ADD_MEMBERSHIP = 0xc
IP_DROP_MEMBERSHIP = 0xd
IP_PKTINFO = 0x13
+ IP_MTU_DISCOVER = 0x47
IPV6_V6ONLY = 0x1b
IPV6_UNICAST_HOPS = 0x4
@@ -1083,6 +1100,7 @@ const (
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_PKTINFO = 0x13
+ IPV6_MTU_DISCOVER = 0x47
MSG_OOB = 0x1
MSG_PEEK = 0x2
@@ -1132,6 +1150,15 @@ const (
WSASYS_STATUS_LEN = 128
)
+// enum PMTUD_STATE from ws2ipdef.h
+const (
+ IP_PMTUDISC_NOT_SET = 0
+ IP_PMTUDISC_DO = 1
+ IP_PMTUDISC_DONT = 2
+ IP_PMTUDISC_PROBE = 3
+ IP_PMTUDISC_MAX = 4
+)
+
type WSABuf struct {
Len uint32
Buf *byte
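The new IP_MTU_DISCOVER / IPV6_MTU_DISCOVER options and PMTUD_STATE values above map onto an ordinary setsockopt call. A hedged sketch of how a caller might use them; this is not part of the diff and assumes the usual Winsock startup sequence:

//go:build windows

package main

import (
	"golang.org/x/sys/windows"
)

func main() {
	// Winsock must be initialized before creating raw sockets.
	var d windows.WSAData
	if err := windows.WSAStartup(uint32(0x202), &d); err != nil {
		panic(err)
	}
	defer windows.WSACleanup()

	fd, err := windows.Socket(windows.AF_INET, windows.SOCK_DGRAM, windows.IPPROTO_UDP)
	if err != nil {
		panic(err)
	}
	defer windows.Closesocket(fd)

	// Enable "don't fragment" path-MTU discovery using the option and
	// PMTUD_STATE value added in this hunk.
	if err := windows.SetsockoptInt(fd, windows.IPPROTO_IP, windows.IP_MTU_DISCOVER, windows.IP_PMTUDISC_DO); err != nil {
		panic(err)
	}
}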
@@ -1146,6 +1173,22 @@ type WSAMsg struct {
Flags uint32
}
+type WSACMSGHDR struct {
+ Len uintptr
+ Level int32
+ Type int32
+}
+
+type IN_PKTINFO struct {
+ Addr [4]byte
+ Ifindex uint32
+}
+
+type IN6_PKTINFO struct {
+ Addr [16]byte
+ Ifindex uint32
+}
+
// Flags for WSASocket
const (
WSA_FLAG_OVERLAPPED = 0x01
@@ -1949,6 +1992,12 @@ const (
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
)
+// FILE_ZERO_DATA_INFORMATION from winioctl.h
+type FileZeroDataInformation struct {
+ FileOffset int64
+ BeyondFinalZero int64
+}
+
const (
ComputerNameNetBIOS = 0
ComputerNameDnsHostname = 1
@@ -2271,6 +2320,82 @@ type MibIfRow2 struct {
OutQLen uint64
}
+// IP_ADDRESS_PREFIX stores an IP address prefix. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix.
+type IpAddressPrefix struct {
+ Prefix RawSockaddrInet
+ PrefixLength uint8
+}
+
+// NL_ROUTE_ORIGIN enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin.
+const (
+ NlroManual = 0
+ NlroWellKnown = 1
+ NlroDHCP = 2
+ NlroRouterAdvertisement = 3
+ Nlro6to4 = 4
+)
+
+// NL_ROUTE_PROTOCOL enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol.
+const (
+ MIB_IPPROTO_OTHER = 1
+ MIB_IPPROTO_LOCAL = 2
+ MIB_IPPROTO_NETMGMT = 3
+ MIB_IPPROTO_ICMP = 4
+ MIB_IPPROTO_EGP = 5
+ MIB_IPPROTO_GGP = 6
+ MIB_IPPROTO_HELLO = 7
+ MIB_IPPROTO_RIP = 8
+ MIB_IPPROTO_IS_IS = 9
+ MIB_IPPROTO_ES_IS = 10
+ MIB_IPPROTO_CISCO = 11
+ MIB_IPPROTO_BBN = 12
+ MIB_IPPROTO_OSPF = 13
+ MIB_IPPROTO_BGP = 14
+ MIB_IPPROTO_IDPR = 15
+ MIB_IPPROTO_EIGRP = 16
+ MIB_IPPROTO_DVMRP = 17
+ MIB_IPPROTO_RPL = 18
+ MIB_IPPROTO_DHCP = 19
+ MIB_IPPROTO_NT_AUTOSTATIC = 10002
+ MIB_IPPROTO_NT_STATIC = 10006
+ MIB_IPPROTO_NT_STATIC_NON_DOD = 10007
+)
+
+// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2.
+type MibIpForwardRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ DestinationPrefix IpAddressPrefix
+ NextHop RawSockaddrInet
+ SitePrefixLength uint8
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ Metric uint32
+ Protocol uint32
+ Loopback uint8
+ AutoconfigureAddress uint8
+ Publish uint8
+ Immortal uint8
+ Age uint32
+ Origin uint32
+}
+
+// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2.
+type MibIpForwardTable2 struct {
+ NumEntries uint32
+ Table [1]MibIpForwardRow2
+}
+
+// Rows returns the IP route entries in the table.
+func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 {
+ return unsafe.Slice(&t.Table[0], t.NumEntries)
+}
+
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {
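The route-table additions above (GetIpForwardTable2, FreeMibTable, and MibIpForwardTable2.Rows) are enough to enumerate the Windows routing table. A minimal sketch, not part of this diff, using only the APIs introduced in these hunks plus unsafe for the FreeMibTable call:

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	// Fetch the IPv4 routing table; Windows allocates the buffer.
	var table *windows.MibIpForwardTable2
	if err := windows.GetIpForwardTable2(windows.AF_INET, &table); err != nil {
		panic(err)
	}
	// The table must be released with FreeMibTable.
	defer windows.FreeMibTable(unsafe.Pointer(table))

	for _, row := range table.Rows() {
		fmt.Printf("ifindex=%d metric=%d proto=%d\n",
			row.InterfaceIndex, row.Metric, row.Protocol)
	}
}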
@@ -2673,6 +2798,8 @@ type CommTimeouts struct {
// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
type NTUnicodeString struct {
+ // Note: Length and MaximumLength are in *bytes*, not uint16s.
+ // They should always be even.
Length uint16
MaximumLength uint16
Buffer *uint16
@@ -3601,3 +3728,213 @@ const (
KLF_NOTELLSHELL = 0x00000080
KLF_SETFORPROCESS = 0x00000100
)
+
+// Virtual Key codes
+// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
+const (
+ VK_LBUTTON = 0x01
+ VK_RBUTTON = 0x02
+ VK_CANCEL = 0x03
+ VK_MBUTTON = 0x04
+ VK_XBUTTON1 = 0x05
+ VK_XBUTTON2 = 0x06
+ VK_BACK = 0x08
+ VK_TAB = 0x09
+ VK_CLEAR = 0x0C
+ VK_RETURN = 0x0D
+ VK_SHIFT = 0x10
+ VK_CONTROL = 0x11
+ VK_MENU = 0x12
+ VK_PAUSE = 0x13
+ VK_CAPITAL = 0x14
+ VK_KANA = 0x15
+ VK_HANGEUL = 0x15
+ VK_HANGUL = 0x15
+ VK_IME_ON = 0x16
+ VK_JUNJA = 0x17
+ VK_FINAL = 0x18
+ VK_HANJA = 0x19
+ VK_KANJI = 0x19
+ VK_IME_OFF = 0x1A
+ VK_ESCAPE = 0x1B
+ VK_CONVERT = 0x1C
+ VK_NONCONVERT = 0x1D
+ VK_ACCEPT = 0x1E
+ VK_MODECHANGE = 0x1F
+ VK_SPACE = 0x20
+ VK_PRIOR = 0x21
+ VK_NEXT = 0x22
+ VK_END = 0x23
+ VK_HOME = 0x24
+ VK_LEFT = 0x25
+ VK_UP = 0x26
+ VK_RIGHT = 0x27
+ VK_DOWN = 0x28
+ VK_SELECT = 0x29
+ VK_PRINT = 0x2A
+ VK_EXECUTE = 0x2B
+ VK_SNAPSHOT = 0x2C
+ VK_INSERT = 0x2D
+ VK_DELETE = 0x2E
+ VK_HELP = 0x2F
+ VK_LWIN = 0x5B
+ VK_RWIN = 0x5C
+ VK_APPS = 0x5D
+ VK_SLEEP = 0x5F
+ VK_NUMPAD0 = 0x60
+ VK_NUMPAD1 = 0x61
+ VK_NUMPAD2 = 0x62
+ VK_NUMPAD3 = 0x63
+ VK_NUMPAD4 = 0x64
+ VK_NUMPAD5 = 0x65
+ VK_NUMPAD6 = 0x66
+ VK_NUMPAD7 = 0x67
+ VK_NUMPAD8 = 0x68
+ VK_NUMPAD9 = 0x69
+ VK_MULTIPLY = 0x6A
+ VK_ADD = 0x6B
+ VK_SEPARATOR = 0x6C
+ VK_SUBTRACT = 0x6D
+ VK_DECIMAL = 0x6E
+ VK_DIVIDE = 0x6F
+ VK_F1 = 0x70
+ VK_F2 = 0x71
+ VK_F3 = 0x72
+ VK_F4 = 0x73
+ VK_F5 = 0x74
+ VK_F6 = 0x75
+ VK_F7 = 0x76
+ VK_F8 = 0x77
+ VK_F9 = 0x78
+ VK_F10 = 0x79
+ VK_F11 = 0x7A
+ VK_F12 = 0x7B
+ VK_F13 = 0x7C
+ VK_F14 = 0x7D
+ VK_F15 = 0x7E
+ VK_F16 = 0x7F
+ VK_F17 = 0x80
+ VK_F18 = 0x81
+ VK_F19 = 0x82
+ VK_F20 = 0x83
+ VK_F21 = 0x84
+ VK_F22 = 0x85
+ VK_F23 = 0x86
+ VK_F24 = 0x87
+ VK_NUMLOCK = 0x90
+ VK_SCROLL = 0x91
+ VK_OEM_NEC_EQUAL = 0x92
+ VK_OEM_FJ_JISHO = 0x92
+ VK_OEM_FJ_MASSHOU = 0x93
+ VK_OEM_FJ_TOUROKU = 0x94
+ VK_OEM_FJ_LOYA = 0x95
+ VK_OEM_FJ_ROYA = 0x96
+ VK_LSHIFT = 0xA0
+ VK_RSHIFT = 0xA1
+ VK_LCONTROL = 0xA2
+ VK_RCONTROL = 0xA3
+ VK_LMENU = 0xA4
+ VK_RMENU = 0xA5
+ VK_BROWSER_BACK = 0xA6
+ VK_BROWSER_FORWARD = 0xA7
+ VK_BROWSER_REFRESH = 0xA8
+ VK_BROWSER_STOP = 0xA9
+ VK_BROWSER_SEARCH = 0xAA
+ VK_BROWSER_FAVORITES = 0xAB
+ VK_BROWSER_HOME = 0xAC
+ VK_VOLUME_MUTE = 0xAD
+ VK_VOLUME_DOWN = 0xAE
+ VK_VOLUME_UP = 0xAF
+ VK_MEDIA_NEXT_TRACK = 0xB0
+ VK_MEDIA_PREV_TRACK = 0xB1
+ VK_MEDIA_STOP = 0xB2
+ VK_MEDIA_PLAY_PAUSE = 0xB3
+ VK_LAUNCH_MAIL = 0xB4
+ VK_LAUNCH_MEDIA_SELECT = 0xB5
+ VK_LAUNCH_APP1 = 0xB6
+ VK_LAUNCH_APP2 = 0xB7
+ VK_OEM_1 = 0xBA
+ VK_OEM_PLUS = 0xBB
+ VK_OEM_COMMA = 0xBC
+ VK_OEM_MINUS = 0xBD
+ VK_OEM_PERIOD = 0xBE
+ VK_OEM_2 = 0xBF
+ VK_OEM_3 = 0xC0
+ VK_OEM_4 = 0xDB
+ VK_OEM_5 = 0xDC
+ VK_OEM_6 = 0xDD
+ VK_OEM_7 = 0xDE
+ VK_OEM_8 = 0xDF
+ VK_OEM_AX = 0xE1
+ VK_OEM_102 = 0xE2
+ VK_ICO_HELP = 0xE3
+ VK_ICO_00 = 0xE4
+ VK_PROCESSKEY = 0xE5
+ VK_ICO_CLEAR = 0xE6
+ VK_OEM_RESET = 0xE9
+ VK_OEM_JUMP = 0xEA
+ VK_OEM_PA1 = 0xEB
+ VK_OEM_PA2 = 0xEC
+ VK_OEM_PA3 = 0xED
+ VK_OEM_WSCTRL = 0xEE
+ VK_OEM_CUSEL = 0xEF
+ VK_OEM_ATTN = 0xF0
+ VK_OEM_FINISH = 0xF1
+ VK_OEM_COPY = 0xF2
+ VK_OEM_AUTO = 0xF3
+ VK_OEM_ENLW = 0xF4
+ VK_OEM_BACKTAB = 0xF5
+ VK_ATTN = 0xF6
+ VK_CRSEL = 0xF7
+ VK_EXSEL = 0xF8
+ VK_EREOF = 0xF9
+ VK_PLAY = 0xFA
+ VK_ZOOM = 0xFB
+ VK_NONAME = 0xFC
+ VK_PA1 = 0xFD
+ VK_OEM_CLEAR = 0xFE
+)
+
+// Mouse button constants.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
+ RIGHTMOST_BUTTON_PRESSED = 0x0002
+ FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
+ FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
+ FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
+)
+
+// Control key state constants.
+// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+ LEFT_ALT_PRESSED = 0x0002
+ LEFT_CTRL_PRESSED = 0x0008
+ NUMLOCK_ON = 0x0020
+ RIGHT_ALT_PRESSED = 0x0001
+ RIGHT_CTRL_PRESSED = 0x0004
+ SCROLLLOCK_ON = 0x0040
+ SHIFT_PRESSED = 0x0010
+)
+
+// Mouse event record event flags.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ MOUSE_MOVED = 0x0001
+ DOUBLE_CLICK = 0x0002
+ MOUSE_WHEELED = 0x0004
+ MOUSE_HWHEELED = 0x0008
+)
+
+// Input Record Event Types
+// https://learn.microsoft.com/en-us/windows/console/input-record-str
+const (
+ FOCUS_EVENT = 0x0010
+ KEY_EVENT = 0x0001
+ MENU_EVENT = 0x0008
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+)
diff --git a/operator/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/operator/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 01c0716c..f25b7308 100644
--- a/operator/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/operator/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -182,13 +182,17 @@ var (
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
+ procFreeMibTable = modiphlpapi.NewProc("FreeMibTable")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
+ procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2")
+ procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+ procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
@@ -238,6 +242,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
+ procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +289,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
+ procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -511,6 +517,7 @@ var (
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
procWSACleanup = modws2_32.NewProc("WSACleanup")
+ procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW")
procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
@@ -545,25 +552,25 @@ var (
)
func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) {
- r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0)
+ r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error))
ret = Errno(r0)
return
}
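Everything below this point is the mechanical part of the update: the generated wrappers switch from the fixed-arity syscall.Syscall / Syscall6 / Syscall9 / Syscall12 helpers to the variadic syscall.SyscallN, dropping the explicit argument counts and trailing zero padding. A standalone sketch of the same pattern, illustrative only; GetTickCount64 is just a convenient zero-argument kernel32 export:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// SyscallN takes only the arguments that are actually passed, instead of
	// padding to a fixed arity with trailing zeros as Syscall/Syscall6/... do.
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	procGetTickCount64 := kernel32.NewProc("GetTickCount64")

	r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
	fmt.Println("ms since boot:", uint64(r0))
}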
@@ -573,7 +580,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups,
if resetToDefault {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -585,7 +592,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
if disableAllPrivileges {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -593,7 +600,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok
}
func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -601,7 +608,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s
}
func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
+ r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -609,7 +616,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries
}
func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -617,7 +624,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err
}
func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0)
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -625,7 +632,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e
}
func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+ r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -633,7 +640,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (
}
func CloseServiceHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -641,7 +648,7 @@ func CloseServiceHandle(handle Handle) (err error) {
}
func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+ r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -649,7 +656,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -657,7 +664,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR
}
func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -674,7 +681,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -682,7 +689,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision
}
func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -690,7 +697,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
}
func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+ r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -702,7 +709,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -710,7 +717,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc
}
func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -719,7 +726,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access
}
func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -727,7 +734,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s
}
func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -735,7 +742,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16
}
func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+ r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -743,7 +750,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
}
func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -751,7 +758,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
}
func DeleteService(service Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -759,7 +766,7 @@ func DeleteService(service Handle) (err error) {
}
func DeregisterEventSource(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -767,7 +774,7 @@ func DeregisterEventSource(handle Handle) (err error) {
}
func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
- r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
+ r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -775,7 +782,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes
}
func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -783,7 +790,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_
}
func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -791,13 +798,13 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv
}
func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
- r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
+ r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)))
isEqual = r0 != 0
return
}
func FreeSid(sid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -805,7 +812,7 @@ func FreeSid(sid *SID) (err error) {
}
func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
- r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -813,7 +820,7 @@ func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
}
func GetLengthSid(sid *SID) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid)))
len = uint32(r0)
return
}
@@ -828,7 +835,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -836,7 +843,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -852,7 +859,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl
if *daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)))
*daclPresent = _p0 != 0
*daclDefaulted = _p1 != 0
if r1 == 0 {
@@ -866,7 +873,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
if *groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
*groupDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -875,7 +882,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau
}
func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd)))
len = uint32(r0)
return
}
@@ -885,7 +892,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
if *ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
*ownerDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -894,7 +901,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau
}
func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -910,7 +917,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
if *saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)))
*saclPresent = _p0 != 0
*saclDefaulted = _p1 != 0
if r1 == 0 {
@@ -920,7 +927,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl
}
func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -928,25 +935,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
- r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid)))
authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
return
}
func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index))
subAuthority = (*uint32)(unsafe.Pointer(r0))
return
}
func getSidSubAuthorityCount(sid *SID) (count *uint8) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid)))
count = (*uint8)(unsafe.Pointer(r0))
return
}
func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -954,7 +961,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func ImpersonateSelf(impersonationlevel uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -962,7 +969,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) {
}
func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -978,7 +985,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
if rebootAfterShutdown {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
+ r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -986,7 +993,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint
}
func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
- r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle))
ret = r0 != 0
if !ret {
err = errnoErr(e1)
@@ -995,25 +1002,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
}
func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd)))
isValid = r0 != 0
return
}
func isValidSid(sid *SID) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid)))
isValid = r0 != 0
return
}
func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
- r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
+ r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType))
isWellKnown = r0 != 0
return
}
func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1021,7 +1028,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen
}
func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1029,7 +1036,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3
}
func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1037,7 +1044,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err
}
func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1045,7 +1052,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE
}
func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
+ r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1053,7 +1060,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT
}
func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
- r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+ r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1061,7 +1068,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV
}
func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+ r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1069,7 +1076,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
}
func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1078,7 +1085,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha
}
func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1091,7 +1098,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
if openAsSelf {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1099,7 +1106,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token
}
func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1107,7 +1114,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize
}
func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1119,7 +1126,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
+ r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1127,7 +1134,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf
}
func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1135,7 +1142,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b
}
func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1143,7 +1150,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
}
func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1151,7 +1158,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize
}
func RegCloseKey(key Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1159,7 +1166,7 @@ func RegCloseKey(key Handle) (regerrno error) {
}
func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1175,7 +1182,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
if asynchronous {
_p1 = 1
}
- r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0)
+ r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1183,7 +1190,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32,
}
func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1191,7 +1198,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint
}
func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+ r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1199,7 +1206,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint
}
func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+ r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1207,7 +1214,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32
}
func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
+ r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1216,7 +1223,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand
}
func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
+ r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1225,7 +1232,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont
}
func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+ r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1233,7 +1240,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS
}
func RevertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1241,7 +1248,7 @@ func RevertToSelf() (err error) {
}
func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
- r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1249,7 +1256,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
}
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
- r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+ r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1266,7 +1273,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security
}
func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1274,7 +1281,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi
}
func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1290,7 +1297,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *
if daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1302,7 +1309,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul
if groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1314,7 +1321,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
if ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1322,7 +1329,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul
}
func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
- syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
return
}
@@ -1335,7 +1342,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
if saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1343,7 +1350,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *
}
func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1351,7 +1358,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati
}
func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1359,7 +1366,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error)
}
func SetThreadToken(thread *Handle, token Token) (err error) {
- r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
+ r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1367,7 +1374,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) {
}
func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1375,7 +1382,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint
}
func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1383,7 +1390,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
}
func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+ r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1391,7 +1398,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro
}
func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1399,7 +1406,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad
}
func CertCloseStore(store Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1407,7 +1414,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) {
}
func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+ r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1416,7 +1423,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en
}
func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1424,13 +1431,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
}
func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) {
- r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext)))
dupContext = (*CertContext)(unsafe.Pointer(r0))
return
}
func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext)))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1439,7 +1446,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex
}
func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
cert = (*CertContext)(unsafe.Pointer(r0))
if cert == nil {
err = errnoErr(e1)
@@ -1448,7 +1455,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags
}
func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
certchain = (*CertChainContext)(unsafe.Pointer(r0))
if certchain == nil {
err = errnoErr(e1)
@@ -1457,18 +1464,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3
}
func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
- r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
+ r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
ret = (*CertExtension)(unsafe.Pointer(r0))
return
}
func CertFreeCertificateChain(ctx *CertChainContext) {
- syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx)))
return
}
func CertFreeCertificateContext(ctx *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1476,7 +1483,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) {
}
func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
- r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0)
+ r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1484,13 +1491,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a
}
func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
- r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
+ r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
chars = uint32(r0)
return
}
func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1499,7 +1506,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr
}
func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name)))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1508,7 +1515,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
}
func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1520,7 +1527,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
if *callerFreeProvOrNCryptKey {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
*callerFreeProvOrNCryptKey = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -1529,7 +1536,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete
}
func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1537,7 +1544,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte
}
func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1545,7 +1552,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob,
}
func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1553,7 +1560,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT
}
func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1561,7 +1568,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl
}
func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1570,7 +1577,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto
}
func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
- r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0)
+ r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)))
same = r0 != 0
return
}
@@ -1585,7 +1592,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR
}
func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
- r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+ r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
if r0 != 0 {
status = syscall.Errno(r0)
}
@@ -1593,12 +1600,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN
}
func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
- syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+ syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
return
}
func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1606,7 +1613,7 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1614,15 +1621,20 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
}
func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
- r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
+func FreeMibTable(memory unsafe.Pointer) {
+ syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1630,7 +1642,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter
}
func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1638,7 +1650,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
}
func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0)
+ r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1646,7 +1658,7 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod
}
func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1654,7 +1666,23 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
}
func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1662,7 +1690,7 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1674,7 +1702,19 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1686,7 +1726,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1694,7 +1734,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
cookie = uintptr(r0)
if cookie == 0 {
err = errnoErr(e1)
@@ -1703,7 +1743,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
}
func AssignProcessToJobObject(job Handle, process Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
+ r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1711,7 +1751,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) {
}
func CancelIo(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1719,7 +1759,7 @@ func CancelIo(s Handle) (err error) {
}
func CancelIoEx(s Handle, o *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1727,7 +1767,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) {
}
func ClearCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1735,7 +1775,7 @@ func ClearCommBreak(handle Handle) (err error) {
}
func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
+ r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1743,7 +1783,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error
}
func CloseHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1751,12 +1791,12 @@ func CloseHandle(handle Handle) (err error) {
}
func ClosePseudoConsole(console Handle) {
- syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+ syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
return
}
func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1764,7 +1804,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
}
func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1772,7 +1812,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
}
func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1781,7 +1821,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d
}
func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1790,7 +1830,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat
}
func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1799,7 +1839,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS
}
func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1808,7 +1848,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes
}
func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1816,7 +1856,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr
}
func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1825,7 +1865,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr
}
func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1834,7 +1874,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle,
}
func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1847,7 +1887,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
if initialOwner {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1856,7 +1896,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16
}
func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1865,7 +1905,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u
}
func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1877,7 +1917,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1885,7 +1925,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA
}
func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
- r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+ r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -1893,7 +1933,7 @@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons
}
func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1901,7 +1941,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u
}
func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1910,7 +1950,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er
}
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+ r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1918,7 +1958,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err
}
func DeleteFile(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1926,12 +1966,12 @@ func DeleteFile(path *uint16) (err error) {
}
func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
- syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0)
+ syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
return
}
func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1939,7 +1979,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
}
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1947,7 +1987,7 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff
}
func DisconnectNamedPipe(pipe Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1959,7 +1999,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
if bInheritHandle {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1967,7 +2007,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP
}
func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0)
+ r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1975,12 +2015,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
}
func ExitProcess(exitcode uint32) {
- syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+ syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
return
}
func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -1989,7 +2029,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32,
}
func FindClose(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1997,7 +2037,7 @@ func FindClose(handle Handle) (err error) {
}
func FindCloseChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2018,7 +2058,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
if watchSubtree {
_p1 = 1
}
- r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+ r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2027,7 +2067,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter
}
func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2036,7 +2076,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro
}
func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2045,7 +2085,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b
}
func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2054,7 +2094,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er
}
func FindNextChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2062,7 +2102,7 @@ func FindNextChangeNotification(handle Handle) (err error) {
}
func findNextFile1(handle Handle, data *win32finddata1) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2070,7 +2110,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) {
}
func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2078,7 +2118,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin
}
func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2086,7 +2126,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32)
}
func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+ r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
resInfo = Handle(r0)
if resInfo == 0 {
err = errnoErr(e1)
@@ -2095,7 +2135,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle,
}
func FindVolumeClose(findVolume Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2103,7 +2143,15 @@ func FindVolumeClose(findVolume Handle) (err error) {
}
func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func FlushConsoleInputBuffer(console Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2111,7 +2159,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
}
func FlushFileBuffers(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2119,7 +2167,7 @@ func FlushFileBuffers(handle Handle) (err error) {
}
func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2131,7 +2179,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2140,7 +2188,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu
}
func FreeEnvironmentStrings(envs *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2148,7 +2196,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) {
}
func FreeLibrary(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2156,7 +2204,7 @@ func FreeLibrary(handle Handle) (err error) {
}
func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+ r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2164,19 +2212,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro
}
func GetACP() (acp uint32) {
- r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetACP.Addr())
acp = uint32(r0)
return
}
func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2184,7 +2232,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
}
func GetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2192,7 +2240,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2200,13 +2248,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func GetCommandLine() (cmd *uint16) {
- r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
cmd = (*uint16)(unsafe.Pointer(r0))
return
}
func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2214,7 +2262,7 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
}
func GetComputerName(buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2222,7 +2270,7 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
}
func GetConsoleCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2231,7 +2279,7 @@ func GetConsoleCP() (cp uint32, err error) {
}
func GetConsoleMode(console Handle, mode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2239,7 +2287,7 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
}
func GetConsoleOutputCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2248,7 +2296,7 @@ func GetConsoleOutputCP() (cp uint32, err error) {
}
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2256,7 +2304,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (
}
func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2265,19 +2313,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
}
func GetCurrentProcessId() (pid uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
pid = uint32(r0)
return
}
func GetCurrentThreadId() (id uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
id = uint32(r0)
return
}
func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2285,13 +2333,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6
}
func GetDriveType(rootPathName *uint16) (driveType uint32) {
- r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
driveType = uint32(r0)
return
}
func GetEnvironmentStrings() (envs *uint16, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
envs = (*uint16)(unsafe.Pointer(r0))
if envs == nil {
err = errnoErr(e1)
@@ -2300,7 +2348,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) {
}
func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2309,7 +2357,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32
}
func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2317,7 +2365,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
}
func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2325,7 +2373,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
}
func GetFileAttributes(name *uint16) (attrs uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
attrs = uint32(r0)
if attrs == INVALID_FILE_ATTRIBUTES {
err = errnoErr(e1)
@@ -2334,7 +2382,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) {
}
func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2342,7 +2390,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e
}
func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2350,7 +2398,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte,
}
func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2358,7 +2406,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func GetFileType(filehandle Handle) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2367,7 +2415,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) {
}
func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2376,7 +2424,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32
}
func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2385,13 +2433,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
}
func GetLargePageMinimum() (size uintptr) {
- r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
size = uintptr(r0)
return
}
func GetLastError() (lasterr error) {
- r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
if r0 != 0 {
lasterr = syscall.Errno(r0)
}
@@ -2399,7 +2447,7 @@ func GetLastError() (lasterr error) {
}
func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2408,7 +2456,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err
}
func GetLogicalDrives() (drivesBitMask uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
drivesBitMask = uint32(r0)
if drivesBitMask == 0 {
err = errnoErr(e1)
@@ -2417,7 +2465,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) {
}
func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2426,13 +2474,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er
}
func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2441,7 +2489,7 @@ func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32,
}
func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+ r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2449,7 +2497,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er
}
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2457,7 +2505,7 @@ func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err erro
}
func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2465,7 +2513,7 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m
}
func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2473,7 +2521,15 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3
}
func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2485,7 +2541,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2493,7 +2549,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa
}
func GetPriorityClass(process Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
ret = uint32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -2511,7 +2567,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
}
func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
proc = uintptr(r0)
if proc == 0 {
err = errnoErr(e1)
@@ -2520,7 +2576,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
}
func GetProcessId(process Handle) (id uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
id = uint32(r0)
if id == 0 {
err = errnoErr(e1)
@@ -2529,7 +2585,7 @@ func GetProcessId(process Handle) (id uint32, err error) {
}
func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2537,7 +2593,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin
}
func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2545,7 +2601,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
}
func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2553,12 +2609,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime,
}
func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
- syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+ syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
return
}
func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2566,7 +2622,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl
}
func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2575,12 +2631,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin
}
func getStartupInfo(startupInfo *StartupInfo) {
- syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+ syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
return
}
func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2589,7 +2645,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
}
func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2598,7 +2654,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2606,17 +2662,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func GetSystemTimeAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func GetSystemTimePreciseAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2625,7 +2681,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro
}
func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2634,7 +2690,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
}
func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2642,13 +2698,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint
}
func getTickCount64() (ms uint64) {
- r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
ms = uint64(r0)
return
}
func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
rc = uint32(r0)
if rc == 0xffffffff {
err = errnoErr(e1)
@@ -2657,7 +2713,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
}
func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2665,7 +2721,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16
}
func GetVersion() (ver uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
ver = uint32(r0)
if ver == 0 {
err = errnoErr(e1)
@@ -2674,7 +2730,7 @@ func GetVersion() (ver uint32, err error) {
}
func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2682,7 +2738,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN
}
func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2690,7 +2746,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume
}
func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2698,7 +2754,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint
}
func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2706,7 +2762,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui
}
func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2714,7 +2770,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16
}
func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2723,7 +2779,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
}
func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2735,7 +2791,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
if *isWow64 {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0)
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
*isWow64 = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -2748,7 +2804,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2765,7 +2821,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e
}
func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2783,7 +2839,7 @@ func LoadLibrary(libname string) (handle Handle, err error) {
}
func _LoadLibrary(libname *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2792,7 +2848,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) {
}
func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
resData = Handle(r0)
if resData == 0 {
err = errnoErr(e1)
@@ -2801,7 +2857,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
}
func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
ptr = uintptr(r0)
if ptr == 0 {
err = errnoErr(e1)
@@ -2810,7 +2866,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
}
func LocalFree(hmem Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
handle = Handle(r0)
if handle != 0 {
err = errnoErr(e1)
@@ -2819,7 +2875,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) {
}
func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+ r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2827,7 +2883,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt
}
func LockResource(resData Handle) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2836,7 +2892,7 @@ func LockResource(resData Handle) (addr uintptr, err error) {
}
func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2845,7 +2901,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui
}
func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2853,7 +2909,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2861,7 +2917,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
}
func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2869,7 +2925,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
}
func MoveFile(from *uint16, to *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+ r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2877,7 +2933,7 @@ func MoveFile(from *uint16, to *uint16) (err error) {
}
func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+ r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
nwrite = int32(r0)
if nwrite == 0 {
err = errnoErr(e1)
@@ -2890,7 +2946,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2903,7 +2959,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2916,7 +2972,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+ r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2929,7 +2985,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+ r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2938,7 +2994,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand
}
func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2946,7 +3002,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla
}
func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2954,7 +3010,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2962,7 +3018,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
}
func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2970,7 +3026,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
}
func PulseEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2978,7 +3034,7 @@ func PulseEvent(event Handle) (err error) {
}
func PurgeComm(handle Handle, dwFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0)
+ r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2986,7 +3042,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) {
}
func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+ r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2995,7 +3051,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3
}
func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3003,7 +3059,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size
}
func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3011,7 +3067,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO
}
func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3023,7 +3079,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree
if watchSubTree {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0)
+ r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3035,7 +3091,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3043,7 +3099,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (
}
func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3051,7 +3107,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u
}
func ReleaseMutex(mutex Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3059,7 +3115,7 @@ func ReleaseMutex(mutex Handle) (err error) {
}
func RemoveDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3067,7 +3123,7 @@ func RemoveDirectory(path *uint16) (err error) {
}
func RemoveDllDirectory(cookie uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3075,7 +3131,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) {
}
func ResetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3083,7 +3139,7 @@ func ResetEvent(event Handle) (err error) {
}
func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+ r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -3091,7 +3147,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
}
func ResumeThread(thread Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread))
ret = uint32(r0)
if ret == 0xffffffff {
err = errnoErr(e1)
@@ -3100,7 +3156,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) {
}
func SetCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3108,7 +3164,7 @@ func SetCommBreak(handle Handle) (err error) {
}
func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3116,7 +3172,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
}
func SetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3124,7 +3180,7 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) {
}
func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3132,7 +3188,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
}
func SetConsoleCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3140,7 +3196,7 @@ func SetConsoleCP(cp uint32) (err error) {
}
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3148,7 +3204,7 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) {
}
func SetConsoleMode(console Handle, mode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3156,7 +3212,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
}
func SetConsoleOutputCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3164,7 +3220,7 @@ func SetConsoleOutputCP(cp uint32) (err error) {
}
func SetCurrentDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3172,7 +3228,7 @@ func SetCurrentDirectory(path *uint16) (err error) {
}
func SetDefaultDllDirectories(directoryFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3189,7 +3245,7 @@ func SetDllDirectory(path string) (err error) {
}
func _SetDllDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3197,7 +3253,7 @@ func _SetDllDirectory(path *uint16) (err error) {
}
func SetEndOfFile(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3205,7 +3261,7 @@ func SetEndOfFile(handle Handle) (err error) {
}
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3213,13 +3269,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
}
func SetErrorMode(mode uint32) (ret uint32) {
- r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode))
ret = uint32(r0)
return
}
func SetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3227,7 +3283,7 @@ func SetEvent(event Handle) (err error) {
}
func SetFileAttributes(name *uint16, attrs uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3235,7 +3291,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) {
}
func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3243,7 +3299,7 @@ func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error)
}
func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3251,7 +3307,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB
}
func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence))
newlowoffset = uint32(r0)
if newlowoffset == 0xffffffff {
err = errnoErr(e1)
@@ -3260,7 +3316,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence
}
func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3268,7 +3324,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim
}
func SetFileValidData(handle Handle, validDataLength int64) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3276,7 +3332,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) {
}
func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3284,7 +3340,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
}
func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
- r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength))
ret = int(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -3293,7 +3349,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb
}
func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3301,7 +3357,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin
}
func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
+ r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3313,7 +3369,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
if disable {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3321,7 +3377,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
}
func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3329,7 +3385,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
}
func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3337,7 +3393,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr
}
func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
+ r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3345,7 +3401,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
}
func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3353,7 +3409,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
}
func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3361,7 +3417,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro
}
func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
+ r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3369,7 +3425,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
}
func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo))
size = uint32(r0)
if size == 0 {
err = errnoErr(e1)
@@ -3382,13 +3438,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
if alertable {
_p0 = 1
}
- r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0)
+ r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0))
ret = uint32(r0)
return
}
func TerminateJobObject(job Handle, exitCode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3396,7 +3452,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) {
}
func TerminateProcess(handle Handle, exitcode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3404,7 +3460,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) {
}
func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3412,7 +3468,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3420,7 +3476,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
}
func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3428,7 +3484,7 @@ func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3
}
func UnmapViewOfFile(addr uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3436,7 +3492,7 @@ func UnmapViewOfFile(addr uintptr) (err error) {
}
func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3444,7 +3500,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32,
}
func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
value = uintptr(r0)
if value == 0 {
err = errnoErr(e1)
@@ -3453,7 +3509,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3
}
func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+ r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3461,7 +3517,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
}
func VirtualLock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3469,7 +3525,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) {
}
func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3477,7 +3533,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect
}
func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3485,7 +3541,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect
}
func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+ r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3493,7 +3549,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt
}
func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3501,7 +3557,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat
}
func VirtualUnlock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3509,13 +3565,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) {
}
func WTSGetActiveConsoleSessionId() (sessionID uint32) {
- r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
sessionID = uint32(r0)
return
}
func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+ r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3527,7 +3583,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
if waitAll {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3536,7 +3592,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
}
func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
- r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3545,7 +3601,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
}
func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3557,7 +3613,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3565,7 +3621,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
}
func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3573,7 +3629,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size
}
func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3581,12 +3637,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
}
func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
- syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+ syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
return
}
func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3594,7 +3650,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
}
func NetApiBufferFree(buf *byte) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3602,7 +3658,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
}
func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+ r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3610,7 +3666,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
}
func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3618,7 +3674,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
}
func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
- r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3626,7 +3682,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
}
func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3634,7 +3690,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
}
func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3642,7 +3698,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
}
func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+ r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3650,7 +3706,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
}
func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3658,7 +3714,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
}
func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3666,7 +3722,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte,
}
func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3674,7 +3730,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
}
func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+ r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3682,13 +3738,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
}
func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+ r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
ret = r0 != 0
return
}
func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3696,13 +3752,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
}
func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
ret = r0 != 0
return
}
func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3710,7 +3766,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
}
func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3718,18 +3774,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
}
func RtlGetCurrentPeb() (peb *PEB) {
- r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
peb = (*PEB)(unsafe.Pointer(r0))
return
}
func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
- syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+ syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
return
}
func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3737,23 +3793,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
}
func RtlInitString(destinationString *NTString, sourceString *byte) {
- syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
- syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
ret = syscall.Errno(r0)
return
}
func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+ r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3761,7 +3817,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
}
func coCreateGuid(pguid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3769,7 +3825,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
}
func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
- r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3777,7 +3833,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
}
func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
- r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+ r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3785,23 +3841,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
}
func CoTaskMemFree(address unsafe.Pointer) {
- syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+ syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
return
}
func CoUninitialize() {
- syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+ syscall.SyscallN(procCoUninitialize.Addr())
return
}
func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
- r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+ r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
chars = int32(r0)
return
}
func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3809,7 +3865,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
}
func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3817,7 +3873,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
}
func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3825,7 +3881,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
}
func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3833,7 +3889,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
}
func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3841,7 +3897,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
}
func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3849,7 +3905,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
}
func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+ r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3861,7 +3917,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
if ret != nil {
return
}
- r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+ r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3873,12 +3929,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
if err != nil {
return
}
- syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+ syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
return
}
func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+ r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3886,7 +3942,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
}
func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3894,7 +3950,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
}
func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3902,7 +3958,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3910,7 +3966,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
}
func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3918,7 +3974,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
}
func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3926,7 +3982,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
}
func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3934,7 +3990,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
}
func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3943,7 +3999,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
}
func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3951,7 +4007,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
}
func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3959,7 +4015,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
}
func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3967,7 +4023,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3975,7 +4031,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
}
func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3983,7 +4039,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d
}
func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3992,7 +4048,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp
}
func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4000,7 +4056,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4008,7 +4064,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa
}
func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4016,7 +4072,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4024,7 +4080,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4032,7 +4088,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4040,7 +4096,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4048,7 +4104,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
}
func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4056,7 +4112,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4064,7 +4120,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+ r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
key = Handle(r0)
if key == InvalidHandle {
err = errnoErr(e1)
@@ -4073,7 +4129,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc
}
func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4081,7 +4137,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
}
func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4089,7 +4145,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
}
func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4097,7 +4153,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
}
func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4105,7 +4161,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4113,7 +4169,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
}
func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4121,7 +4177,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er
}
func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
- r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+ r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
argv = (**uint16)(unsafe.Pointer(r0))
if argv == nil {
err = errnoErr(e1)
@@ -4130,7 +4186,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
}
func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
- r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4138,7 +4194,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u
}
func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+ r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
if r1 <= 32 {
err = errnoErr(e1)
}
@@ -4146,12 +4202,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
}
func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
- syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+ syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
return
}
func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4159,7 +4215,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
}
func ExitWindowsEx(flags uint32, reason uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+ r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4167,7 +4223,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
}
func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
- r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+ r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
copied = int32(r0)
if copied == 0 {
err = errnoErr(e1)
@@ -4176,19 +4232,19 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e
}
func GetDesktopWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
hwnd = HWND(r0)
return
}
func GetForegroundWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
hwnd = HWND(r0)
return
}
func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4196,19 +4252,19 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
}
func GetKeyboardLayout(tid uint32) (hkl Handle) {
- r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
hkl = Handle(r0)
return
}
func GetShellWindow() (shellWindow HWND) {
- r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
shellWindow = HWND(r0)
return
}
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
tid = uint32(r0)
if tid == 0 {
err = errnoErr(e1)
@@ -4217,25 +4273,25 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
}
func IsWindow(hwnd HWND) (isWindow bool) {
- r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
isWindow = r0 != 0
return
}
func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
- r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
isUnicode = r0 != 0
return
}
func IsWindowVisible(hwnd HWND) (isVisible bool) {
- r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
isVisible = r0 != 0
return
}
func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
hkl = Handle(r0)
if hkl == 0 {
err = errnoErr(e1)
@@ -4244,7 +4300,7 @@ func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
}
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
ret = int32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -4253,13 +4309,13 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
}
func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
- r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+ r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
ret = int32(r0)
return
}
func UnloadKeyboardLayout(hkl Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4271,7 +4327,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
if inheritExisting {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4279,7 +4335,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
}
func DestroyEnvironmentBlock(block *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4287,7 +4343,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) {
}
func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+ r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4304,7 +4360,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32
}
func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
bufSize = uint32(r0)
if bufSize == 0 {
err = errnoErr(e1)
@@ -4322,7 +4378,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u
}
func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4339,7 +4395,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer
}
func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4347,7 +4403,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
}
func TimeBeginPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4355,7 +4411,7 @@ func TimeBeginPeriod(period uint32) (err error) {
}
func TimeEndPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4363,7 +4419,7 @@ func TimeEndPeriod(period uint32) (err error) {
}
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
- r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+ r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4371,12 +4427,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error)
}
func FreeAddrInfoW(addrinfo *AddrinfoW) {
- syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+ syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
return
}
func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
- r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4384,15 +4440,23 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul
}
func WSACleanup() (err error) {
- r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
if r1 == socket_error {
err = errnoErr(e1)
}
return
}
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+ r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+ if r1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
- r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+ r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4405,7 +4469,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4413,7 +4477,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
}
func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+ r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4421,7 +4485,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
}
func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4429,7 +4493,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle)
}
func WSALookupServiceEnd(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4437,7 +4501,7 @@ func WSALookupServiceEnd(handle Handle) (err error) {
}
func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
- r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4445,7 +4509,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS
}
func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4453,7 +4517,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32
}
func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4461,7 +4525,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui
}
func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4469,7 +4533,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32,
}
func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4477,7 +4541,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32
}
func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4486,7 +4550,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo,
}
func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
- r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4494,7 +4558,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
}
func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4502,7 +4566,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
}
func Closesocket(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4510,7 +4574,7 @@ func Closesocket(s Handle) (err error) {
}
func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4527,7 +4591,7 @@ func GetHostByName(name string) (h *Hostent, err error) {
}
func _GetHostByName(name *byte) (h *Hostent, err error) {
- r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
h = (*Hostent)(unsafe.Pointer(r0))
if h == nil {
err = errnoErr(e1)
@@ -4536,7 +4600,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) {
}
func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4553,7 +4617,7 @@ func GetProtoByName(name string) (p *Protoent, err error) {
}
func _GetProtoByName(name *byte) (p *Protoent, err error) {
- r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
p = (*Protoent)(unsafe.Pointer(r0))
if p == nil {
err = errnoErr(e1)
@@ -4576,7 +4640,7 @@ func GetServByName(name string, proto string) (s *Servent, err error) {
}
func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
- r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0)
+ r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)))
s = (*Servent)(unsafe.Pointer(r0))
if s == nil {
err = errnoErr(e1)
@@ -4585,7 +4649,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
}
func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4593,7 +4657,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
}
func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4601,7 +4665,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3
}
func listen(s Handle, backlog int32) (err error) {
- r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0)
+ r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4609,7 +4673,7 @@ func listen(s Handle, backlog int32) (err error) {
}
func Ntohs(netshort uint16) (u uint16) {
- r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0)
+ r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort))
u = uint16(r0)
return
}
@@ -4619,7 +4683,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4632,7 +4696,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
+ r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4640,7 +4704,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (
}
func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0)
+ r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4648,7 +4712,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32
}
func shutdown(s Handle, how int32) (err error) {
- r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0)
+ r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4656,7 +4720,7 @@ func shutdown(s Handle, how int32) (err error) {
}
func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol))
+ r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4665,7 +4729,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
}
func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4673,12 +4737,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio
}
func WTSFreeMemory(ptr uintptr) {
- syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
+ syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr))
return
}
func WTSQueryUserToken(session uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
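
The hunks above are a mechanical migration of the generated x/sys/windows wrappers from the fixed-arity syscall.Syscall/Syscall6/Syscall9 helpers, which require an explicit argument count plus trailing zero padding, to the variadic syscall.SyscallN. For readers unfamiliar with the pattern, here is a minimal hand-written sketch of the same calling convention; the messageBox wrapper and its error handling are illustrative only and are not part of this patch.

//go:build windows

package main

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	moduser32       = windows.NewLazySystemDLL("user32.dll")
	procMessageBoxW = moduser32.NewProc("MessageBoxW")
)

// messageBox calls user32!MessageBoxW the same way the generated wrappers do:
// SyscallN takes exactly the arguments that are passed, so no argument count
// and no trailing zero padding are needed.
func messageBox(hwnd uintptr, text, caption string, boxType uint32) (int32, error) {
	pText, err := syscall.UTF16PtrFromString(text)
	if err != nil {
		return 0, err
	}
	pCaption, err := syscall.UTF16PtrFromString(caption)
	if err != nil {
		return 0, err
	}
	r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(),
		hwnd,
		uintptr(unsafe.Pointer(pText)),
		uintptr(unsafe.Pointer(pCaption)),
		uintptr(boxType))
	ret := int32(r0)
	if ret == 0 {
		if e1 != 0 {
			return 0, e1 // e1 is a syscall.Errno from GetLastError
		}
		return 0, syscall.EINVAL
	}
	return ret, nil
}

func main() {
	if _, err := messageBox(0, "hello from SyscallN", "demo", 0); err != nil {
		panic(err)
	}
}
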
diff --git a/operator/vendor/golang.org/x/term/term_windows.go b/operator/vendor/golang.org/x/term/term_windows.go
index df6bf948..0ddd81c0 100644
--- a/operator/vendor/golang.org/x/term/term_windows.go
+++ b/operator/vendor/golang.org/x/term/term_windows.go
@@ -20,12 +20,14 @@ func isTerminal(fd int) bool {
return err == nil
}
+// This is intended to be used on a console input handle.
+// See https://learn.microsoft.com/en-us/windows/console/setconsolemode
func makeRaw(fd int) (*State, error) {
var st uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
return nil, err
}
- raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT)
raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
return nil, err
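
The term_windows.go change documents that makeRaw is applied to the console input handle and stops clearing ENABLE_PROCESSED_OUTPUT, which is an output-handle flag. A small usage sketch (not part of the patch) that exercises this path through the exported term.MakeRaw API on stdin:

//go:build windows

package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// MakeRaw operates on the console input handle (stdin here) and restores
	// the saved state on exit via term.Restore.
	fd := int(os.Stdin.Fd())
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer term.Restore(fd, oldState)

	// In raw mode, bytes arrive without line buffering or local echo.
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err == nil {
		fmt.Printf("\r\nread byte: %q\r\n", buf[0])
	}
}
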
diff --git a/operator/vendor/golang.org/x/term/terminal.go b/operator/vendor/golang.org/x/term/terminal.go
index f636667f..9255449b 100644
--- a/operator/vendor/golang.org/x/term/terminal.go
+++ b/operator/vendor/golang.org/x/term/terminal.go
@@ -6,6 +6,7 @@ package term
import (
"bytes"
+ "fmt"
"io"
"runtime"
"strconv"
@@ -36,6 +37,26 @@ var vt100EscapeCodes = EscapeCodes{
Reset: []byte{keyEscape, '[', '0', 'm'},
}
+// A History provides a (possibly bounded) queue of input lines read by [Terminal.ReadLine].
+type History interface {
+ // Add will be called by [Terminal.ReadLine] to add
+ // a new, most recent entry to the history.
+ // It is allowed to drop any entry, including
+ // the entry being added (e.g., if it's deemed an invalid entry),
+ // the least-recent entry (e.g., to keep the history bounded),
+ // or any other entry.
+ Add(entry string)
+
+ // Len returns the number of entries in the history.
+ Len() int
+
+ // At returns an entry from the history.
+ // Index 0 is the most-recently added entry and
+ // index Len()-1 is the least-recently added entry.
+ // If index is < 0 or >= Len(), it panics.
+ At(idx int) string
+}
+
// Terminal contains the state for running a VT100 terminal that is capable of
// reading lines of input.
type Terminal struct {
@@ -44,6 +65,8 @@ type Terminal struct {
// bytes, as an index into |line|). If it returns ok=false, the key
// press is processed normally. Otherwise it returns a replacement line
// and the new cursor position.
+ //
+ // This will be disabled during ReadPassword.
AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
// Escape contains a pointer to the escape codes for this terminal.
@@ -84,9 +107,14 @@ type Terminal struct {
remainder []byte
inBuf [256]byte
- // history contains previously entered commands so that they can be
- // accessed with the up and down keys.
- history stRingBuffer
+ // History records and retrieves lines of input read by [ReadLine] which
+ // a user can retrieve and navigate using the up and down arrow keys.
+ //
+ // It is not safe to call ReadLine concurrently with any methods on History.
+ //
+ // [NewTerminal] sets this to a default implementation that records the
+ // last 100 lines of input.
+ History History
// historyIndex stores the currently accessed history entry, where zero
// means the immediately previous entry.
historyIndex int
@@ -109,6 +137,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
termHeight: 24,
echo: true,
historyIndex: -1,
+ History: &stRingBuffer{},
}
}
@@ -117,6 +146,7 @@ const (
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
+ keyLF = '\n'
keyEscape = 27
keyBackspace = 127
keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
@@ -383,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) {
}
}
-// countToLeftWord returns then number of characters from the cursor to the
+// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
@@ -408,7 +438,7 @@ func (t *Terminal) countToLeftWord() int {
return t.pos - pos
}
-// countToRightWord returns then number of characters from the cursor to the
+// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
@@ -448,10 +478,27 @@ func visualLength(runes []rune) int {
return length
}
+// historyAt unlocks the terminal and relocks it while calling History.At.
+func (t *Terminal) historyAt(idx int) (string, bool) {
+ t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
+ defer t.lock.Lock() // panic in At (or Len) protection.
+ if idx < 0 || idx >= t.History.Len() {
+ return "", false
+ }
+ return t.History.At(idx), true
+}
+
+// historyAdd unlocks the terminal and relocks it while calling History.Add.
+func (t *Terminal) historyAdd(entry string) {
+ t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
+ defer t.lock.Lock() // panic in Add protection.
+ t.History.Add(entry)
+}
+
// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
- if t.pasteActive && key != keyEnter {
+ if t.pasteActive && key != keyEnter && key != keyLF {
t.addKeyToLine(key)
return
}
@@ -495,7 +542,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.pos = len(t.line)
t.moveCursorToPos(t.pos)
case keyUp:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
+ entry, ok := t.historyAt(t.historyIndex + 1)
if !ok {
return "", false
}
@@ -514,14 +561,14 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
t.setLine(runes, len(runes))
t.historyIndex--
default:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
+ entry, ok := t.historyAt(t.historyIndex - 1)
if ok {
t.historyIndex--
runes := []rune(entry)
t.setLine(runes, len(runes))
}
}
- case keyEnter:
+ case keyEnter, keyLF:
t.moveCursorToPos(len(t.line))
t.queue([]rune("\r\n"))
line = string(t.line)
@@ -692,6 +739,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
// ReadPassword temporarily changes the prompt and reads a password, without
// echo, from the terminal.
+//
+// The AutoCompleteCallback is disabled during this call.
func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
@@ -699,6 +748,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
oldPrompt := t.prompt
t.prompt = []rune(prompt)
t.echo = false
+ oldAutoCompleteCallback := t.AutoCompleteCallback
+ t.AutoCompleteCallback = nil
+ defer func() {
+ t.AutoCompleteCallback = oldAutoCompleteCallback
+ }()
line, err = t.readLine()
@@ -759,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) {
if !t.pasteActive {
lineIsPasted = false
}
+ // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line.
+ if key == keyEnter && len(rest) > 0 && rest[0] == keyLF {
+ rest = rest[1:]
+ }
line, lineOk = t.handleKey(key)
}
if len(rest) > 0 {
@@ -772,7 +830,7 @@ func (t *Terminal) readLine() (line string, err error) {
if lineOk {
if t.echo {
t.historyIndex = -1
- t.history.Add(line)
+ t.historyAdd(line)
}
if lineIsPasted {
err = ErrPasteIndicator
@@ -929,19 +987,23 @@ func (s *stRingBuffer) Add(a string) {
}
}
-// NthPreviousEntry returns the value passed to the nth previous call to Add.
+func (s *stRingBuffer) Len() int {
+ return s.size
+}
+
+// At returns the value passed to the nth previous call to Add.
// If n is zero then the immediately prior value is returned, if one, then the
// next most recent, and so on. If such an element doesn't exist then ok is
// false.
-func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
+func (s *stRingBuffer) At(n int) string {
if n < 0 || n >= s.size {
- return "", false
+ panic(fmt.Sprintf("term: history index [%d] out of range [0,%d)", n, s.size))
}
index := s.head - n
if index < 0 {
index += s.max
}
- return s.entries[index], true
+ return s.entries[index]
}
// readPasswordLine reads from reader until it finds \n or io.EOF.
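
The terminal.go changes introduce a public History interface and an exported Terminal.History field in place of the previously private ring buffer. As an illustration only, the dedupHistory type below is hypothetical (not part of x/term) and shows how a caller could plug in a custom implementation; note that At must panic on out-of-range indexes, matching the contract the reworked stRingBuffer.At now enforces.

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/term"
)

// dedupHistory is a hypothetical History implementation: it keeps each
// distinct line once, most recent first, which the interface explicitly allows.
type dedupHistory struct {
	entries []string // entries[0] is the most recently added line
}

func (h *dedupHistory) Add(entry string) {
	if entry == "" {
		return // implementations may drop entries they deem invalid
	}
	for i, e := range h.entries {
		if e == entry {
			h.entries = append(h.entries[:i], h.entries[i+1:]...)
			break
		}
	}
	h.entries = append([]string{entry}, h.entries...)
}

func (h *dedupHistory) Len() int { return len(h.entries) }

// At panics for out-of-range indexes, as the History contract requires.
func (h *dedupHistory) At(idx int) string { return h.entries[idx] }

func main() {
	// Assumes the terminal is already in raw mode (see term.MakeRaw above).
	screen := struct {
		io.Reader
		io.Writer
	}{os.Stdin, os.Stdout}

	t := term.NewTerminal(screen, "> ")
	t.History = &dedupHistory{} // replace the default 100-line ring buffer

	line, err := t.ReadLine()
	if err != nil {
		return
	}
	fmt.Fprintln(t, "you typed:", line)
}
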
diff --git a/operator/vendor/golang.org/x/text/unicode/bidi/core.go b/operator/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae547..fb827323 100644
--- a/operator/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/operator/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
+ sos: typeForLevel(max(prevLevel, level)),
+ eos: typeForLevel(max(succLevel, level)),
}
}
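
The bidi change drops a hand-rolled maxLevel helper in favor of the max builtin added in Go 1.21, which accepts any ordered operands, including defined integer types. A standalone sketch of the same idea (the level definition here is an assumption for illustration):

package main

import "fmt"

// level stands in for the bidi package's defined embedding-level type;
// its exact underlying type is assumed here for illustration.
type level int8

func main() {
	var prevLevel, embeddingLevel level = 1, 2
	// Since Go 1.21, max is a builtin over ordered operands, so no
	// per-type helper such as maxLevel is required.
	fmt.Println(max(prevLevel, embeddingLevel)) // prints 2
}
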
diff --git a/operator/vendor/golang.org/x/tools/go/ast/edge/edge.go b/operator/vendor/golang.org/x/tools/go/ast/edge/edge.go
new file mode 100644
index 00000000..4f6ccfd6
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/ast/edge/edge.go
@@ -0,0 +1,295 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edge defines identifiers for each field of an ast.Node
+// struct type that refers to another Node.
+package edge
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+)
+
+// A Kind describes a field of an ast.Node struct.
+type Kind uint8
+
+// String returns a description of the edge kind.
+func (k Kind) String() string {
+ if k == Invalid {
+ return ""
+ }
+ info := fieldInfos[k]
+ return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name)
+}
+
+// NodeType returns the pointer-to-struct type of the ast.Node implementation.
+func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType }
+
+// FieldName returns the name of the field.
+func (k Kind) FieldName() string { return fieldInfos[k].name }
+
+// FieldType returns the declared type of the field.
+func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType }
+
+// Get returns the direct child of n identified by (k, idx).
+// n's type must match k.NodeType().
+// idx must be a valid slice index, or -1 for a non-slice.
+func (k Kind) Get(n ast.Node, idx int) ast.Node {
+ if k.NodeType() != reflect.TypeOf(n) {
+ panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n))
+ }
+ v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index)
+ if idx != -1 {
+ v = v.Index(idx) // asserts valid index
+ } else {
+ // (The type assertion below asserts that v is not a slice.)
+ }
+ return v.Interface().(ast.Node) // may be nil
+}
+
+const (
+ Invalid Kind = iota // for nodes at the root of the traversal
+
+ // Kinds are sorted alphabetically.
+ // Numbering is not stable.
+ // Each is named Type_Field, where Type is the
+ // ast.Node struct type and Field is the name of the field
+
+ ArrayType_Elt
+ ArrayType_Len
+ AssignStmt_Lhs
+ AssignStmt_Rhs
+ BinaryExpr_X
+ BinaryExpr_Y
+ BlockStmt_List
+ BranchStmt_Label
+ CallExpr_Args
+ CallExpr_Fun
+ CaseClause_Body
+ CaseClause_List
+ ChanType_Value
+ CommClause_Body
+ CommClause_Comm
+ CommentGroup_List
+ CompositeLit_Elts
+ CompositeLit_Type
+ DeclStmt_Decl
+ DeferStmt_Call
+ Ellipsis_Elt
+ ExprStmt_X
+ FieldList_List
+ Field_Comment
+ Field_Doc
+ Field_Names
+ Field_Tag
+ Field_Type
+ File_Decls
+ File_Doc
+ File_Name
+ ForStmt_Body
+ ForStmt_Cond
+ ForStmt_Init
+ ForStmt_Post
+ FuncDecl_Body
+ FuncDecl_Doc
+ FuncDecl_Name
+ FuncDecl_Recv
+ FuncDecl_Type
+ FuncLit_Body
+ FuncLit_Type
+ FuncType_Params
+ FuncType_Results
+ FuncType_TypeParams
+ GenDecl_Doc
+ GenDecl_Specs
+ GoStmt_Call
+ IfStmt_Body
+ IfStmt_Cond
+ IfStmt_Else
+ IfStmt_Init
+ ImportSpec_Comment
+ ImportSpec_Doc
+ ImportSpec_Name
+ ImportSpec_Path
+ IncDecStmt_X
+ IndexExpr_Index
+ IndexExpr_X
+ IndexListExpr_Indices
+ IndexListExpr_X
+ InterfaceType_Methods
+ KeyValueExpr_Key
+ KeyValueExpr_Value
+ LabeledStmt_Label
+ LabeledStmt_Stmt
+ MapType_Key
+ MapType_Value
+ ParenExpr_X
+ RangeStmt_Body
+ RangeStmt_Key
+ RangeStmt_Value
+ RangeStmt_X
+ ReturnStmt_Results
+ SelectStmt_Body
+ SelectorExpr_Sel
+ SelectorExpr_X
+ SendStmt_Chan
+ SendStmt_Value
+ SliceExpr_High
+ SliceExpr_Low
+ SliceExpr_Max
+ SliceExpr_X
+ StarExpr_X
+ StructType_Fields
+ SwitchStmt_Body
+ SwitchStmt_Init
+ SwitchStmt_Tag
+ TypeAssertExpr_Type
+ TypeAssertExpr_X
+ TypeSpec_Comment
+ TypeSpec_Doc
+ TypeSpec_Name
+ TypeSpec_Type
+ TypeSpec_TypeParams
+ TypeSwitchStmt_Assign
+ TypeSwitchStmt_Body
+ TypeSwitchStmt_Init
+ UnaryExpr_X
+ ValueSpec_Comment
+ ValueSpec_Doc
+ ValueSpec_Names
+ ValueSpec_Type
+ ValueSpec_Values
+
+ maxKind
+)
+
+// Assert that the encoding fits in 7 bits,
+// as the inspector relies on this.
+// (We are currently at 104.)
+var _ = [1 << 7]struct{}{}[maxKind]
+
+type fieldInfo struct {
+ nodeType reflect.Type // pointer-to-struct type of ast.Node implementation
+ name string
+ index int
+ fieldType reflect.Type
+}
+
+func info[N ast.Node](fieldName string) fieldInfo {
+ nodePtrType := reflect.TypeFor[N]()
+ f, ok := nodePtrType.Elem().FieldByName(fieldName)
+ if !ok {
+ panic(fieldName)
+ }
+ return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type}
+}
+
+var fieldInfos = [...]fieldInfo{
+ Invalid: {},
+ ArrayType_Elt: info[*ast.ArrayType]("Elt"),
+ ArrayType_Len: info[*ast.ArrayType]("Len"),
+ AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"),
+ AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"),
+ BinaryExpr_X: info[*ast.BinaryExpr]("X"),
+ BinaryExpr_Y: info[*ast.BinaryExpr]("Y"),
+ BlockStmt_List: info[*ast.BlockStmt]("List"),
+ BranchStmt_Label: info[*ast.BranchStmt]("Label"),
+ CallExpr_Args: info[*ast.CallExpr]("Args"),
+ CallExpr_Fun: info[*ast.CallExpr]("Fun"),
+ CaseClause_Body: info[*ast.CaseClause]("Body"),
+ CaseClause_List: info[*ast.CaseClause]("List"),
+ ChanType_Value: info[*ast.ChanType]("Value"),
+ CommClause_Body: info[*ast.CommClause]("Body"),
+ CommClause_Comm: info[*ast.CommClause]("Comm"),
+ CommentGroup_List: info[*ast.CommentGroup]("List"),
+ CompositeLit_Elts: info[*ast.CompositeLit]("Elts"),
+ CompositeLit_Type: info[*ast.CompositeLit]("Type"),
+ DeclStmt_Decl: info[*ast.DeclStmt]("Decl"),
+ DeferStmt_Call: info[*ast.DeferStmt]("Call"),
+ Ellipsis_Elt: info[*ast.Ellipsis]("Elt"),
+ ExprStmt_X: info[*ast.ExprStmt]("X"),
+ FieldList_List: info[*ast.FieldList]("List"),
+ Field_Comment: info[*ast.Field]("Comment"),
+ Field_Doc: info[*ast.Field]("Doc"),
+ Field_Names: info[*ast.Field]("Names"),
+ Field_Tag: info[*ast.Field]("Tag"),
+ Field_Type: info[*ast.Field]("Type"),
+ File_Decls: info[*ast.File]("Decls"),
+ File_Doc: info[*ast.File]("Doc"),
+ File_Name: info[*ast.File]("Name"),
+ ForStmt_Body: info[*ast.ForStmt]("Body"),
+ ForStmt_Cond: info[*ast.ForStmt]("Cond"),
+ ForStmt_Init: info[*ast.ForStmt]("Init"),
+ ForStmt_Post: info[*ast.ForStmt]("Post"),
+ FuncDecl_Body: info[*ast.FuncDecl]("Body"),
+ FuncDecl_Doc: info[*ast.FuncDecl]("Doc"),
+ FuncDecl_Name: info[*ast.FuncDecl]("Name"),
+ FuncDecl_Recv: info[*ast.FuncDecl]("Recv"),
+ FuncDecl_Type: info[*ast.FuncDecl]("Type"),
+ FuncLit_Body: info[*ast.FuncLit]("Body"),
+ FuncLit_Type: info[*ast.FuncLit]("Type"),
+ FuncType_Params: info[*ast.FuncType]("Params"),
+ FuncType_Results: info[*ast.FuncType]("Results"),
+ FuncType_TypeParams: info[*ast.FuncType]("TypeParams"),
+ GenDecl_Doc: info[*ast.GenDecl]("Doc"),
+ GenDecl_Specs: info[*ast.GenDecl]("Specs"),
+ GoStmt_Call: info[*ast.GoStmt]("Call"),
+ IfStmt_Body: info[*ast.IfStmt]("Body"),
+ IfStmt_Cond: info[*ast.IfStmt]("Cond"),
+ IfStmt_Else: info[*ast.IfStmt]("Else"),
+ IfStmt_Init: info[*ast.IfStmt]("Init"),
+ ImportSpec_Comment: info[*ast.ImportSpec]("Comment"),
+ ImportSpec_Doc: info[*ast.ImportSpec]("Doc"),
+ ImportSpec_Name: info[*ast.ImportSpec]("Name"),
+ ImportSpec_Path: info[*ast.ImportSpec]("Path"),
+ IncDecStmt_X: info[*ast.IncDecStmt]("X"),
+ IndexExpr_Index: info[*ast.IndexExpr]("Index"),
+ IndexExpr_X: info[*ast.IndexExpr]("X"),
+ IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"),
+ IndexListExpr_X: info[*ast.IndexListExpr]("X"),
+ InterfaceType_Methods: info[*ast.InterfaceType]("Methods"),
+ KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"),
+ KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"),
+ LabeledStmt_Label: info[*ast.LabeledStmt]("Label"),
+ LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"),
+ MapType_Key: info[*ast.MapType]("Key"),
+ MapType_Value: info[*ast.MapType]("Value"),
+ ParenExpr_X: info[*ast.ParenExpr]("X"),
+ RangeStmt_Body: info[*ast.RangeStmt]("Body"),
+ RangeStmt_Key: info[*ast.RangeStmt]("Key"),
+ RangeStmt_Value: info[*ast.RangeStmt]("Value"),
+ RangeStmt_X: info[*ast.RangeStmt]("X"),
+ ReturnStmt_Results: info[*ast.ReturnStmt]("Results"),
+ SelectStmt_Body: info[*ast.SelectStmt]("Body"),
+ SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"),
+ SelectorExpr_X: info[*ast.SelectorExpr]("X"),
+ SendStmt_Chan: info[*ast.SendStmt]("Chan"),
+ SendStmt_Value: info[*ast.SendStmt]("Value"),
+ SliceExpr_High: info[*ast.SliceExpr]("High"),
+ SliceExpr_Low: info[*ast.SliceExpr]("Low"),
+ SliceExpr_Max: info[*ast.SliceExpr]("Max"),
+ SliceExpr_X: info[*ast.SliceExpr]("X"),
+ StarExpr_X: info[*ast.StarExpr]("X"),
+ StructType_Fields: info[*ast.StructType]("Fields"),
+ SwitchStmt_Body: info[*ast.SwitchStmt]("Body"),
+ SwitchStmt_Init: info[*ast.SwitchStmt]("Init"),
+ SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"),
+ TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"),
+ TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"),
+ TypeSpec_Comment: info[*ast.TypeSpec]("Comment"),
+ TypeSpec_Doc: info[*ast.TypeSpec]("Doc"),
+ TypeSpec_Name: info[*ast.TypeSpec]("Name"),
+ TypeSpec_Type: info[*ast.TypeSpec]("Type"),
+ TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"),
+ TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"),
+ TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"),
+ TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"),
+ UnaryExpr_X: info[*ast.UnaryExpr]("X"),
+ ValueSpec_Comment: info[*ast.ValueSpec]("Comment"),
+ ValueSpec_Doc: info[*ast.ValueSpec]("Doc"),
+ ValueSpec_Names: info[*ast.ValueSpec]("Names"),
+ ValueSpec_Type: info[*ast.ValueSpec]("Type"),
+ ValueSpec_Values: info[*ast.ValueSpec]("Values"),
+}
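
The edge constants above give each parent-to-child AST field a compact handle, and Kind.Get retrieves the corresponding child via reflection. Below is a minimal sketch of exercising that API directly; the file name, source text, and chosen edges are illustrative only, and the import path assumes the vendored golang.org/x/tools/go/ast/edge package.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/edge"
)

func main() {
	fset := token.NewFileSet()
	// Hypothetical input used only for illustration.
	f, err := parser.ParseFile(fset, "x.go", "package p\nfunc f(a, b int) {}\n", 0)
	if err != nil {
		panic(err)
	}

	// File.Decls is a slice field, so a valid element index is required.
	decl := edge.File_Decls.Get(f, 0)
	fmt.Println(edge.File_Decls, "->", decl.(*ast.FuncDecl).Name.Name)

	// FuncDecl.Name is not a slice field, so the index must be -1.
	name := edge.FuncDecl_Name.Get(decl, -1)
	fmt.Println(edge.FuncDecl_Name, "->", name.(*ast.Ident).Name)
}
```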
diff --git a/operator/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/operator/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
new file mode 100644
index 00000000..7e72d3c2
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/ast/inspector/cursor.go
@@ -0,0 +1,502 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "iter"
+ "reflect"
+
+ "golang.org/x/tools/go/ast/edge"
+)
+
+// A Cursor represents an [ast.Node]. It is immutable.
+//
+// Two Cursors compare equal if they represent the same node.
+//
+// Call [Inspector.Root] to obtain a valid cursor for the virtual root
+// node of the traversal.
+//
+// Use the following methods to navigate efficiently around the tree:
+// - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing];
+// - for children, use [Cursor.Child], [Cursor.Children],
+// [Cursor.FirstChild], and [Cursor.LastChild];
+// - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling];
+// - for descendants, use [Cursor.FindByPos], [Cursor.FindNode],
+// [Cursor.Inspect], and [Cursor.Preorder].
+//
+// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for
+// information about the edges in a tree: which field (and slice
+// element) of the parent node holds the child.
+type Cursor struct {
+ in *Inspector
+ index int32 // index of push node; -1 for virtual root node
+}
+
+// Root returns a cursor for the virtual root node,
+// whose children are the files provided to [New].
+//
+// Its [Cursor.Node] method returns nil.
+func (in *Inspector) Root() Cursor {
+ return Cursor{in, -1}
+}
+
+// At returns the cursor at the specified index in the traversal,
+// which must have been obtained from [Cursor.Index] on a Cursor
+// belonging to the same Inspector (see [Cursor.Inspector]).
+func (in *Inspector) At(index int32) Cursor {
+ if index < 0 {
+ panic("negative index")
+ }
+ if int(index) >= len(in.events) {
+ panic("index out of range for this inspector")
+ }
+ if in.events[index].index < index {
+ panic("invalid index") // (a push, not a pop)
+ }
+ return Cursor{in, index}
+}
+
+// Inspector returns the cursor's Inspector.
+func (c Cursor) Inspector() *Inspector { return c.in }
+
+// Index returns the index of this cursor position within the package.
+//
+// Clients should not assume anything about the numeric Index value
+// except that it increases monotonically throughout the traversal.
+// It is provided for use with [At].
+//
+// Index must not be called on the Root node.
+func (c Cursor) Index() int32 {
+ if c.index < 0 {
+ panic("Index called on Root node")
+ }
+ return c.index
+}
+
+// Node returns the node at the current cursor position,
+// or nil for the cursor returned by [Inspector.Root].
+func (c Cursor) Node() ast.Node {
+ if c.index < 0 {
+ return nil
+ }
+ return c.in.events[c.index].node
+}
+
+// String returns information about the cursor's node, if any.
+func (c Cursor) String() string {
+ if c.in == nil {
+ return "(invalid)"
+ }
+ if c.index < 0 {
+ return "(root)"
+ }
+ return reflect.TypeOf(c.Node()).String()
+}
+
+// indices returns the [start, end) half-open interval of event indices.
+func (c Cursor) indices() (int32, int32) {
+ if c.index < 0 {
+ return 0, int32(len(c.in.events)) // root: all events
+ } else {
+ return c.index, c.in.events[c.index].index + 1 // just one subtree
+ }
+}
+
+// Preorder returns an iterator over the nodes of the subtree
+// represented by c in depth-first order. Each node in the sequence is
+// represented by a Cursor that allows access to the Node, but may
+// also be used to start a new traversal, or to obtain the stack of
+// nodes enclosing the cursor.
+//
+// The traversal sequence is determined by [ast.Inspect]. The types
+// argument, if non-empty, enables type-based filtering of events:
+// the sequence includes only nodes whose type matches an
+// element of the types slice.
+//
+// If you need control over descent into subtrees,
+// or need both pre- and post-order notifications, use [Cursor.Inspect].
+func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] {
+ mask := maskOf(types)
+
+ return func(yield func(Cursor) bool) {
+ events := c.in.events
+
+ for i, limit := c.indices(); i < limit; {
+ ev := events[i]
+ if ev.index > i { // push?
+ if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) {
+ break
+ }
+ pop := ev.index
+ if events[pop].typ&mask == 0 {
+ // Subtree does not contain types: skip.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+ }
+}
+
+// Inspect visits the nodes of the subtree represented by c in
+// depth-first order. It calls f(n) for each node n before it
+// visits n's children. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of the node.
+//
+// Each node is represented by a Cursor that allows access to the
+// Node, but may also be used to start a new traversal, or to obtain
+// the stack of nodes enclosing the cursor.
+//
+// The complete traversal sequence is determined by [ast.Inspect].
+// The types argument, if non-empty, enables type-based filtering of
+// events. The function f is called only for nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) {
+ mask := maskOf(types)
+ events := c.in.events
+ for i, limit := c.indices(); i < limit; {
+ ev := events[i]
+ if ev.index > i {
+ // push
+ pop := ev.index
+ if ev.typ&mask != 0 && !f(Cursor{c.in, i}) ||
+ events[pop].typ&mask == 0 {
+ // The user opted not to descend, or the
+ // subtree does not contain types:
+ // skip past the pop.
+ i = pop + 1
+ continue
+ }
+ }
+ i++
+ }
+}
+
+// Enclosing returns an iterator over the nodes enclosing the
+// current node, starting with the Cursor itself.
+//
+// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// The types argument, if non-empty, enables type-based filtering of
+// events: the sequence includes only enclosing nodes whose type
+// matches an element of the types slice.
+func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] {
+ if c.index < 0 {
+ panic("Cursor.Enclosing called on Root node")
+ }
+
+ mask := maskOf(types)
+
+ return func(yield func(Cursor) bool) {
+ events := c.in.events
+ for i := c.index; i >= 0; i = events[i].parent {
+ if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) {
+ break
+ }
+ }
+ }
+}
+
+// Parent returns the parent of the current node.
+//
+// Parent must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Parent() Cursor {
+ if c.index < 0 {
+ panic("Cursor.Parent called on Root node")
+ }
+
+ return Cursor{c.in, c.in.events[c.index].parent}
+}
+
+// ParentEdge returns the identity of the field in the parent node
+// that holds this cursor's node, and if it is a list, the index within it.
+//
+// For example, f(x, y) is a CallExpr whose three children are Idents.
+// f has edge kind [edge.CallExpr_Fun] and index -1.
+// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively.
+//
+// If called on a child of the Root node, it returns ([edge.Invalid], -1).
+//
+// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) ParentEdge() (edge.Kind, int) {
+ if c.index < 0 {
+ panic("Cursor.ParentEdge called on Root node")
+ }
+ events := c.in.events
+ pop := events[c.index].index
+ return unpackEdgeKindAndIndex(events[pop].parent)
+}
+
+// ChildAt returns the cursor for the child of the
+// current node identified by its edge and index.
+// The index must be -1 if the edge.Kind is not a slice.
+// The indicated child node must exist.
+//
+// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil).
+//
+// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c.
+func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor {
+ target := packEdgeKindAndIndex(k, idx)
+
+ // Unfortunately there's no shortcut to looping.
+ events := c.in.events
+ i := c.index + 1
+ for {
+ pop := events[i].index
+ if pop < i {
+ break
+ }
+ if events[pop].parent == target {
+ return Cursor{c.in, i}
+ }
+ i = pop + 1
+ }
+ panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c))
+}
+
+// Child returns the cursor for n, which must be a direct child of c's Node.
+//
+// Child must not be called on the Root node (whose [Cursor.Node] returns nil).
+func (c Cursor) Child(n ast.Node) Cursor {
+ if c.index < 0 {
+ panic("Cursor.Child called on Root node")
+ }
+
+ if false {
+ // reference implementation
+ for child := range c.Children() {
+ if child.Node() == n {
+ return child
+ }
+ }
+
+ } else {
+ // optimized implementation
+ events := c.in.events
+ for i := c.index + 1; events[i].index > i; i = events[i].index + 1 {
+ if events[i].node == n {
+ return Cursor{c.in, i}
+ }
+ }
+ }
+ panic(fmt.Sprintf("Child(%T): not a child of %v", n, c))
+}
+
+// NextSibling returns the cursor for the next sibling node in the same list
+// (for example, of files, decls, specs, statements, fields, or expressions) as
+// the current node. It returns (zero, false) if the node is the last node in
+// the list, or is not part of a list.
+//
+// NextSibling must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) NextSibling() (Cursor, bool) {
+ if c.index < 0 {
+ panic("Cursor.NextSibling called on Root node")
+ }
+
+ events := c.in.events
+ i := events[c.index].index + 1 // after corresponding pop
+ if i < int32(len(events)) {
+ if events[i].index > i { // push?
+ return Cursor{c.in, i}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// PrevSibling returns the cursor for the previous sibling node in the
+// same list (for example, of files, decls, specs, statements, fields,
+// or expressions) as the current node. It returns (zero, false) if the node is
+// the first node in the list, or is not part of a list.
+//
+// It must not be called on the Root node.
+//
+// See note at [Cursor.Children].
+func (c Cursor) PrevSibling() (Cursor, bool) {
+ if c.index < 0 {
+ panic("Cursor.PrevSibling called on Root node")
+ }
+
+ events := c.in.events
+ i := c.index - 1
+ if i >= 0 {
+ if j := events[i].index; j < i { // pop?
+ return Cursor{c.in, j}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// FirstChild returns the first direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) FirstChild() (Cursor, bool) {
+ events := c.in.events
+ i := c.index + 1 // i=0 if c is root
+ if i < int32(len(events)) && events[i].index > i { // push?
+ return Cursor{c.in, i}, true
+ }
+ return Cursor{}, false
+}
+
+// LastChild returns the last direct child of the current node,
+// or zero if it has no children.
+func (c Cursor) LastChild() (Cursor, bool) {
+ events := c.in.events
+ if c.index < 0 { // root?
+ if len(events) > 0 {
+ // return push of final event (a pop)
+ return Cursor{c.in, events[len(events)-1].index}, true
+ }
+ } else {
+ j := events[c.index].index - 1 // before corresponding pop
+ // Inv: j == c.index if c has no children
+ // or j is last child's pop.
+ if j > c.index { // c has children
+ return Cursor{c.in, events[j].index}, true
+ }
+ }
+ return Cursor{}, false
+}
+
+// Children returns an iterator over the direct children of the
+// current node, if any.
+//
+// When using Children, NextChild, and PrevChild, bear in mind that a
+// Node's children may come from different fields, some of which may
+// be lists of nodes without a distinguished intervening container
+// such as [ast.BlockStmt].
+//
+// For example, [ast.CaseClause] has a field List of expressions and a
+// field Body of statements, so the children of a CaseClause are a mix
+// of expressions and statements. Other nodes that have "uncontained"
+// list fields include:
+//
+// - [ast.ValueSpec] (Names, Values)
+// - [ast.CompositeLit] (Type, Elts)
+// - [ast.IndexListExpr] (X, Indices)
+// - [ast.CallExpr] (Fun, Args)
+// - [ast.AssignStmt] (Lhs, Rhs)
+//
+// So, do not assume that the previous sibling of an ast.Stmt is also
+// an ast.Stmt, or if it is, that they are executed sequentially,
+// unless you have established that, say, its parent is a BlockStmt
+// or its [Cursor.ParentEdge] is [edge.BlockStmt_List].
+// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1,
+// even though they are not executed in sequence.
+func (c Cursor) Children() iter.Seq[Cursor] {
+ return func(yield func(Cursor) bool) {
+ c, ok := c.FirstChild()
+ for ok && yield(c) {
+ c, ok = c.NextSibling()
+ }
+ }
+}
+
+// Contains reports whether c contains or is equal to c2.
+//
+// Both Cursors must belong to the same [Inspector];
+// neither may be its Root node.
+func (c Cursor) Contains(c2 Cursor) bool {
+ if c.in != c2.in {
+ panic("different inspectors")
+ }
+ events := c.in.events
+ return c.index <= c2.index && events[c2.index].index <= events[c.index].index
+}
+
+// FindNode returns the cursor for node n if it belongs to the subtree
+// rooted at c. It returns zero if n is not found.
+func (c Cursor) FindNode(n ast.Node) (Cursor, bool) {
+
+ // FindNode is equivalent to this code,
+ // but more convenient and 15-20% faster:
+ if false {
+ for candidate := range c.Preorder(n) {
+ if candidate.Node() == n {
+ return candidate, true
+ }
+ }
+ return Cursor{}, false
+ }
+
+ // TODO(adonovan): opt: should we assume Node.Pos is accurate
+ // and combine type-based filtering with position filtering
+ // like FindByPos?
+
+ mask := maskOf([]ast.Node{n})
+ events := c.in.events
+
+ for i, limit := c.indices(); i < limit; i++ {
+ ev := events[i]
+ if ev.index > i { // push?
+ if ev.typ&mask != 0 && ev.node == n {
+ return Cursor{c.in, i}, true
+ }
+ pop := ev.index
+ if events[pop].typ&mask == 0 {
+ // Subtree does not contain type of n: skip.
+ i = pop
+ }
+ }
+ }
+ return Cursor{}, false
+}
+
+// FindByPos returns the cursor for the innermost node n in the tree
+// rooted at c such that n.Pos() <= start && end <= n.End().
+// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.)
+//
+// It returns zero if none is found.
+// Precondition: start <= end.
+//
+// See also [astutil.PathEnclosingInterval], which
+// tolerates adjoining whitespace.
+func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) {
+ if end < start {
+ panic("end < start")
+ }
+ events := c.in.events
+
+ // This algorithm could be implemented using c.Inspect,
+ // but it is about 2.5x slower.
+
+ best := int32(-1) // push index of latest (=innermost) node containing range
+ for i, limit := c.indices(); i < limit; i++ {
+ ev := events[i]
+ if ev.index > i { // push?
+ n := ev.node
+ var nodeEnd token.Pos
+ if file, ok := n.(*ast.File); ok {
+ nodeEnd = file.FileEnd
+ // Note: files may be out of Pos order.
+ if file.FileStart > start {
+ i = ev.index // disjoint, after; skip to next file
+ continue
+ }
+ } else {
+ nodeEnd = n.End()
+ if n.Pos() > start {
+ break // disjoint, after; stop
+ }
+ }
+ // Inv: node.{Pos,FileStart} <= start
+ if end <= nodeEnd {
+ // node fully contains target range
+ best = i
+ } else if nodeEnd < start {
+ i = ev.index // disjoint, before; skip forward
+ }
+ }
+ }
+ if best >= 0 {
+ return Cursor{c.in, best}, true
+ }
+ return Cursor{}, false
+}
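
As a rough usage sketch of the Cursor API added above: the snippet builds an Inspector over one parsed file, visits call expressions with Preorder, asks ParentEdge which field of the parent holds each call, and walks outward with Enclosing to the surrounding function. The source text and file name are invented for illustration.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\nfunc f() { g(1, 2) }\nfunc g(a, b int) {}\n"
	f, err := parser.ParseFile(fset, "calls.go", src, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// Preorder filtered to call expressions; ParentEdge reports which field of
	// the parent holds the call; Enclosing walks outward to the surrounding
	// function declaration.
	for c := range in.Root().Preorder((*ast.CallExpr)(nil)) {
		call := c.Node().(*ast.CallExpr)
		kind, idx := c.ParentEdge()
		fmt.Printf("call at %v held by edge %v (index %d)\n",
			fset.Position(call.Pos()), kind, idx)
		for enc := range c.Enclosing((*ast.FuncDecl)(nil)) {
			fmt.Println("  enclosing func:", enc.Node().(*ast.FuncDecl).Name.Name)
			break
		}
	}
}
```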
diff --git a/operator/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/operator/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 958cf38d..a703cdfc 100644
--- a/operator/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/operator/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -10,12 +10,22 @@
// builds a list of push/pop events and their node type. Subsequent
// method calls that request a traversal scan this list, rather than walk
// the AST, and perform type filtering using efficient bit sets.
+// This representation is sometimes called a "balanced parenthesis tree."
//
// Experiments suggest the inspector's traversals are about 2.5x faster
-// than ast.Inspect, but it may take around 5 traversals for this
+// than [ast.Inspect], but it may take around 5 traversals for this
// benefit to amortize the inspector's construction cost.
// If efficiency is the primary concern, do not use Inspector for
// one-off traversals.
+//
+// The [Cursor] type provides a more flexible API for efficient
+// navigation of syntax trees in all four "cardinal directions". For
+// example, traversals may be nested, so you can find each node of
+// type A and then search within it for nodes of type B. Or you can
+// traverse from a node to its immediate neighbors: its parent, its
+// previous and next sibling, or its first and last child. We
+// recommend using methods of Cursor in preference to Inspector where
+// possible.
package inspector
// There are four orthogonal features in a traversal:
@@ -36,6 +46,8 @@ package inspector
import (
"go/ast"
+
+ "golang.org/x/tools/go/ast/edge"
)
// An Inspector provides methods for inspecting
@@ -44,6 +56,19 @@ type Inspector struct {
events []event
}
+func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
+ return int32(uint32(index+1)<<7 | uint32(ek))
+}
+
+// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
+// an []ast.Node slice) from the parent field of a pop event.
+func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
+ // The "parent" field of a pop node holds the
+ // edge Kind in the lower 7 bits and the index+1
+ // in the upper 25.
+ return edge.Kind(x & 0x7f), int(x>>7) - 1
+}
+
// New returns an Inspector for the specified syntax trees.
func New(files []*ast.File) *Inspector {
return &Inspector{traverse(files)}
@@ -52,22 +77,29 @@ func New(files []*ast.File) *Inspector {
// An event represents a push or a pop
// of an ast.Node during a traversal.
type event struct {
- node ast.Node
- typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
- index int // index of corresponding push or pop event
+ node ast.Node
+ typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+ index int32 // index of corresponding push or pop event
+ parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only)
}
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
// Type can be recovered from the sole bit in typ.
+// [Tried this, wasn't faster. --adonovan]
// Preorder visits all the nodes of the files supplied to New in
// depth-first order. It calls f(n) for each node n before it visits
// n's children.
//
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
// The types argument, if non-empty, enables type-based filtering of
// events. The function f is called only for nodes whose type
// matches an element of the types slice.
+//
+// The [Cursor.Preorder] method provides a richer alternative interface.
+// Example:
+//
+//	for c := range in.Root().Preorder(types...) { ... }
func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// Because it avoids postorder calls to f, and the pruning
// check, Preorder is almost twice as fast as Nodes. The two
@@ -83,7 +115,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// })
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -107,13 +139,21 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// of the non-nil children of the node, followed by a call of
// f(n, false).
//
-// The complete traversal sequence is determined by ast.Inspect.
+// The complete traversal sequence is determined by [ast.Inspect].
// The types argument, if non-empty, enables type-based filtering of
// events. The function f is called only for nodes whose type
// matches an element of the types slice.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+// in.Root().Inspect(types, func(c Cursor) bool {
+// ...
+// return true
+//	})
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -144,10 +184,19 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
// supplies each call to f an additional argument, the current
// traversal stack. The stack's first element is the outermost node,
// an *ast.File; its last is the innermost, n.
+//
+// The [Cursor.Inspect] method provides a richer alternative interface.
+// Example:
+//
+// in.Root().Inspect(types, func(c Cursor) bool {
+// stack := slices.Collect(c.Enclosing())
+// ...
+// return true
+// })
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
mask := maskOf(types)
var stack []ast.Node
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -189,43 +238,74 @@ func traverse(files []*ast.File) []event {
extent += int(f.End() - f.Pos())
}
// This estimate is based on the net/http package.
- capacity := extent * 33 / 100
- if capacity > 1e6 {
- capacity = 1e6 // impose some reasonable maximum
+ capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M)
+
+ v := &visitor{
+ events: make([]event, 0, capacity),
+ stack: []item{{index: -1}}, // include an extra event so file nodes have a parent
+ }
+ for _, file := range files {
+ walk(v, edge.Invalid, -1, file)
}
- events := make([]event, 0, capacity)
+ return v.events
+}
- var stack []event
- stack = append(stack, event{}) // include an extra event so file nodes have a parent
- for _, f := range files {
- ast.Inspect(f, func(n ast.Node) bool {
- if n != nil {
- // push
- ev := event{
- node: n,
- typ: 0, // temporarily used to accumulate type bits of subtree
- index: len(events), // push event temporarily holds own index
- }
- stack = append(stack, ev)
- events = append(events, ev)
- } else {
- // pop
- top := len(stack) - 1
- ev := stack[top]
- typ := typeOf(ev.node)
- push := ev.index
- parent := top - 1
-
- events[push].typ = typ // set type of push
- stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
- events[push].index = len(events) // make push refer to pop
-
- stack = stack[:top]
- events = append(events, ev)
- }
- return true
- })
+type visitor struct {
+ events []event
+ stack []item
+}
+
+type item struct {
+ index int32 // index of current node's push event
+ parentIndex int32 // index of parent node's push event
+ typAccum uint64 // accumulated type bits of current node's descendants
+ edgeKindAndIndex int32 // edge.Kind and index, bit packed
+}
+
+func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) {
+ var (
+ index = int32(len(v.events))
+ parentIndex = v.stack[len(v.stack)-1].index
+ )
+ v.events = append(v.events, event{
+ node: node,
+ parent: parentIndex,
+ typ: typeOf(node),
+ index: 0, // (pop index is set later by visitor.pop)
+ })
+ v.stack = append(v.stack, item{
+ index: index,
+ parentIndex: parentIndex,
+ edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex),
+ })
+
+ // 2B nodes ought to be enough for anyone!
+ if int32(len(v.events)) < 0 {
+ panic("event index exceeded int32")
}
- return events
+ // 32M elements in an []ast.Node ought to be enough for anyone!
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex {
+ panic("Node slice index exceeded uint25")
+ }
+}
+
+func (v *visitor) pop(node ast.Node) {
+ top := len(v.stack) - 1
+ current := v.stack[top]
+
+ push := &v.events[current.index]
+ parent := &v.stack[top-1]
+
+ push.index = int32(len(v.events)) // make push event refer to pop
+ parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent
+
+ v.stack = v.stack[:top]
+
+ v.events = append(v.events, event{
+ node: node,
+ typ: current.typAccum,
+ index: current.index,
+ parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex]
+ })
}
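
The new parent/edge bookkeeping packs an edge kind (7 bits) and a slice index plus one (25 bits) into a single int32 on pop events. The standalone sketch below mirrors that arithmetic with hypothetical helpers named pack and unpack; it is not the vendored code itself, just a demonstration that the encoding round-trips, including the -1 "no index" case.

```go
package main

import "fmt"

// pack stores the kind in the low 7 bits and index+1 in the upper 25 bits,
// so a non-slice edge (index -1) packs its index portion as zero.
// These helpers mirror the unexported packEdgeKindAndIndex/unpackEdgeKindAndIndex.
func pack(kind uint8, index int) int32 {
	return int32(uint32(index+1)<<7 | uint32(kind))
}

func unpack(x int32) (uint8, int) {
	return uint8(x & 0x7f), int(x>>7) - 1
}

func main() {
	for _, tc := range []struct {
		kind  uint8 // arbitrary kind value, for illustration only
		index int
	}{
		{kind: 11, index: -1}, // a non-slice edge
		{kind: 10, index: 2},  // the third element of a slice-valued edge
	} {
		p := pack(tc.kind, tc.index)
		k, i := unpack(p)
		fmt.Printf("pack(%d, %d) = %d -> unpack = (%d, %d)\n",
			tc.kind, tc.index, p, k, i)
	}
}
```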
diff --git a/operator/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/operator/vendor/golang.org/x/tools/go/ast/inspector/iter.go
index b7e95911..c576dc70 100644
--- a/operator/vendor/golang.org/x/tools/go/ast/inspector/iter.go
+++ b/operator/vendor/golang.org/x/tools/go/ast/inspector/iter.go
@@ -26,7 +26,7 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
return func(yield func(ast.Node) bool) {
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -63,7 +63,7 @@ func All[N interface {
mask := typeOf((N)(nil))
return func(yield func(N) bool) {
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
diff --git a/operator/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/operator/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index 2a872f89..9852331a 100644
--- a/operator/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/operator/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -216,7 +216,7 @@ func typeOf(n ast.Node) uint64 {
}
func maskOf(nodes []ast.Node) uint64 {
- if nodes == nil {
+ if len(nodes) == 0 {
return math.MaxUint64 // match all node types
}
var mask uint64
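
The maskOf change means an empty filter slice, whether nil or merely zero-length, now selects every node type. A small hedged sketch of the observable behaviour through Inspector.Preorder, on an invented one-line package:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p\nvar x = 1 + 2\n", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// An empty (even non-nil) filter now means "match every node type".
	count := 0
	in.Preorder([]ast.Node{}, func(ast.Node) { count++ })
	fmt.Println("nodes visited with empty filter:", count)

	// A non-empty filter restricts the callback to the listed types.
	in.Preorder([]ast.Node{(*ast.BinaryExpr)(nil)}, func(n ast.Node) {
		fmt.Printf("binary expr: %T\n", n)
	})
}
```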
diff --git a/operator/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/operator/vendor/golang.org/x/tools/go/ast/inspector/walk.go
new file mode 100644
index 00000000..5f1c93c8
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -0,0 +1,341 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file is a fork of ast.Inspect to reduce unnecessary dynamic
+// calls and to gather edge information.
+//
+// Consistency with the original is ensured by TestInspectAllNodes.
+
+import (
+ "fmt"
+ "go/ast"
+
+ "golang.org/x/tools/go/ast/edge"
+)
+
+func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
+ for i, node := range list {
+ walk(v, ek, i, node)
+ }
+}
+
+func walk(v *visitor, ek edge.Kind, index int, node ast.Node) {
+ v.push(ek, index, node)
+
+ // walk children
+ // (the order of the cases matches the order
+ // of the corresponding node types in ast.go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ walkList(v, edge.CommentGroup_List, n.List)
+
+ case *ast.Field:
+ if n.Doc != nil {
+ walk(v, edge.Field_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.Field_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.Field_Type, -1, n.Type)
+ }
+ if n.Tag != nil {
+ walk(v, edge.Field_Tag, -1, n.Tag)
+ }
+ if n.Comment != nil {
+ walk(v, edge.Field_Comment, -1, n.Comment)
+ }
+
+ case *ast.FieldList:
+ walkList(v, edge.FieldList_List, n.List)
+
+ // Expressions
+ case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Ellipsis:
+ if n.Elt != nil {
+ walk(v, edge.Ellipsis_Elt, -1, n.Elt)
+ }
+
+ case *ast.FuncLit:
+ walk(v, edge.FuncLit_Type, -1, n.Type)
+ walk(v, edge.FuncLit_Body, -1, n.Body)
+
+ case *ast.CompositeLit:
+ if n.Type != nil {
+ walk(v, edge.CompositeLit_Type, -1, n.Type)
+ }
+ walkList(v, edge.CompositeLit_Elts, n.Elts)
+
+ case *ast.ParenExpr:
+ walk(v, edge.ParenExpr_X, -1, n.X)
+
+ case *ast.SelectorExpr:
+ walk(v, edge.SelectorExpr_X, -1, n.X)
+ walk(v, edge.SelectorExpr_Sel, -1, n.Sel)
+
+ case *ast.IndexExpr:
+ walk(v, edge.IndexExpr_X, -1, n.X)
+ walk(v, edge.IndexExpr_Index, -1, n.Index)
+
+ case *ast.IndexListExpr:
+ walk(v, edge.IndexListExpr_X, -1, n.X)
+ walkList(v, edge.IndexListExpr_Indices, n.Indices)
+
+ case *ast.SliceExpr:
+ walk(v, edge.SliceExpr_X, -1, n.X)
+ if n.Low != nil {
+ walk(v, edge.SliceExpr_Low, -1, n.Low)
+ }
+ if n.High != nil {
+ walk(v, edge.SliceExpr_High, -1, n.High)
+ }
+ if n.Max != nil {
+ walk(v, edge.SliceExpr_Max, -1, n.Max)
+ }
+
+ case *ast.TypeAssertExpr:
+ walk(v, edge.TypeAssertExpr_X, -1, n.X)
+ if n.Type != nil {
+ walk(v, edge.TypeAssertExpr_Type, -1, n.Type)
+ }
+
+ case *ast.CallExpr:
+ walk(v, edge.CallExpr_Fun, -1, n.Fun)
+ walkList(v, edge.CallExpr_Args, n.Args)
+
+ case *ast.StarExpr:
+ walk(v, edge.StarExpr_X, -1, n.X)
+
+ case *ast.UnaryExpr:
+ walk(v, edge.UnaryExpr_X, -1, n.X)
+
+ case *ast.BinaryExpr:
+ walk(v, edge.BinaryExpr_X, -1, n.X)
+ walk(v, edge.BinaryExpr_Y, -1, n.Y)
+
+ case *ast.KeyValueExpr:
+ walk(v, edge.KeyValueExpr_Key, -1, n.Key)
+ walk(v, edge.KeyValueExpr_Value, -1, n.Value)
+
+ // Types
+ case *ast.ArrayType:
+ if n.Len != nil {
+ walk(v, edge.ArrayType_Len, -1, n.Len)
+ }
+ walk(v, edge.ArrayType_Elt, -1, n.Elt)
+
+ case *ast.StructType:
+ walk(v, edge.StructType_Fields, -1, n.Fields)
+
+ case *ast.FuncType:
+ if n.TypeParams != nil {
+ walk(v, edge.FuncType_TypeParams, -1, n.TypeParams)
+ }
+ if n.Params != nil {
+ walk(v, edge.FuncType_Params, -1, n.Params)
+ }
+ if n.Results != nil {
+ walk(v, edge.FuncType_Results, -1, n.Results)
+ }
+
+ case *ast.InterfaceType:
+ walk(v, edge.InterfaceType_Methods, -1, n.Methods)
+
+ case *ast.MapType:
+ walk(v, edge.MapType_Key, -1, n.Key)
+ walk(v, edge.MapType_Value, -1, n.Value)
+
+ case *ast.ChanType:
+ walk(v, edge.ChanType_Value, -1, n.Value)
+
+ // Statements
+ case *ast.BadStmt:
+ // nothing to do
+
+ case *ast.DeclStmt:
+ walk(v, edge.DeclStmt_Decl, -1, n.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ walk(v, edge.LabeledStmt_Label, -1, n.Label)
+ walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt)
+
+ case *ast.ExprStmt:
+ walk(v, edge.ExprStmt_X, -1, n.X)
+
+ case *ast.SendStmt:
+ walk(v, edge.SendStmt_Chan, -1, n.Chan)
+ walk(v, edge.SendStmt_Value, -1, n.Value)
+
+ case *ast.IncDecStmt:
+ walk(v, edge.IncDecStmt_X, -1, n.X)
+
+ case *ast.AssignStmt:
+ walkList(v, edge.AssignStmt_Lhs, n.Lhs)
+ walkList(v, edge.AssignStmt_Rhs, n.Rhs)
+
+ case *ast.GoStmt:
+ walk(v, edge.GoStmt_Call, -1, n.Call)
+
+ case *ast.DeferStmt:
+ walk(v, edge.DeferStmt_Call, -1, n.Call)
+
+ case *ast.ReturnStmt:
+ walkList(v, edge.ReturnStmt_Results, n.Results)
+
+ case *ast.BranchStmt:
+ if n.Label != nil {
+ walk(v, edge.BranchStmt_Label, -1, n.Label)
+ }
+
+ case *ast.BlockStmt:
+ walkList(v, edge.BlockStmt_List, n.List)
+
+ case *ast.IfStmt:
+ if n.Init != nil {
+ walk(v, edge.IfStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.IfStmt_Cond, -1, n.Cond)
+ walk(v, edge.IfStmt_Body, -1, n.Body)
+ if n.Else != nil {
+ walk(v, edge.IfStmt_Else, -1, n.Else)
+ }
+
+ case *ast.CaseClause:
+ walkList(v, edge.CaseClause_List, n.List)
+ walkList(v, edge.CaseClause_Body, n.Body)
+
+ case *ast.SwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.SwitchStmt_Init, -1, n.Init)
+ }
+ if n.Tag != nil {
+ walk(v, edge.SwitchStmt_Tag, -1, n.Tag)
+ }
+ walk(v, edge.SwitchStmt_Body, -1, n.Body)
+
+ case *ast.TypeSwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.TypeSwitchStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign)
+ walk(v, edge.TypeSwitchStmt_Body, -1, n.Body)
+
+ case *ast.CommClause:
+ if n.Comm != nil {
+ walk(v, edge.CommClause_Comm, -1, n.Comm)
+ }
+ walkList(v, edge.CommClause_Body, n.Body)
+
+ case *ast.SelectStmt:
+ walk(v, edge.SelectStmt_Body, -1, n.Body)
+
+ case *ast.ForStmt:
+ if n.Init != nil {
+ walk(v, edge.ForStmt_Init, -1, n.Init)
+ }
+ if n.Cond != nil {
+ walk(v, edge.ForStmt_Cond, -1, n.Cond)
+ }
+ if n.Post != nil {
+ walk(v, edge.ForStmt_Post, -1, n.Post)
+ }
+ walk(v, edge.ForStmt_Body, -1, n.Body)
+
+ case *ast.RangeStmt:
+ if n.Key != nil {
+ walk(v, edge.RangeStmt_Key, -1, n.Key)
+ }
+ if n.Value != nil {
+ walk(v, edge.RangeStmt_Value, -1, n.Value)
+ }
+ walk(v, edge.RangeStmt_X, -1, n.X)
+ walk(v, edge.RangeStmt_Body, -1, n.Body)
+
+ // Declarations
+ case *ast.ImportSpec:
+ if n.Doc != nil {
+ walk(v, edge.ImportSpec_Doc, -1, n.Doc)
+ }
+ if n.Name != nil {
+ walk(v, edge.ImportSpec_Name, -1, n.Name)
+ }
+ walk(v, edge.ImportSpec_Path, -1, n.Path)
+ if n.Comment != nil {
+ walk(v, edge.ImportSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.ValueSpec:
+ if n.Doc != nil {
+ walk(v, edge.ValueSpec_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.ValueSpec_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.ValueSpec_Type, -1, n.Type)
+ }
+ walkList(v, edge.ValueSpec_Values, n.Values)
+ if n.Comment != nil {
+ walk(v, edge.ValueSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.TypeSpec:
+ if n.Doc != nil {
+ walk(v, edge.TypeSpec_Doc, -1, n.Doc)
+ }
+ walk(v, edge.TypeSpec_Name, -1, n.Name)
+ if n.TypeParams != nil {
+ walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams)
+ }
+ walk(v, edge.TypeSpec_Type, -1, n.Type)
+ if n.Comment != nil {
+ walk(v, edge.TypeSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.GenDecl:
+ if n.Doc != nil {
+ walk(v, edge.GenDecl_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.GenDecl_Specs, n.Specs)
+
+ case *ast.FuncDecl:
+ if n.Doc != nil {
+ walk(v, edge.FuncDecl_Doc, -1, n.Doc)
+ }
+ if n.Recv != nil {
+ walk(v, edge.FuncDecl_Recv, -1, n.Recv)
+ }
+ walk(v, edge.FuncDecl_Name, -1, n.Name)
+ walk(v, edge.FuncDecl_Type, -1, n.Type)
+ if n.Body != nil {
+ walk(v, edge.FuncDecl_Body, -1, n.Body)
+ }
+
+ case *ast.File:
+ if n.Doc != nil {
+ walk(v, edge.File_Doc, -1, n.Doc)
+ }
+ walk(v, edge.File_Name, -1, n.Name)
+ walkList(v, edge.File_Decls, n.Decls)
+ // don't walk n.Comments - they have been
+ // visited already through the individual
+ // nodes
+
+ default:
+ // (includes *ast.Package)
+ panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+ }
+
+ v.pop(node)
+}
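
Since walk is a fork of ast.Inspect, the traversal it records should visit the same nodes in the same order as the standard walker (the file's own comment points to TestInspectAllNodes as the authoritative check). The sketch below compares the two preorder sequences on a tiny, invented source file:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\nfunc add(a, b int) int { return a + b }\n"
	f, err := parser.ParseFile(fset, "add.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Preorder sequence from the standard walker...
	var fromInspect []ast.Node
	ast.Inspect(f, func(n ast.Node) bool {
		if n != nil {
			fromInspect = append(fromInspect, n)
		}
		return true
	})

	// ...and from the inspector, which is built on the forked walk above.
	var fromInspector []ast.Node
	in := inspector.New([]*ast.File{f})
	in.Preorder(nil, func(n ast.Node) { fromInspector = append(fromInspector, n) })

	fmt.Println("same number of nodes:", len(fromInspect) == len(fromInspector))
}
```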
diff --git a/operator/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/operator/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
new file mode 100644
index 00000000..7b90bc92
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -0,0 +1,236 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gcexportdata provides functions for reading and writing
+// export data, which is a serialized description of the API of a Go
+// package including the names, kinds, types, and locations of all
+// exported declarations.
+//
+// The standard Go compiler (cmd/compile) writes an export data file
+// for each package it compiles, which it later reads when compiling
+// packages that import the earlier one. The compiler must thus
+// contain logic to both write and read export data.
+// (See the "Export" section in the cmd/compile/README file.)
+//
+// The [Read] function in this package can read files produced by the
+// compiler, producing [go/types] data structures. As a matter of
+// policy, Read supports export data files produced by only the last
+// two Go releases plus tip; see https://go.dev/issue/68898. The
+// export data files produced by the compiler contain additional
+// details related to generics, inlining, and other optimizations that
+// cannot be decoded by the [Read] function.
+//
+// In files written by the compiler, the export data is not at the
+// start of the file. Before calling Read, use [NewReader] to locate
+// the desired portion of the file.
+//
+// The [Write] function in this package encodes the exported API of a
+// Go package ([types.Package]) as a file. Such files can be later
+// decoded by Read, but cannot be consumed by the compiler.
+//
+// # Future changes
+//
+// Although Read supports the formats written by both Write and the
+// compiler, the two are quite different, and there is an open
+// proposal (https://go.dev/issue/69491) to separate these APIs.
+//
+// Under that proposal, this package would ultimately provide only the
+// Read operation for compiler export data, which must be defined in
+// this module (golang.org/x/tools), not in the standard library, to
+// avoid version skew for developer tools that need to read compiler
+// export data both before and after a Go release, such as from Go
+// 1.23 to Go 1.24. Because this package lives in the tools module,
+// clients can update their version of the module some time before the
+// Go 1.24 release and rebuild and redeploy their tools, which will
+// then be able to consume both Go 1.23 and Go 1.24 export data files,
+// so they will work before and after the Go update. (See discussion
+// at https://go.dev/issue/15651.)
+//
+// The operations to import and export [go/types] data structures
+// would be defined in the go/types package as Import and Export.
+// [Write] would (eventually) delegate to Export,
+// and [Read], when it detects a file produced by Export,
+// would delegate to Import.
+//
+// # Deprecations
+//
+// The [NewImporter] and [Find] functions are deprecated and should
+// not be used in new code. The [WriteBundle] and [ReadBundle]
+// functions are experimental, and there is an open proposal to
+// deprecate them (https://go.dev/issue/69573).
+package gcexportdata
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "os/exec"
+
+ "golang.org/x/tools/internal/gcimporter"
+)
+
+// Find returns the name of an object (.o) or archive (.a) file
+// containing type information for the specified import path,
+// using the go command.
+// If no file was found, an empty filename is returned.
+//
+// A relative srcDir is interpreted relative to the current working directory.
+//
+// Find also returns the package's resolved (canonical) import path,
+// reflecting the effects of srcDir and vendoring on importPath.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func Find(importPath, srcDir string) (filename, path string) {
+ cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
+ cmd.Dir = srcDir
+ out, err := cmd.Output()
+ if err != nil {
+ return "", ""
+ }
+ var data struct {
+ ImportPath string
+ Export string
+ }
+ json.Unmarshal(out, &data)
+ return data.Export, data.ImportPath
+}
+
+// NewReader returns a reader for the export data section of an object
+// (.o) or archive (.a) file read from r. The new reader may provide
+// additional trailing data beyond the end of the export data.
+func NewReader(r io.Reader) (io.Reader, error) {
+ buf := bufio.NewReader(r)
+ size, err := gcimporter.FindExportData(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ // We were given an archive and found the __.PKGDEF in it.
+ // This tells us the size of the export data, and we don't
+ // need to return the entire file.
+ return &io.LimitedReader{
+ R: buf,
+ N: size,
+ }, nil
+}
+
+// readAll works the same way as io.ReadAll, but avoids allocations and copies
+// by preallocating a byte slice of the necessary size if the size is known up
+// front. This is always possible when the input is an archive. In that case,
+// NewReader will return the known size using an io.LimitedReader.
+func readAll(r io.Reader) ([]byte, error) {
+ if lr, ok := r.(*io.LimitedReader); ok {
+ data := make([]byte, lr.N)
+ _, err := io.ReadFull(lr, data)
+ return data, err
+ }
+ return io.ReadAll(r)
+}
+
+// Read reads export data from in, decodes it, and returns type
+// information for the package.
+//
+// Read is capable of reading export data produced by [Write] at the
+// same source code version, or by the last two Go releases (plus tip)
+// of the standard Go compiler. Reading files from older compilers may
+// produce an error.
+//
+// The package path (effectively its linker symbol prefix) is
+// specified by path, since unlike the package name, this information
+// may not be recorded in the export data.
+//
+// File position information is added to fset.
+//
+// Read may inspect and add to the imports map to ensure that references
+// within the export data to other packages are consistent. The caller
+// must ensure that imports[path] does not exist, or exists but is
+// incomplete (see types.Package.Complete), and Read inserts the
+// resulting package into this map entry.
+//
+// On return, the state of the reader is undefined.
+func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export data for %q: %v", path, err)
+ }
+
+ if bytes.HasPrefix(data, []byte("!")) {
+ return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
+ }
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 {
+ switch data[0] {
+ case 'v', 'c', 'd':
+ // binary, produced by cmd/compile till go1.10
+ return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
+
+ case 'i':
+ // indexed, produced by cmd/compile till go1.19,
+ // and also by [Write].
+ //
+ // If proposal #69491 is accepted, go/types
+ // serialization will be implemented by
+ // types.Export, to which Write would eventually
+ // delegate (explicitly dropping any pretence at
+ // inter-version Write-Read compatibility).
+ // This [Read] function would delegate to types.Import
+ // when it detects that the file was produced by Export.
+ _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ case 'u':
+ // unified, produced by cmd/compile since go1.20
+ _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
+ return pkg, err
+
+ default:
+ l := min(len(data), 10)
+ return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
+ }
+ }
+ return nil, fmt.Errorf("empty export data for %s", path)
+}
+
+// Write writes encoded type information for the specified package to out.
+// The FileSet provides file position information for named objects.
+func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ if _, err := io.WriteString(out, "i"); err != nil {
+ return err
+ }
+ return gcimporter.IExportData(out, fset, pkg)
+}
+
+// ReadBundle reads an export bundle from in, decodes it, and returns type
+// information for the packages.
+// File position information is added to fset.
+//
+// ReadBundle may inspect and add to the imports map to ensure that references
+// within the export bundle to other packages are consistent.
+//
+// On return, the state of the reader is undefined.
+//
+// Experimental: This API is experimental and may change in the future.
+func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
+ data, err := readAll(in)
+ if err != nil {
+ return nil, fmt.Errorf("reading export bundle: %v", err)
+ }
+ return gcimporter.IImportBundle(fset, imports, data)
+}
+
+// WriteBundle writes encoded type information for the specified packages to out.
+// The FileSet provides file position information for named objects.
+//
+// Experimental: This API is experimental and may change in the future.
+func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ return gcimporter.IExportBundle(out, fset, pkgs)
+}
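
A hedged end-to-end sketch of the reading path described above: Find (deprecated, but convenient for a one-off) locates the export data file, NewReader skips to the export data section, and Read decodes it. It assumes a working go command and uses the standard library fmt package purely as an example target.

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for "fmt" via the go command.
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Skip to the export data section of the object/archive file.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
}
```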
diff --git a/operator/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/operator/vendor/golang.org/x/tools/go/gcexportdata/importer.go
new file mode 100644
index 00000000..37a7247e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/gcexportdata/importer.go
@@ -0,0 +1,75 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcexportdata
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "os"
+)
+
+// NewImporter returns a new instance of the types.Importer interface
+// that reads type information from export data files written by gc.
+// The Importer also satisfies types.ImporterFrom.
+//
+// Export data files are located using "go build" workspace conventions
+// and the build.Default context.
+//
+// Use this importer instead of go/importer.For("gc", ...) to avoid the
+// version-skew problems described in the documentation of this package,
+// or to control the FileSet or access the imports map populated during
+// package loading.
+//
+// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
+// which is more efficient.
+func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
+ return importer{fset, imports}
+}
+
+type importer struct {
+ fset *token.FileSet
+ imports map[string]*types.Package
+}
+
+func (imp importer) Import(importPath string) (*types.Package, error) {
+ return imp.ImportFrom(importPath, "", 0)
+}
+
+func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
+ filename, path := Find(importPath, srcDir)
+ if filename == "" {
+ if importPath == "unsafe" {
+ // Even for unsafe, call Find first in case
+ // the package was vendored.
+ return types.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %s", importPath)
+ }
+
+ if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
+ return pkg, nil // cache hit
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ f.Close()
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("reading export data: %s: %v", filename, err)
+ }
+ }()
+
+ r, err := NewReader(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return Read(r, imp.fset, imp.imports, path)
+}
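
The importer above plugs straight into go/types. A minimal sketch follows, assuming a throwaway package named demo that imports strings; the deprecation note still applies, and golang.org/x/tools/go/packages remains the recommended route for real tools.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	src := `package demo

import "strings"

var Upper = strings.ToUpper("hi")
`
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the "strings" import from gc export data rather than source.
	imports := make(map[string]*types.Package)
	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "type-checked; Upper has type",
		pkg.Scope().Lookup("Upper").Type())
}
```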
diff --git a/operator/vendor/golang.org/x/tools/go/packages/doc.go b/operator/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 00000000..366aab6b
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,253 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The [Load] function takes as input a list of patterns and returns a
+list of [Package] values describing individual packages matched by those
+patterns.
+A [Config] specifies configuration options, the most important of which is
+the [LoadMode], which controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool.
+The default build tool is the go command.
+Its supported patterns are described at
+https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
+Other build systems may be supported by providing a "driver";
+see [The driver protocol].
+
+All patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
+
+The Package struct provides basic information about the package, including
+
+ - ID, a unique identifier for the package in the returned set;
+ - GoFiles, the names of the package's Go source files;
+ - Imports, a map from source import strings to the Packages they name;
+ - Types, the type information for the package's exported symbols;
+ - Syntax, the parsed syntax trees for the package's source code; and
+ - TypesInfo, the result of a complete type-check of the package syntax trees.
+
+(See the documentation for type Package for the complete list of fields
+and more detailed descriptions.)
+
+For example,
+
+ Load(nil, "bytes", "unicode...")
+
+returns four Package structs describing the standard library packages
+bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
+can match multiple packages and that a package might be matched by
+multiple patterns: in general it is not possible to determine which
+packages correspond to which patterns.
+
+Note that the list returned by Load contains only the packages matched
+by the patterns. Their dependencies can be found by walking the import
+graph using the Imports fields.
+
+The Load function can be configured by passing a pointer to a Config as
+the first argument. A nil Config is equivalent to the zero Config, which
+causes Load to run in [LoadFiles] mode, collecting minimal information.
+See the documentation for type Config for details.
+
+As noted earlier, the Config.Mode controls the amount of detail
+reported about the loaded packages. See the documentation for type LoadMode
+for details.
+
+Most tools should pass their command-line arguments (after any flags)
+uninterpreted to Load, so that it can interpret them
+according to the conventions of the underlying build system.
+
+See the Example function for typical usage.
+See also [golang.org/x/tools/go/packages/internal/linecount]
+for an example application.
+
+# The driver protocol
+
+Load may be used to load Go packages even in Go projects that use
+alternative build systems, by installing an appropriate "driver"
+program for the build system and specifying its location in the
+GOPACKAGESDRIVER environment variable.
+For example,
+https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
+explains how to use the driver for Bazel.
+
+The driver program is responsible for interpreting patterns in its
+preferred notation and reporting information about the packages that
+those patterns identify. Drivers must also support the special "file="
+and "pattern=" patterns described above.
+
+The patterns are provided as positional command-line arguments. A
+JSON-encoded [DriverRequest] message providing additional information
+is written to the driver's standard input. The driver must write a
+JSON-encoded [DriverResponse] message to its standard output. (This
+message differs from the JSON schema produced by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
+*/
+package packages // import "golang.org/x/tools/go/packages"
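
As a hedged illustration of the Load/Config/LoadMode workflow described in the package documentation above (the patterns, mode bits, and error handling here are chosen only as an example, not prescribed by the package):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// NeedName|NeedFiles is enough to list package IDs and their Go files;
	// richer modes (NeedTypes, NeedSyntax, ...) load proportionally more detail.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "bytes", "unicode/...")
	if err != nil {
		log.Fatal(err)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, len(p.GoFiles), "files")
	}
}
```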
+
+/*
+
+Motivation and design considerations
+
+The new package's design solves problems addressed by two existing
+packages: go/build, which locates and describes packages, and
+golang.org/x/tools/go/loader, which loads, parses and type-checks them.
+The go/build.Package structure encodes too much of the 'go build' way
+of organizing projects, leaving us in need of a data type that describes a
+package of Go source code independent of the underlying build system.
+We wanted something that works equally well with go build and vgo, and
+also other build systems such as Bazel and Blaze, making it possible to
+construct analysis tools that work in all these environments.
+Tools such as errcheck and staticcheck were essentially unavailable to
+the Go community at Google, and some of Google's internal tools for Go
+are unavailable externally.
+This new package provides a uniform way to obtain package metadata by
+querying each of these build systems, optionally supporting their
+preferred command-line notations for packages, so that tools integrate
+neatly with users' build environments. The Metadata query function
+executes an external query tool appropriate to the current workspace.
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+ leaving no way for a client application to see both the test
+ package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+ the library portion of the package, the dispatch of method calls in
+ the library portion was affected by the presence of the test files.
+ This should have been a clue that the packages were logically
+ different.
+3) this model of "augmentation" assumed at most one in-package test
+ per library package, which is true of projects using 'go build',
+ but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+ import the library package had to be processed before augmentation,
+ forcing a "one-shot" API and preventing the client from calling Load
+ several times in sequence, as is now possible in WholeProgram mode.
+ (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we inspected, none made multiple calls to Load that
+could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+ They are not portable concepts, but could be made portable.
+ Our goal has been to allow users to express themselves using the conventions
+ of the underlying build system: if the build system honors GOARCH
+ during a build and during a metadata query, then so should
+ applications built atop that query mechanism.
+ Conversely, if the target architecture of the build is determined by
+ command-line flags, the application can pass the relevant
+ flags through to the build system using a command such as:
+ myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+ However, this approach is low-level, unwieldy, and non-portable.
+ GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+ malformed patterns, existing and non-existent packages, successful and
+ failed builds, import failures, import cycles, and so on, in a call to
+ Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+ a mixture of good packages and:
+ invalid patterns
+ nonexistent packages
+ empty packages
+ packages with malformed package or import declarations
+ unreadable files
+ import cycles
+ other parse errors
+ type errors
+ Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+ Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+ order. I suspect this is due to the breadth-first resolution now used
+ by go/types. Is that a bug? Discuss with gri.
+
+*/
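For orientation while reviewing this vendored package: the client-side usage the doc comment above describes boils down to a sketch like the one below. The patterns are the same illustrative ones used in the documentation; PrintErrors and Visit are the package's own helpers for reporting per-package errors and walking the import graph.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Request only names and the import graph; see LoadMode for other bits.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps}
	pkgs, err := packages.Load(cfg, "bytes", "unicode...")
	if err != nil {
		log.Fatal(err) // Load itself failed (bad pattern, missing go command, ...)
	}
	// Per-package problems are reported on each Package, not by Load's error.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	// pkgs holds only the packages matched by the patterns;
	// Visit walks the Imports graph to reach their dependencies.
	packages.Visit(pkgs, nil, func(p *packages.Package) {
		fmt.Println(p.ID)
	})
}
```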
diff --git a/operator/vendor/golang.org/x/tools/go/packages/external.go b/operator/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 00000000..f37bc651
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,153 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// This file defines the protocol that enables an external "driver"
+// tool to supply package metadata in place of 'go list'.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "slices"
+ "strings"
+)
+
+// DriverRequest defines the schema of a request for package metadata
+// from an external driver program. The JSON-encoded DriverRequest
+// message is provided to the driver program's standard input. The
+// query patterns are provided as command-line arguments.
+//
+// See the package documentation for an overview.
+type DriverRequest struct {
+ Mode LoadMode `json:"mode"`
+
+ // Env specifies the environment the underlying build system should be run in.
+ Env []string `json:"env"`
+
+ // BuildFlags are flags that should be passed to the underlying build system.
+ BuildFlags []string `json:"build_flags"`
+
+ // Tests specifies whether the patterns should also return test packages.
+ Tests bool `json:"tests"`
+
+ // Overlay maps file paths (relative to the driver's working directory)
+ // to the contents of overlay files (see Config.Overlay).
+ Overlay map[string][]byte `json:"overlay"`
+}
+
+// DriverResponse defines the schema of a response from an external
+// driver program, providing the results of a query for package
+// metadata. The driver program must write a JSON-encoded
+// DriverResponse message to its standard output.
+//
+// See the package documentation for an overview.
+type DriverResponse struct {
+ // NotHandled is returned if the request can't be handled by the current
+ // driver. If an external driver returns a response with NotHandled set, the
+ // rest of the DriverResponse is ignored, and go/packages falls back to its
+ // 'go list' driver. If go/packages is extended in the future to support
+ // lists of multiple drivers, it will fall back to the next driver in the list.
+ NotHandled bool
+
+ // Compiler and Arch are the arguments passed to types.SizesFor
+ // to get a types.Sizes to use when type checking.
+ Compiler string
+ Arch string
+
+ // Roots is the set of package IDs that make up the root packages.
+ // We have to encode this separately because when we encode a single package
+ // we cannot know if it is one of the roots as that requires knowledge of the
+ // graph it is part of.
+ Roots []string `json:",omitempty"`
+
+ // Packages is the full set of packages in the graph.
+ // The packages are not connected into a graph.
+ // The Imports field, if populated, will contain stubs that only have their ID set.
+ // Imports will be connected and then type and syntax information added in a
+ // later pass (see refine).
+ Packages []*Package
+
+ // GoVersion is the minor version number used by the driver
+ // (e.g. the go command on the PATH) when selecting .go files.
+ // Zero means unknown.
+ GoVersion int
+}
+
+// driver is the type for functions that query the build system for the
+// packages named by the patterns.
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
+
+// findExternalDriver returns a driver function that queries an external tool
+// for the build system package structure, or nil if no such tool is configured.
+// If GOPACKAGESDRIVER is set in the environment, findExternalDriver uses its
+// value (unless it is "off"); otherwise it searches for gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val, ok := strings.CutPrefix(env, toolPrefix); ok {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, patterns []string) (*DriverResponse, error) {
+ req, err := json.Marshal(DriverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ stderr := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, patterns...)
+ cmd.Dir = cfg.Dir
+ // The cwd gets resolved to the real path. On Darwin, where
+ // /tmp is a symlink, this breaks anything that expects the
+ // working directory to keep the original path, including the
+ // go command when dealing with modules.
+ //
+ // os.Getwd in the stdlib has a special feature where if the
+ // cwd and the PWD are the same node then it trusts
+ // the PWD, so by setting it in the env for the child
+ // process we fix up all the paths returned by the go
+ // command.
+ //
+ // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
+ cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
+ cmd.Stdin = bytes.NewReader(req)
+ cmd.Stdout = buf
+ cmd.Stderr = stderr
+
+ if err := cmd.Run(); err != nil {
+ return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+ }
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+ fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
+ }
+
+ var response DriverResponse
+ if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+}
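As a reference point for the protocol defined above, a stub GOPACKAGESDRIVER might look like the sketch below. The command name is hypothetical; it simply declines the request (NotHandled) so that go/packages falls back to the 'go list' driver, with a comment marking where a real driver would answer from its build system.

```go
// Command gopackagesdriver-stub is a hypothetical GOPACKAGESDRIVER sketch:
// it reads the JSON DriverRequest from stdin and the query patterns from
// argv, then replies NotHandled so go/packages falls back to 'go list'.
package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding DriverRequest: %v", err)
	}
	patterns := os.Args[1:] // includes the special "file=" and "pattern=" forms

	// A real driver would resolve the patterns against its build system
	// (Bazel, Blaze, ...) and populate Roots and Packages in the response.
	_ = patterns
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatalf("encoding DriverResponse: %v", err)
	}
}
```

Pointing the GOPACKAGESDRIVER environment variable at the built binary (or installing it as gopackagesdriver on the PATH) routes Load's metadata queries through it.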
diff --git a/operator/vendor/golang.org/x/tools/go/packages/golist.go b/operator/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 00000000..680a70ca
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1086 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+ error
+}
+
+// responseDeduper wraps a DriverResponse, deduplicating its contents.
+type responseDeduper struct {
+ seenRoots map[string]bool
+ seenPackages map[string]*Package
+ dr *DriverResponse
+}
+
+func newDeduper() *responseDeduper {
+ return &responseDeduper{
+ dr: &DriverResponse{},
+ seenRoots: map[string]bool{},
+ seenPackages: map[string]*Package{},
+ }
+}
+
+// addAll fills in r with a DriverResponse.
+func (r *responseDeduper) addAll(dr *DriverResponse) {
+ for _, pkg := range dr.Packages {
+ r.addPackage(pkg)
+ }
+ for _, root := range dr.Roots {
+ r.addRoot(root)
+ }
+ r.dr.GoVersion = dr.GoVersion
+}
+
+func (r *responseDeduper) addPackage(p *Package) {
+ if r.seenPackages[p.ID] != nil {
+ return
+ }
+ r.seenPackages[p.ID] = p
+ r.dr.Packages = append(r.dr.Packages, p)
+}
+
+func (r *responseDeduper) addRoot(id string) {
+ if r.seenRoots[id] {
+ return
+ }
+ r.seenRoots[id] = true
+ r.dr.Roots = append(r.dr.Roots, id)
+}
+
+type golistState struct {
+ cfg *Config
+ ctx context.Context
+
+ runner *gocommand.Runner
+
+ // overlay is the JSON file that encodes the Config.Overlay
+ // mapping, used by 'go list -overlay=...'.
+ overlay string
+
+ envOnce sync.Once
+ goEnvError error
+ goEnv map[string]string
+
+ rootsOnce sync.Once
+ rootDirsError error
+ rootDirs map[string]string
+
+ goVersionOnce sync.Once
+ goVersionError error
+ goVersion int // The X in Go 1.X.
+
+ // vendorDirs caches the (non)existence of vendor directories.
+ vendorDirs map[string]bool
+}
+
+// getEnv returns Go environment variables. Only specific variables are
+// populated -- computing all of them is slow.
+func (state *golistState) getEnv() (map[string]string, error) {
+ state.envOnce.Do(func() {
+ var b *bytes.Buffer
+ b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
+ if state.goEnvError != nil {
+ return
+ }
+
+ state.goEnv = make(map[string]string)
+ decoder := json.NewDecoder(b)
+ if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
+ return
+ }
+ })
+ return state.goEnv, state.goEnvError
+}
+
+// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
+func (state *golistState) mustGetEnv() map[string]string {
+ env, err := state.getEnv()
+ if err != nil {
+ panic(fmt.Sprintf("mustGetEnv: %v", err))
+ }
+ return env
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
+//
+// overlay is the JSON file that encodes the cfg.Overlay
+// mapping, used by 'go list -overlay=...'
+func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
+ // Make sure that any asynchronous go commands are killed when we return.
+ parentCtx := cfg.Context
+ if parentCtx == nil {
+ parentCtx = context.Background()
+ }
+ ctx, cancel := context.WithCancel(parentCtx)
+ defer cancel()
+
+ response := newDeduper()
+
+ state := &golistState{
+ cfg: cfg,
+ ctx: ctx,
+ vendorDirs: map[string]bool{},
+ overlay: overlay,
+ runner: runner,
+ }
+
+ // Fill in response.Sizes asynchronously if necessary.
+ if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+ errCh := make(chan error)
+ go func() {
+ compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
+ response.dr.Compiler = compiler
+ response.dr.Arch = arch
+ errCh <- err
+ }()
+ defer func() {
+ if sizesErr := <-errCh; sizesErr != nil {
+ err = sizesErr
+ }
+ }()
+ }
+
+ // Determine files requested in contains patterns
+ var containFiles []string
+ restPatterns := make([]string, 0, len(patterns))
+ // Extract file= and other [querytype]= patterns. Report an error if querytype
+ // doesn't exist.
+extractQueries:
+ for _, pattern := range patterns {
+ eqidx := strings.Index(pattern, "=")
+ if eqidx < 0 {
+ restPatterns = append(restPatterns, pattern)
+ } else {
+ query, value := pattern[:eqidx], pattern[eqidx+len("="):]
+ switch query {
+ case "file":
+ containFiles = append(containFiles, value)
+ case "pattern":
+ restPatterns = append(restPatterns, value)
+ case "": // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ default:
+ for _, rune := range query {
+ if rune < 'a' || rune > 'z' { // not a reserved query
+ restPatterns = append(restPatterns, pattern)
+ continue extractQueries
+ }
+ }
+ // Reject all other patterns containing "="
+ return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
+ }
+ }
+ }
+
+ // See if we have any patterns to pass through to go list. Zero initial
+ // patterns also requires a go list call, since it's the equivalent of
+ // ".".
+ if len(restPatterns) > 0 || len(patterns) == 0 {
+ dr, err := state.createDriverResponse(restPatterns...)
+ if err != nil {
+ return nil, err
+ }
+ response.addAll(dr)
+ }
+
+ if len(containFiles) != 0 {
+ if err := state.runContainsQueries(response, containFiles); err != nil {
+ return nil, err
+ }
+ }
+
+ // (We may yet return an error due to defer.)
+ return response.dr, nil
+}
+
+// abs returns an absolute representation of path, based on cfg.Dir.
+func (cfg *Config) abs(path string) (string, error) {
+ if filepath.IsAbs(path) {
+ return path, nil
+ }
+ // In case cfg.Dir is relative, pass it to filepath.Abs.
+ return filepath.Abs(filepath.Join(cfg.Dir, path))
+}
+
+func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
+ for _, query := range queries {
+ // TODO(matloob): Do only one query per directory.
+ fdir := filepath.Dir(query)
+ // Pass absolute path of directory to go list so that it knows to treat it as a directory,
+ // not a package path.
+ pattern, err := state.cfg.abs(fdir)
+ if err != nil {
+ return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
+ }
+ dirResponse, err := state.createDriverResponse(pattern)
+
+ // If there was an error loading the package, or no packages are returned,
+ // or the package is returned with errors, try to load the file as an
+ // ad-hoc package.
+ // Usually the error will appear in a returned package, but may not if we're
+ // in module mode and the ad-hoc package is located outside a module.
+ if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
+ len(dirResponse.Packages[0].Errors) == 1 {
+ var queryErr error
+ if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
+ return err // return the original error
+ }
+ }
+ isRoot := make(map[string]bool, len(dirResponse.Roots))
+ for _, root := range dirResponse.Roots {
+ isRoot[root] = true
+ }
+ for _, pkg := range dirResponse.Packages {
+ // Add any new packages to the main set.
+ // We don't bother to filter packages that will be dropped by the change of roots;
+ // that will happen anyway during graph construction outside this function.
+ // Over-reporting packages is not a problem.
+ response.addPackage(pkg)
+ // if the package was not a root one, it cannot have the file
+ if !isRoot[pkg.ID] {
+ continue
+ }
+ for _, pkgFile := range pkg.GoFiles {
+ if filepath.Base(query) == filepath.Base(pkgFile) {
+ response.addRoot(pkg.ID)
+ break
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// adhocPackage attempts to load or construct an ad-hoc package for a given
+// query, if the original call to the driver produced inadequate results.
+func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) {
+ response, err := state.createDriverResponse(query)
+ if err != nil {
+ return nil, err
+ }
+ // If we get nothing back from `go list`,
+ // try to make this file into its own ad-hoc package.
+ // TODO(rstambler): Should this check against the original response?
+ if len(response.Packages) == 0 {
+ response.Packages = append(response.Packages, &Package{
+ ID: "command-line-arguments",
+ PkgPath: query,
+ GoFiles: []string{query},
+ CompiledGoFiles: []string{query},
+ Imports: make(map[string]*Package),
+ })
+ response.Roots = append(response.Roots, "command-line-arguments")
+ }
+ // Handle special cases.
+ if len(response.Packages) == 1 {
+ // golang/go#33482: If this is a file= query for ad-hoc packages where
+ // the file only exists on an overlay, and exists outside of a module,
+ // add the file to the package and remove the errors.
+ if response.Packages[0].ID == "command-line-arguments" ||
+ filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
+ if len(response.Packages[0].GoFiles) == 0 {
+ filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
+ // TODO(matloob): check if the file is outside of a root dir?
+ for path := range state.cfg.Overlay {
+ if path == filename {
+ response.Packages[0].Errors = nil
+ response.Packages[0].GoFiles = []string{path}
+ response.Packages[0].CompiledGoFiles = []string{path}
+ }
+ }
+ }
+ }
+ }
+ return response, nil
+}
+
+// Fields must match go list;
+// see $GOROOT/src/cmd/go/internal/load/pkg.go.
+type jsonPackage struct {
+ ImportPath string
+ Dir string
+ Name string
+ Target string
+ Export string
+ GoFiles []string
+ CompiledGoFiles []string
+ IgnoredGoFiles []string
+ IgnoredOtherFiles []string
+ EmbedPatterns []string
+ EmbedFiles []string
+ CFiles []string
+ CgoFiles []string
+ CXXFiles []string
+ MFiles []string
+ HFiles []string
+ FFiles []string
+ SFiles []string
+ SwigFiles []string
+ SwigCXXFiles []string
+ SysoFiles []string
+ Imports []string
+ ImportMap map[string]string
+ Deps []string
+ Module *Module
+ TestGoFiles []string
+ TestImports []string
+ XTestGoFiles []string
+ XTestImports []string
+ ForTest string // q in a "p [q.test]" package, else ""
+ DepOnly bool
+
+ Error *packagesinternal.PackageError
+ DepsErrors []*packagesinternal.PackageError
+}
+
+func otherFiles(p *jsonPackage) [][]string {
+ return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
+}
+
+// createDriverResponse uses the "go list" command to expand the pattern
+// words and return a response for the specified packages.
+func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) {
+ // go list uses the following identifiers in ImportPath and Imports:
+ //
+ // "p" -- importable package or main (command)
+ // "q.test" -- q's test executable
+ // "p [q.test]" -- variant of p as built for q's test executable
+ // "q_test [q.test]" -- q's external test package
+ //
+ // The packages p that are built differently for a test q.test
+ // are q itself, plus any helpers used by the external test q_test,
+ // typically including "testing" and all its dependencies.
+
+ // Run "go list" for complete
+ // information on the specified packages.
+ goVersion, err := state.getGoVersion()
+ if err != nil {
+ return nil, err
+ }
+ buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...)
+ if err != nil {
+ return nil, err
+ }
+
+ seen := make(map[string]*jsonPackage)
+ pkgs := make(map[string]*Package)
+ additionalErrors := make(map[string][]Error)
+ // Decode the JSON and convert it to Package form.
+ response := &DriverResponse{
+ GoVersion: goVersion,
+ }
+ for dec := json.NewDecoder(buf); dec.More(); {
+ p := new(jsonPackage)
+ if err := dec.Decode(p); err != nil {
+ return nil, fmt.Errorf("JSON decoding failed: %v", err)
+ }
+
+ if p.ImportPath == "" {
+ // The documentation for go list says that “[e]rroneous packages will have
+ // a non-empty ImportPath”. If for some reason it comes back empty, we
+ // prefer to error out rather than silently discarding data or handing
+ // back a package without any way to refer to it.
+ if p.Error != nil {
+ return nil, Error{
+ Pos: p.Error.Pos,
+ Msg: p.Error.Err,
+ }
+ }
+ return nil, fmt.Errorf("package missing import path: %+v", p)
+ }
+
+ // Work around https://golang.org/issue/33157:
+ // go list -e, when given an absolute path, will find the package contained at
+ // that directory. But when no package exists there, it will return a fake package
+ // with an error and the ImportPath set to the absolute path provided to go list.
+ // Try to convert that absolute path to what its package path would be if it's
+ // contained in a known module or GOPATH entry. This will allow the package to be
+ // properly "reclaimed" when overlays are processed.
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil {
+ pkgPath, ok, err := state.getPkgPath(p.ImportPath)
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ p.ImportPath = pkgPath
+ }
+ }
+
+ if old, found := seen[p.ImportPath]; found {
+ // If one version of the package has an error, and the other doesn't, assume
+ // that this is a case where go list is reporting a fake dependency variant
+ // of the imported package: When a package tries to invalidly import another
+ // package, go list emits a variant of the imported package (with the same
+ // import path, but with an error on it, and the package will have a
+ // DepError set on it). An example of when this can happen is for imports of
+ // main packages: main packages can not be imported, but they may be
+ // separately matched and listed by another pattern.
+ // See golang.org/issue/36188 for more details.
+
+ // The plan is that eventually, hopefully in Go 1.15, the error will be
+ // reported on the importing package rather than the duplicate "fake"
+ // version of the imported package. Once all supported versions of Go
+ // have the new behavior this logic can be deleted.
+ // TODO(matloob): delete the workaround logic once all supported versions of
+ // Go return the errors on the proper package.
+
+ // There should be exactly one version of a package that doesn't have an
+ // error.
+ if old.Error == nil && p.Error == nil {
+ if !reflect.DeepEqual(p, old) {
+ return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
+ }
+ continue
+ }
+
+ // Determine if this package's error needs to be bubbled up.
+ // This is a hack, and we expect for go list to eventually set the error
+ // on the package.
+ if old.Error != nil {
+ var errkind string
+ if strings.Contains(old.Error.Err, "not an importable package") {
+ errkind = "not an importable package"
+ } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
+ errkind = "use of internal package not allowed"
+ }
+ if errkind != "" {
+ if len(old.Error.ImportStack) < 1 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
+ }
+ importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
+ if importingPkg == old.ImportPath {
+ // An older version of Go put this package itself on top of the import
+ // stack, instead of the importer. Look for the importer in the
+ // second-from-top position.
+ if len(old.Error.ImportStack) < 2 {
+ return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
+ }
+ importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
+ }
+ additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
+ Pos: old.Error.Pos,
+ Msg: old.Error.Err,
+ Kind: ListError,
+ })
+ }
+ }
+
+ // Make sure that if there's a version of the package without an error,
+ // that's the one reported to the user.
+ if old.Error == nil {
+ continue
+ }
+
+ // This package will replace the old one at the end of the loop.
+ }
+ seen[p.ImportPath] = p
+
+ pkg := &Package{
+ Name: p.Name,
+ ID: p.ImportPath,
+ Dir: p.Dir,
+ Target: p.Target,
+ GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
+ CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
+ OtherFiles: absJoin(p.Dir, otherFiles(p)...),
+ EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
+ EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
+ IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
+ ForTest: p.ForTest,
+ depsErrors: p.DepsErrors,
+ Module: p.Module,
+ }
+
+ if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
+ if len(p.CompiledGoFiles) > len(p.GoFiles) {
+ // We need the cgo definitions, which are in the first
+ // CompiledGoFile after the non-cgo ones. This is a hack but there
+ // isn't currently a better way to find it. We also need the pure
+ // Go files and unprocessed cgo files, all of which are already
+ // in pkg.GoFiles.
+ cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
+ pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
+ } else {
+ // golang/go#38990: go list silently fails to do cgo processing
+ pkg.CompiledGoFiles = nil
+ pkg.Errors = append(pkg.Errors, Error{
+ Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.",
+ Kind: ListError,
+ })
+ }
+ }
+
+ // Work around https://golang.org/issue/28749:
+ // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
+ // Remove files from CompiledGoFiles that are non-go files
+ // (or are not files that look like they are from the cache).
+ if len(pkg.CompiledGoFiles) > 0 {
+ out := pkg.CompiledGoFiles[:0]
+ for _, f := range pkg.CompiledGoFiles {
+ if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file
+ continue
+ }
+ out = append(out, f)
+ }
+ pkg.CompiledGoFiles = out
+ }
+
+ // Extract the PkgPath from the package's ID.
+ if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
+ pkg.PkgPath = pkg.ID[:i]
+ } else {
+ pkg.PkgPath = pkg.ID
+ }
+
+ if pkg.PkgPath == "unsafe" {
+ pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929)
+ } else if len(pkg.CompiledGoFiles) == 0 {
+ // Workaround for pre-Go 1.11 versions of go list.
+ // TODO(matloob): they should be handled by the fallback.
+ // Can we delete this?
+ pkg.CompiledGoFiles = pkg.GoFiles
+ }
+
+ // Assume go list emits only absolute paths for Dir.
+ if p.Dir != "" && !filepath.IsAbs(p.Dir) {
+ log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
+ }
+
+ if p.Export != "" && !filepath.IsAbs(p.Export) {
+ pkg.ExportFile = filepath.Join(p.Dir, p.Export)
+ } else {
+ pkg.ExportFile = p.Export
+ }
+
+ // imports
+ //
+ // Imports contains the IDs of all imported packages.
+ // ImportsMap records (path, ID) only where they differ.
+ ids := make(map[string]bool)
+ for _, id := range p.Imports {
+ ids[id] = true
+ }
+ pkg.Imports = make(map[string]*Package)
+ for path, id := range p.ImportMap {
+ pkg.Imports[path] = &Package{ID: id} // non-identity import
+ delete(ids, id)
+ }
+ for id := range ids {
+ if id == "C" {
+ continue
+ }
+
+ pkg.Imports[id] = &Package{ID: id} // identity import
+ }
+ if !p.DepOnly {
+ response.Roots = append(response.Roots, pkg.ID)
+ }
+
+ // Temporary work-around for golang/go#39986. Parse filenames out of
+ // error messages. This happens if there are unrecoverable syntax
+ // errors in the source, so we can't match on a specific error message.
+ //
+ // TODO(rfindley): remove this heuristic, in favor of considering
+ // InvalidGoFiles from the list driver.
+ if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) {
+ addFilenameFromPos := func(pos string) bool {
+ split := strings.Split(pos, ":")
+ if len(split) < 1 {
+ return false
+ }
+ filename := strings.TrimSpace(split[0])
+ if filename == "" {
+ return false
+ }
+ if !filepath.IsAbs(filename) {
+ filename = filepath.Join(state.cfg.Dir, filename)
+ }
+ info, _ := os.Stat(filename)
+ if info == nil {
+ return false
+ }
+ pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
+ pkg.GoFiles = append(pkg.GoFiles, filename)
+ return true
+ }
+ found := addFilenameFromPos(err.Pos)
+ // In some cases, go list only reports the error position in the
+ // error text, not in the error's Pos field. One such case is when the
+ // file's package name is a keyword (see golang.org/issue/39763).
+ if !found {
+ addFilenameFromPos(err.Err)
+ }
+ }
+
+ if p.Error != nil {
+ msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
+ // Address golang.org/issue/35964 by appending import stack to error message.
+ if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
+ msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
+ }
+ pkg.Errors = append(pkg.Errors, Error{
+ Pos: p.Error.Pos,
+ Msg: msg,
+ Kind: ListError,
+ })
+ }
+
+ pkgs[pkg.ID] = pkg
+ }
+
+ for id, errs := range additionalErrors {
+ if p, ok := pkgs[id]; ok {
+ p.Errors = append(p.Errors, errs...)
+ }
+ }
+ for _, pkg := range pkgs {
+ response.Packages = append(response.Packages, pkg)
+ }
+ sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
+
+ return response, nil
+}
+
+func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
+ if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 {
+ return false
+ }
+
+ goV, err := state.getGoVersion()
+ if err != nil {
+ return false
+ }
+
+ // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty.
+ // The import stack behaves differently for these versions than newer Go versions.
+ if goV < 15 {
+ return len(p.Error.ImportStack) == 0
+ }
+
+ // On Go 1.15 and later, only parse filenames out of error if there's no import stack,
+ // or the current package is at the top of the import stack. This is not guaranteed
+ // to work perfectly, but should avoid some cases where files in errors don't belong to this
+ // package.
+ return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath
+}
+
+// getGoVersion returns the effective minor version of the go command.
+func (state *golistState) getGoVersion() (int, error) {
+ state.goVersionOnce.Do(func() {
+ state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
+ })
+ return state.goVersion, state.goVersionError
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root
+// directory.
+func (state *golistState) getPkgPath(dir string) (string, bool, error) {
+ if !filepath.IsAbs(dir) {
+ panic("non-absolute dir passed to getPkgPath")
+ }
+ roots, err := state.determineRootDirs()
+ if err != nil {
+ return "", false, err
+ }
+
+ for rdir, rpath := range roots {
+ // Make sure that the directory is in the module,
+ // to avoid creating a path relative to another module.
+ if !strings.HasPrefix(dir, rdir) {
+ continue
+ }
+ // TODO(matloob): This doesn't properly handle symlinks.
+ r, err := filepath.Rel(rdir, dir)
+ if err != nil {
+ continue
+ }
+ if rpath != "" {
+ // We choose only one root even though the directory can belong to multiple modules
+ // or GOPATH entries. This is okay because we only need to work with absolute dirs when a
+ // file is missing from disk, for instance when gopls calls go/packages in an overlay.
+ // Once the file is saved, gopls or the next invocation of the tool will get the correct
+ // result straight from go list.
+ // TODO(matloob): Implement module tiebreaking?
+ return path.Join(rpath, filepath.ToSlash(r)), true, nil
+ }
+ return filepath.ToSlash(r), true, nil
+ }
+ return "", false, nil
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+ for _, files := range fileses {
+ for _, file := range files {
+ if !filepath.IsAbs(file) {
+ file = filepath.Join(dir, file)
+ }
+ res = append(res, file)
+ }
+ }
+ return res
+}
+
+func jsonFlag(cfg *Config, goVersion int) string {
+ if goVersion < 19 {
+ return "-json"
+ }
+ var fields []string
+ added := make(map[string]bool)
+ addFields := func(fs ...string) {
+ for _, f := range fs {
+ if !added[f] {
+ added[f] = true
+ fields = append(fields, f)
+ }
+ }
+ }
+ addFields("Name", "ImportPath", "Error") // These fields are always needed
+ if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+ addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
+ "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
+ "SwigFiles", "SwigCXXFiles", "SysoFiles")
+ if cfg.Tests {
+ addFields("TestGoFiles", "XTestGoFiles")
+ }
+ }
+ if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
+ // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
+ // even when -compiled isn't passed in.
+ // TODO(#52435): Should we make the test ask for -compiled, or automatically
+ // request CompiledGoFiles in certain circumstances?
+ addFields("Dir", "CompiledGoFiles")
+ }
+ if cfg.Mode&NeedCompiledGoFiles != 0 {
+ addFields("Dir", "CompiledGoFiles", "Export")
+ }
+ if cfg.Mode&NeedImports != 0 {
+ // When imports are requested, DepOnly is used to distinguish between packages
+ // explicitly requested and transitive imports of those packages.
+ addFields("DepOnly", "Imports", "ImportMap")
+ if cfg.Tests {
+ addFields("TestImports", "XTestImports")
+ }
+ }
+ if cfg.Mode&NeedDeps != 0 {
+ addFields("DepOnly")
+ }
+ if usesExportData(cfg) {
+ // Request Dir in the unlikely case Export is not absolute.
+ addFields("Dir", "Export")
+ }
+ if cfg.Mode&NeedForTest != 0 {
+ addFields("ForTest")
+ }
+ if cfg.Mode&needInternalDepsErrors != 0 {
+ addFields("DepsErrors")
+ }
+ if cfg.Mode&NeedModule != 0 {
+ addFields("Module")
+ }
+ if cfg.Mode&NeedEmbedFiles != 0 {
+ addFields("EmbedFiles")
+ }
+ if cfg.Mode&NeedEmbedPatterns != 0 {
+ addFields("EmbedPatterns")
+ }
+ if cfg.Mode&NeedTarget != 0 {
+ addFields("Target")
+ }
+ return "-json=" + strings.Join(fields, ",")
+}
+
+func golistargs(cfg *Config, words []string, goVersion int) []string {
+ const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+ fullargs := []string{
+ "-e", jsonFlag(cfg, goVersion),
+ fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
+ fmt.Sprintf("-test=%t", cfg.Tests),
+ fmt.Sprintf("-export=%t", usesExportData(cfg)),
+ fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
+ // go list doesn't let you pass -test and -find together,
+ // probably because you'd just get the TestMain.
+ fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)),
+ }
+
+ // golang/go#60456: with go1.21 and later, go list serves pgo variants, which
+ // can be costly to compute and may result in redundant processing for the
+ // caller. Disable these variants. If someone wants to add e.g. a NeedPGO
+ // mode flag, that should be a separate proposal.
+ if goVersion >= 21 {
+ fullargs = append(fullargs, "-pgo=off")
+ }
+
+ fullargs = append(fullargs, cfg.BuildFlags...)
+ fullargs = append(fullargs, "--")
+ fullargs = append(fullargs, words...)
+ return fullargs
+}
+
+// cfgInvocation returns an Invocation that reflects cfg's settings.
+func (state *golistState) cfgInvocation() gocommand.Invocation {
+ cfg := state.cfg
+ return gocommand.Invocation{
+ BuildFlags: cfg.BuildFlags,
+ CleanEnv: cfg.Env != nil,
+ Env: cfg.Env,
+ Logf: cfg.Logf,
+ WorkingDir: cfg.Dir,
+ Overlay: state.overlay,
+ }
+}
+
+// invokeGo returns the stdout of a go command invocation.
+func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
+ cfg := state.cfg
+
+ inv := state.cfgInvocation()
+ inv.Verb = verb
+ inv.Args = args
+
+ stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
+ if err != nil {
+ // Check for 'go' executable not being found.
+ if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
+ }
+
+ exitErr, ok := err.(*exec.ExitError)
+ if !ok {
+ // Catastrophic error:
+ // - context cancellation
+ return nil, fmt.Errorf("couldn't run 'go': %w", err)
+ }
+
+ // Old go version?
+ if strings.Contains(stderr.String(), "flag provided but not defined") {
+ return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
+ }
+
+ // Related to #24854
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
+ return nil, friendlyErr
+ }
+
+ // Return an error if 'go list' failed due to missing tools in
+ // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
+ return nil, friendlyErr
+ }
+
+ // Is there an error running the C compiler in cgo? This will be reported in the "Error" field
+ // and should be suppressed by go list -e.
+ //
+ // This condition is not perfect yet because the error message can include error messages other than those from runtime/cgo.
+ isPkgPathRune := func(r rune) bool {
+ // From https://golang.org/ref/spec#Import_declarations:
+ // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
+ // using only characters belonging to Unicode's L, M, N, P, and S general categories
+ // (the Graphic characters without spaces) and may also exclude the
+ // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
+ return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
+ !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
+ }
+ // golang/go#36770: Handle case where cmd/go prints module download messages before the error.
+ msg := stderr.String()
+ for strings.HasPrefix(msg, "go: downloading") {
+ msg = msg[strings.IndexRune(msg, '\n')+1:]
+ }
+ if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
+ msg := msg[len("# "):]
+ if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
+ return stdout, nil
+ }
+ // Treat pkg-config errors as a special case (golang.org/issue/36770).
+ if strings.HasPrefix(msg, "pkg-config") {
+ return stdout, nil
+ }
+ }
+
+ // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
+ // the error in the Err section of stdout in case -e option is provided.
+ // This fix is provided for backwards compatibility.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Similar to the previous error, but currently lacks a fix in Go.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
+ // If the package doesn't exist, put the absolute path of the directory into the error message,
+ // as Go 1.13 list does.
+ const noSuchDirectory = "no such directory"
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+ errstr := stderr.String()
+ abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ abspath, strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+ // Note that the error message we look for in this case is different from the one looked for above.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+ output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+ // directory outside any module.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Another variation of the previous error
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ // TODO(matloob): command-line-arguments isn't correct here.
+ "command-line-arguments", strings.Trim(stderr.String(), "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+ // status if there's a dependency on a package that doesn't exist. But it should return
+ // a zero exit status and set an error on that package.
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+ // Don't clobber stdout if `go list` actually returned something.
+ if len(stdout.String()) > 0 {
+ return stdout, nil
+ }
+ // try to extract package name from string
+ stderrStr := stderr.String()
+ var importPath string
+ colon := strings.Index(stderrStr, ":")
+ if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
+ importPath = stderrStr[len("go build "):colon]
+ }
+ output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+ importPath, strings.Trim(stderrStr, "\n"))
+ return bytes.NewBufferString(output), nil
+ }
+
+ // Export mode entails a build.
+ // If that build fails, errors appear on stderr
+ // (despite the -e flag) and the Export field is blank.
+ // Do not fail in that case.
+ // The same is true if an ad-hoc package given to go list doesn't exist.
+ // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
+ // packages don't exist or a build fails.
+ if !usesExportData(cfg) && !containsGoFile(args) {
+ return nil, friendlyErr
+ }
+ }
+ return stdout, nil
+}
+
+func containsGoFile(s []string) bool {
+ for _, f := range s {
+ if strings.HasSuffix(f, ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
+
+// getSizesForArgs queries 'go list' for the appropriate
+// Compiler and GOARCH arguments to pass to [types.SizesFor].
+func getSizesForArgs(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}
+ stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
+ var goarch, compiler string
+ if rawErr != nil {
+ rawErrMsg := rawErr.Error()
+ if strings.Contains(rawErrMsg, "cannot find main module") ||
+ strings.Contains(rawErrMsg, "go.mod file not found") {
+ // User's running outside of a module.
+ // All bets are off. Get GOARCH and guess compiler is gc.
+ // TODO(matloob): Is this a problem in practice?
+ inv.Verb = "env"
+ inv.Args = []string{"GOARCH"}
+ envout, enverr := gocmdRunner.Run(ctx, inv)
+ if enverr != nil {
+ return "", "", enverr
+ }
+ goarch = strings.TrimSpace(envout.String())
+ compiler = "gc"
+ } else if friendlyErr != nil {
+ return "", "", friendlyErr
+ } else {
+ // This should be unreachable, but be defensive
+ // in case RunRaw's error results are inconsistent.
+ return "", "", rawErr
+ }
+ } else {
+ fields := strings.Fields(stdout.String())
+ if len(fields) < 2 {
+ return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>",
+ stdout.String(), stderr.String())
+ }
+ goarch = fields[0]
+ compiler = fields[1]
+ }
+ return compiler, goarch, nil
+}
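The "file=" handling implemented by runContainsQueries above is exercised from client code roughly as follows; the file path here is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// A "file=" pattern asks the driver which package(s) contain the file;
	// for the go list driver this goes through runContainsQueries above.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "file=./internal/example/example.go") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles), "Go files")
	}
}
```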
diff --git a/operator/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/operator/vendor/golang.org/x/tools/go/packages/golist_overlay.go
new file mode 100644
index 00000000..d9d5a45c
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/golist_overlay.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "encoding/json"
+ "path/filepath"
+
+ "golang.org/x/tools/internal/gocommand"
+)
+
+// determineRootDirs returns a mapping from absolute directories that could
+// contain code to their corresponding import path prefixes.
+func (state *golistState) determineRootDirs() (map[string]string, error) {
+ env, err := state.getEnv()
+ if err != nil {
+ return nil, err
+ }
+ if env["GOMOD"] != "" {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
+ })
+ } else {
+ state.rootsOnce.Do(func() {
+ state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
+ })
+ }
+ return state.rootDirs, state.rootDirsError
+}
+
+func (state *golistState) determineRootDirsModules() (map[string]string, error) {
+ // List all of the modules--the first will be the directory for the main
+ // module. Any replaced modules will also need to be treated as roots.
+ // Editing files in the module cache isn't a great idea, so we don't
+ // plan to ever support that.
+ out, err := state.invokeGo("list", "-m", "-json", "all")
+ if err != nil {
+ // 'go list all' will fail if we're outside of a module and
+ // GO111MODULE=on. Try falling back without 'all'.
+ var innerErr error
+ out, innerErr = state.invokeGo("list", "-m", "-json")
+ if innerErr != nil {
+ return nil, err
+ }
+ }
+ roots := map[string]string{}
+ modules := map[string]string{}
+ var i int
+ for dec := json.NewDecoder(out); dec.More(); {
+ mod := new(gocommand.ModuleJSON)
+ if err := dec.Decode(mod); err != nil {
+ return nil, err
+ }
+ if mod.Dir != "" && mod.Path != "" {
+ // This is a valid module; add it to the map.
+ absDir, err := state.cfg.abs(mod.Dir)
+ if err != nil {
+ return nil, err
+ }
+ modules[absDir] = mod.Path
+ // The first result is the main module.
+ if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
+ roots[absDir] = mod.Path
+ }
+ }
+ i++
+ }
+ return roots, nil
+}
+
+func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
+ m := map[string]string{}
+ for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
+ absDir, err := filepath.Abs(dir)
+ if err != nil {
+ return nil, err
+ }
+ m[filepath.Join(absDir, "src")] = ""
+ }
+ return m, nil
+}
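The overlay plumbing referenced above (golistState.overlay, forwarded as 'go list -overlay=...') is driven from the client via Config.Overlay. A minimal sketch, with placeholder module directory and file path:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Overlay maps file paths to contents that should be used instead of
	// (or in the absence of) the on-disk files; the go list driver passes
	// them along via the -overlay flag.
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles,
		Dir:  "/home/user/example", // placeholder module directory
		Overlay: map[string][]byte{
			"/home/user/example/extra.go": []byte("package example\n\nconst Extra = 1\n"),
		},
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.Name, p.GoFiles)
	}
}
```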
diff --git a/operator/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/operator/vendor/golang.org/x/tools/go/packages/loadmode_string.go
new file mode 100644
index 00000000..69eec9f4
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -0,0 +1,56 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "fmt"
+ "strings"
+)
+
+var modes = [...]struct {
+ mode LoadMode
+ name string
+}{
+ {NeedName, "NeedName"},
+ {NeedFiles, "NeedFiles"},
+ {NeedCompiledGoFiles, "NeedCompiledGoFiles"},
+ {NeedImports, "NeedImports"},
+ {NeedDeps, "NeedDeps"},
+ {NeedExportFile, "NeedExportFile"},
+ {NeedTypes, "NeedTypes"},
+ {NeedSyntax, "NeedSyntax"},
+ {NeedTypesInfo, "NeedTypesInfo"},
+ {NeedTypesSizes, "NeedTypesSizes"},
+ {NeedForTest, "NeedForTest"},
+ {NeedModule, "NeedModule"},
+ {NeedEmbedFiles, "NeedEmbedFiles"},
+ {NeedEmbedPatterns, "NeedEmbedPatterns"},
+ {NeedTarget, "NeedTarget"},
+}
+
+func (mode LoadMode) String() string {
+ if mode == 0 {
+ return "LoadMode(0)"
+ }
+ var out []string
+ // named bits
+ for _, item := range modes {
+ if (mode & item.mode) != 0 {
+ mode ^= item.mode
+ out = append(out, item.name)
+ }
+ }
+ // unnamed residue
+ if mode != 0 {
+ if out == nil {
+ return fmt.Sprintf("LoadMode(%#x)", int(mode))
+ }
+ out = append(out, fmt.Sprintf("%#x", int(mode)))
+ }
+ if len(out) == 1 {
+ return out[0]
+ }
+ return "(" + strings.Join(out, "|") + ")"
+}
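The three output shapes of the String method above (single named bit, combined bits, zero value) can be checked with a tiny program:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	fmt.Println(packages.NeedName)                      // single named bit: NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // combined bits: (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // zero value: LoadMode(0)
}
```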
diff --git a/operator/vendor/golang.org/x/tools/go/packages/packages.go b/operator/vendor/golang.org/x/tools/go/packages/packages.go
new file mode 100644
index 00000000..060ab08e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/packages.go
@@ -0,0 +1,1559 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+// See doc.go for package documentation and implementation notes.
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "go/types"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/errgroup"
+
+ "golang.org/x/tools/go/gcexportdata"
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/packagesinternal"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// A LoadMode controls the amount of detail to return when loading.
+// The bits below can be combined to specify which fields should be
+// filled in the result packages.
+//
+// The zero value is a special case, equivalent to combining
+// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
+//
+// ID and Errors (if present) will always be filled.
+// [Load] may return more information than requested.
+//
+// The Mode flag is a union of several bits named NeedName,
+// NeedFiles, and so on, each of which determines whether
+// a given field of Package (Name, Files, etc) should be
+// populated.
+//
+// For convenience, we provide named constants for the most
+// common combinations of Need flags:
+//
+// [LoadFiles] lists of files in each package
+// [LoadImports] ... plus imports
+// [LoadTypes] ... plus type information
+// [LoadSyntax] ... plus type-annotated syntax
+// [LoadAllSyntax] ... for all dependencies
+//
+// Unfortunately there are a number of open bugs related to
+// interactions among the LoadMode bits:
+// - https://go.dev/issue/56633
+// - https://go.dev/issue/56677
+// - https://go.dev/issue/58726
+// - https://go.dev/issue/63517
+type LoadMode int
+
+const (
+ // NeedName adds Name and PkgPath.
+ NeedName LoadMode = 1 << iota
+
+ // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles.
+ NeedFiles
+
+ // NeedCompiledGoFiles adds CompiledGoFiles.
+ NeedCompiledGoFiles
+
+ // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
+ // "placeholder" Packages with only the ID set.
+ NeedImports
+
+ // NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
+ NeedDeps
+
+ // NeedExportFile adds ExportFile.
+ NeedExportFile
+
+ // NeedTypes adds Types, Fset, and IllTyped.
+ NeedTypes
+
+ // NeedSyntax adds Syntax and Fset.
+ NeedSyntax
+
+ // NeedTypesInfo adds TypesInfo and Fset.
+ NeedTypesInfo
+
+ // NeedTypesSizes adds TypesSizes.
+ NeedTypesSizes
+
+ // needInternalDepsErrors adds the internal deps errors field for use by gopls.
+ needInternalDepsErrors
+
+ // NeedForTest adds ForTest.
+ //
+ // Tests must also be set on the context for this field to be populated.
+ NeedForTest
+
+ // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
+ // Modifies CompiledGoFiles and Types, and has no effect on its own.
+ typecheckCgo
+
+ // NeedModule adds Module.
+ NeedModule
+
+ // NeedEmbedFiles adds EmbedFiles.
+ NeedEmbedFiles
+
+ // NeedEmbedPatterns adds EmbedPatterns.
+ NeedEmbedPatterns
+
+ // NeedTarget adds Target.
+ NeedTarget
+
+ // Be sure to update loadmode_string.go when adding new items!
+)
+
+const (
+ // LoadFiles loads the name and file names for the initial packages.
+ LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+
+ // LoadImports loads the name, file names, and import mapping for the initial packages.
+ LoadImports = LoadFiles | NeedImports
+
+ // LoadTypes loads exported type information for the initial packages.
+ LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+
+ // LoadSyntax loads typed syntax for the initial packages.
+ LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+
+ // LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
+ LoadAllSyntax = LoadSyntax | NeedDeps
+
+ // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+ //
+ //go:fix inline
+ NeedExportsFile = NeedExportFile
+)
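+
+// Editorial note (illustration, not part of the upstream file): the named
+// combinations above are simple unions of the Need bits. For example,
+//
+//	LoadSyntax == NeedName | NeedFiles | NeedCompiledGoFiles | NeedImports |
+//		NeedTypes | NeedTypesSizes | NeedSyntax | NeedTypesInfo
+//
+// so Config{Mode: LoadSyntax} requests parsed and type-checked syntax for the
+// packages matching the patterns, while LoadAllSyntax (LoadSyntax | NeedDeps)
+// extends that to all of their dependencies.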
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+//
+// Calls to [Load] do not modify this struct.
+type Config struct {
+ // Mode controls the level of information returned for each package.
+ Mode LoadMode
+
+ // Context specifies the context for the load operation.
+ // Cancelling the context may cause [Load] to abort and
+ // return an error.
+ Context context.Context
+
+ // Logf is the logger for the config.
+ // If the user provides a logger, debug logging is enabled.
+ // If the GOPACKAGESDEBUG environment variable is set to true,
+ // but the logger is nil, default to log.Printf.
+ Logf func(format string, args ...any)
+
+ // Dir is the directory in which to run the build system's query tool
+ // that provides information about the packages.
+ // If Dir is empty, the tool is run in the current directory.
+ Dir string
+
+ // Env is the environment to use when invoking the build system's query tool.
+ // If Env is nil, the current environment is used.
+ // As in os/exec's Cmd, only the last value in the slice for
+ // each environment key is used. To specify the setting of only
+ // a few variables, append to the current environment, as in:
+ //
+ // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+ //
+ Env []string
+
+ // BuildFlags is a list of command-line flags to be passed through to
+ // the build system's query tool.
+ BuildFlags []string
+
+ // Fset provides source position information for syntax trees and types.
+ // If Fset is nil, Load will use a new fileset but leave this field unchanged.
+ Fset *token.FileSet
+
+ // ParseFile is called to read and parse each file
+ // when preparing a package's type-checked syntax tree.
+ // It must be safe to call ParseFile simultaneously from multiple goroutines.
+ // If ParseFile is nil, the loader uses parser.ParseFile.
+ //
+ // ParseFile should parse the source from src and use filename only for
+ // recording position information.
+ //
+ // An application may supply a custom implementation of ParseFile
+ // to change the effective file contents or the behavior of the parser,
+ // or to modify the syntax tree. For example, selectively eliminating
+ // unwanted function bodies can significantly accelerate type checking.
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
+
+ // If Tests is set, the loader includes not just the packages
+ // matching a particular pattern but also any related test packages,
+ // including test-only variants of the package and the test executable.
+ //
+ // For example, when using the go command, loading "fmt" with Tests=true
+ // returns four packages, with IDs "fmt" (the standard package),
+ // "fmt [fmt.test]" (the package as compiled for the test),
+ // "fmt_test" (the test functions from source files in package fmt_test),
+ // and "fmt.test" (the test binary).
+ //
+ // In build systems with explicit names for tests,
+ // setting Tests may have no effect.
+ Tests bool
+
+ // Overlay is a mapping from absolute file paths to file contents.
+ //
+ // For each map entry, [Load] uses the alternative file
+ // contents provided by the overlay mapping instead of reading
+ // from the file system. This mechanism can be used to enable
+ // editor-integrated tools to correctly analyze the contents
+ // of modified but unsaved buffers, for example.
+ //
+ // The overlay mapping is passed to the build system's driver
+ // (see "The driver protocol") so that it too can report
+ // consistent package metadata about unsaved files. However,
+ // drivers may vary in their level of support for overlays.
+ Overlay map[string][]byte
+}
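+
+// Editorial sketch (not part of the upstream file): from a client's point of
+// view, Overlay lets Load analyze an unsaved buffer in place of the file on
+// disk. The path, contents, and "file=" query below are illustrative only.
+//
+//	cfg := &packages.Config{
+//		Mode: packages.LoadSyntax,
+//		Overlay: map[string][]byte{
+//			"/abs/path/main.go": []byte("package main\n\nfunc main() {}\n"),
+//		},
+//	}
+//	pkgs, err := packages.Load(cfg, "file=/abs/path/main.go")
+//	if err != nil {
+//		// the pattern was invalid or the driver failed
+//	}
+//	_ = pkgs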
+
+// Load loads and returns the Go packages named by the given patterns.
+//
+// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
+//
+// The [Config.Mode] field is a set of bits that determine what kinds
+// of information should be computed and returned. Modes that require
+// more information tend to be slower. See [LoadMode] for details
+// and important caveats. Its zero value is equivalent to
+// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
+//
+// Each call to Load returns a new set of [Package] instances.
+// The Packages and their Imports form a directed acyclic graph.
+//
+// If the [NeedTypes] mode flag was set, each call to Load uses a new
+// [types.Importer], so [types.Object] and [types.Type] values from
+// different calls to Load must not be mixed as they will have
+// inconsistent notions of type identity.
+//
+// If any of the patterns was invalid as defined by the
+// underlying build system, Load returns an error.
+// It may return an empty list of packages without an error,
+// for instance for an empty expansion of a valid wildcard.
+// Errors associated with a particular package are recorded in the
+// corresponding Package's Errors list, and do not cause Load to
+// return an error. Clients may need to handle such errors before
+// proceeding with further analysis. The [PrintErrors] function is
+// provided for convenient display of all errors.
+func Load(cfg *Config, patterns ...string) ([]*Package, error) {
+ ld := newLoader(cfg)
+ response, external, err := defaultDriver(&ld.Config, patterns...)
+ if err != nil {
+ return nil, err
+ }
+
+ ld.sizes = types.SizesFor(response.Compiler, response.Arch)
+ if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 {
+ // Type size information is needed but unavailable.
+ if external {
+ // An external driver may fail to populate the Compiler/GOARCH fields,
+ // especially since they are relatively new (see #63700).
+ // Provide a sensible fallback in this case.
+ ld.sizes = types.SizesFor("gc", runtime.GOARCH)
+ if ld.sizes == nil { // gccgo-only arch
+ ld.sizes = types.SizesFor("gc", "amd64")
+ }
+ } else {
+ // Go list should never fail to deliver accurate size information.
+ // Reject the whole Load since the error is the same for every package.
+ return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q",
+ response.Compiler, response.Arch)
+ }
+ }
+
+ return ld.refine(response)
+}
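+
+// loadAndReportSketch is an editorial usage sketch added to this vendored
+// copy; it is not part of upstream x/tools. It shows the common pattern of
+// calling Load with an explicit Mode and surfacing per-package problems with
+// PrintErrors. The "./..." pattern is an assumption for the example.
+func loadAndReportSketch() ([]*Package, error) {
+	cfg := &Config{
+		// Request names, file lists, parsed syntax, and type information
+		// for the packages matched by the patterns.
+		Mode: NeedName | NeedFiles | NeedSyntax | NeedTypes | NeedTypesInfo,
+	}
+	pkgs, err := Load(cfg, "./...")
+	if err != nil {
+		// Load fails as a whole only for invalid patterns or driver
+		// problems; per-package errors land in Package.Errors.
+		return nil, err
+	}
+	if n := PrintErrors(pkgs); n > 0 {
+		return pkgs, fmt.Errorf("%d errors while loading packages", n)
+	}
+	return pkgs, nil
+}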
+
+// defaultDriver is a driver that implements go/packages' fallback behavior.
+// It will try to request to an external driver, if one exists. If there's
+// no external driver, or the driver returns a response with NotHandled set,
+// defaultDriver will fall back to the go list driver.
+// The boolean result indicates that an external driver handled the request.
+func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
+ const (
+ // windowsArgMax specifies the maximum command line length for
+ // the Windows' CreateProcess function.
+ windowsArgMax = 32767
+ // maxEnvSize is a very rough estimation of the maximum environment
+ // size of a user.
+ maxEnvSize = 16384
+ // safeArgMax specifies the maximum safe command line length to use
+ // by the underlying driver excl. the environment. We choose the Windows'
+ // ARG_MAX as the starting point because it's one of the lowest ARG_MAX
+ // constants out of the different supported platforms,
+ // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results.
+ safeArgMax = windowsArgMax - maxEnvSize
+ )
+ chunks, err := splitIntoChunks(patterns, safeArgMax)
+ if err != nil {
+ return nil, false, err
+ }
+
+ if driver := findExternalDriver(cfg); driver != nil {
+ response, err := callDriverOnChunks(driver, cfg, chunks)
+ if err != nil {
+ return nil, false, err
+ } else if !response.NotHandled {
+ return response, true, nil
+ }
+ // not handled: fall through
+ }
+
+ // go list fallback
+
+ // Write overlays once, as there are many calls
+ // to 'go list' (one per chunk plus others too).
+ overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay)
+ if err != nil {
+ return nil, false, err
+ }
+ defer cleanupOverlay()
+
+ var runner gocommand.Runner // (shared across many 'go list' calls)
+ driver := func(cfg *Config, patterns []string) (*DriverResponse, error) {
+ return goListDriver(cfg, &runner, overlayFile, patterns)
+ }
+ response, err := callDriverOnChunks(driver, cfg, chunks)
+ if err != nil {
+ return nil, false, err
+ }
+ return response, false, err
+}
+
+// splitIntoChunks chunks the slice so that the total number of characters
+// in a chunk is no longer than argMax.
+func splitIntoChunks(patterns []string, argMax int) ([][]string, error) {
+ if argMax <= 0 {
+ return nil, errors.New("failed to split patterns into chunks, negative safe argMax value")
+ }
+ var chunks [][]string
+ charsInChunk := 0
+ nextChunkStart := 0
+ for i, v := range patterns {
+ vChars := len(v)
+ if vChars > argMax {
+ // a single pattern is longer than the maximum safe ARG_MAX; this should rarely happen
+ return nil, errors.New("failed to split patterns into chunks, a pattern is too long")
+ }
+ charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too
+ if charsInChunk > argMax {
+ chunks = append(chunks, patterns[nextChunkStart:i])
+ nextChunkStart = i
+ charsInChunk = vChars
+ }
+ }
+ // add the last chunk
+ if nextChunkStart < len(patterns) {
+ chunks = append(chunks, patterns[nextChunkStart:])
+ }
+ return chunks, nil
+}
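+
+// Editorial note (worked example, not part of the upstream file): with
+// argMax = 10, splitIntoChunks([]string{"aaaa", "bbbb", "cccc"}, 10) returns
+// [["aaaa", "bbbb"], ["cccc"]]. Each pattern is counted with one extra
+// character for a separating space, so "aaaa" and "bbbb" together account for
+// exactly 10 characters, and adding "cccc" would exceed the limit, which
+// starts a new chunk.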
+
+func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) {
+ if len(chunks) == 0 {
+ return driver(cfg, nil)
+ }
+ responses := make([]*DriverResponse, len(chunks))
+ errNotHandled := errors.New("driver returned NotHandled")
+ var g errgroup.Group
+ for i, chunk := range chunks {
+ g.Go(func() (err error) {
+ responses[i], err = driver(cfg, chunk)
+ if responses[i] != nil && responses[i].NotHandled {
+ err = errNotHandled
+ }
+ return err
+ })
+ }
+ if err := g.Wait(); err != nil {
+ if errors.Is(err, errNotHandled) {
+ return &DriverResponse{NotHandled: true}, nil
+ }
+ return nil, err
+ }
+ return mergeResponses(responses...), nil
+}
+
+func mergeResponses(responses ...*DriverResponse) *DriverResponse {
+ if len(responses) == 0 {
+ return nil
+ }
+ response := newDeduper()
+ response.dr.NotHandled = false
+ response.dr.Compiler = responses[0].Compiler
+ response.dr.Arch = responses[0].Arch
+ response.dr.GoVersion = responses[0].GoVersion
+ for _, v := range responses {
+ response.addAll(v)
+ }
+ return response.dr
+}
+
+// A Package describes a loaded Go package.
+//
+// It also defines part of the JSON schema of [DriverResponse].
+// See the package documentation for an overview.
+type Package struct {
+ // ID is a unique identifier for a package,
+ // in a syntax provided by the underlying build system.
+ //
+ // Because the syntax varies based on the build system,
+ // clients should treat IDs as opaque and not attempt to
+ // interpret them.
+ ID string
+
+ // Name is the package name as it appears in the package source code.
+ Name string
+
+ // PkgPath is the package path as used by the go/types package.
+ PkgPath string
+
+ // Dir is the directory associated with the package, if it exists.
+ //
+ // For packages listed by the go command, this is the directory containing
+ // the package files.
+ Dir string
+
+ // Errors contains any errors encountered querying the metadata
+ // of the package, or while parsing or type-checking its files.
+ Errors []Error
+
+ // TypeErrors contains the subset of errors produced during type checking.
+ TypeErrors []types.Error
+
+ // GoFiles lists the absolute file paths of the package's Go source files.
+ // It may include files that should not be compiled, for example because
+ // they contain non-matching build tags, are documentary pseudo-files such as
+ // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing.
+ GoFiles []string
+
+ // CompiledGoFiles lists the absolute file paths of the package's source
+ // files that are suitable for type checking.
+ // This may differ from GoFiles if files are processed before compilation.
+ CompiledGoFiles []string
+
+ // OtherFiles lists the absolute file paths of the package's non-Go source files,
+ // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
+ OtherFiles []string
+
+ // EmbedFiles lists the absolute file paths of the package's files
+ // embedded with go:embed.
+ EmbedFiles []string
+
+ // EmbedPatterns lists the absolute file patterns of the package's
+ // files embedded with go:embed.
+ EmbedPatterns []string
+
+ // IgnoredFiles lists source files that are not part of the package
+ // using the current build configuration but that might be part of
+ // the package using other build configurations.
+ IgnoredFiles []string
+
+ // ExportFile is the absolute path to a file containing type
+ // information for the package as provided by the build system.
+ ExportFile string
+
+ // Target is the absolute install path of the .a file, for libraries,
+ // and of the executable file, for binaries.
+ Target string
+
+ // Imports maps import paths appearing in the package's Go source files
+ // to corresponding loaded Packages.
+ Imports map[string]*Package
+
+ // Module is the module information for the package if it exists.
+ //
+ // Note: it may be missing for std and cmd; see Go issue #65816.
+ Module *Module
+
+ // -- The following fields are not part of the driver JSON schema. --
+
+ // Types provides type information for the package.
+ // The NeedTypes LoadMode bit sets this field for packages matching the
+ // patterns; type information for dependencies may be missing or incomplete,
+ // unless NeedDeps and NeedImports are also set.
+ //
+ // Each call to [Load] returns a consistent set of type
+ // symbols, as defined by the comment at [types.Identical].
+ // Avoid mixing type information from two or more calls to [Load].
+ Types *types.Package `json:"-"`
+
+ // Fset provides position information for Types, TypesInfo, and Syntax.
+ // It is set only when Types is set.
+ Fset *token.FileSet `json:"-"`
+
+ // IllTyped indicates whether the package or any dependency contains errors.
+ // It is set only when Types is set.
+ IllTyped bool `json:"-"`
+
+ // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
+ //
+ // The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
+ // If NeedDeps and NeedImports are also set, this field will also be populated
+ // for dependencies.
+ //
+ // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are
+ // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles.
+ Syntax []*ast.File `json:"-"`
+
+ // TypesInfo provides type information about the package's syntax trees.
+ // It is set only when Syntax is set.
+ TypesInfo *types.Info `json:"-"`
+
+ // TypesSizes provides the effective size function for types in TypesInfo.
+ TypesSizes types.Sizes `json:"-"`
+
+ // -- internal --
+
+ // ForTest is the package under test, if any.
+ ForTest string
+
+ // depsErrors is the DepsErrors field from the go list response, if any.
+ depsErrors []*packagesinternal.PackageError
+}
+
+// Module provides module information for a package.
+//
+// It also defines part of the JSON schema of [DriverResponse].
+// See the package documentation for an overview.
+type Module struct {
+ Path string // module path
+ Version string // module version
+ Replace *Module // replaced by this module
+ Time *time.Time // time version was created
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+ Error *ModuleError // error loading module
+}
+
+// ModuleError holds errors loading a module.
+type ModuleError struct {
+ Err string // the error itself
+}
+
+func init() {
+ packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError {
+ return p.(*Package).depsErrors
+ }
+ packagesinternal.TypecheckCgo = int(typecheckCgo)
+ packagesinternal.DepsErrors = int(needInternalDepsErrors)
+}
+
+// An Error describes a problem with a package's metadata, syntax, or types.
+type Error struct {
+ Pos string // "file:line:col" or "file:line" or "" or "-"
+ Msg string
+ Kind ErrorKind
+}
+
+// ErrorKind describes the source of the error, allowing the user to
+// differentiate between errors generated by the driver, the parser, or the
+// type-checker.
+type ErrorKind int
+
+const (
+ UnknownError ErrorKind = iota
+ ListError
+ ParseError
+ TypeError
+)
+
+func (err Error) Error() string {
+ pos := err.Pos
+ if pos == "" {
+ pos = "-" // like token.Position{}.String()
+ }
+ return pos + ": " + err.Msg
+}
+
+// flatPackage is the JSON form of Package.
+// It drops all the type and syntax fields, and transforms the Imports
+// into a map from import path to package ID.
+//
+// TODO(adonovan): identify this struct with Package, effectively
+// publishing the JSON protocol.
+type flatPackage struct {
+ ID string
+ Name string `json:",omitempty"`
+ PkgPath string `json:",omitempty"`
+ Errors []Error `json:",omitempty"`
+ GoFiles []string `json:",omitempty"`
+ CompiledGoFiles []string `json:",omitempty"`
+ OtherFiles []string `json:",omitempty"`
+ EmbedFiles []string `json:",omitempty"`
+ EmbedPatterns []string `json:",omitempty"`
+ IgnoredFiles []string `json:",omitempty"`
+ ExportFile string `json:",omitempty"`
+ Imports map[string]string `json:",omitempty"`
+}
+
+// MarshalJSON returns the Package in its JSON form.
+// For the most part, the structure fields are written out unmodified, and
+// the type and syntax fields are skipped.
+// The imports are written out as just a map of path to package id.
+// The errors are written using a custom type that tries to preserve the
+// structure of error types we know about.
+//
+// This method exists to enable support for additional build systems. It is
+// not intended for use by clients of the API and we may change the format.
+func (p *Package) MarshalJSON() ([]byte, error) {
+ flat := &flatPackage{
+ ID: p.ID,
+ Name: p.Name,
+ PkgPath: p.PkgPath,
+ Errors: p.Errors,
+ GoFiles: p.GoFiles,
+ CompiledGoFiles: p.CompiledGoFiles,
+ OtherFiles: p.OtherFiles,
+ EmbedFiles: p.EmbedFiles,
+ EmbedPatterns: p.EmbedPatterns,
+ IgnoredFiles: p.IgnoredFiles,
+ ExportFile: p.ExportFile,
+ }
+ if len(p.Imports) > 0 {
+ flat.Imports = make(map[string]string, len(p.Imports))
+ for path, ipkg := range p.Imports {
+ flat.Imports[path] = ipkg.ID
+ }
+ }
+ return json.Marshal(flat)
+}
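+
+// Editorial sketch (not part of the upstream file): marshalling a Package
+// produces JSON of roughly the following shape, with the type and syntax
+// fields omitted and Imports flattened to a map from import path to package
+// ID. The concrete values below are hypothetical.
+//
+//	{
+//		"ID": "example.com/m/p",
+//		"Name": "p",
+//		"PkgPath": "example.com/m/p",
+//		"GoFiles": ["/abs/src/p/p.go"],
+//		"CompiledGoFiles": ["/abs/src/p/p.go"],
+//		"Imports": {"fmt": "fmt"}
+//	}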
+
+// UnmarshalJSON reads in a Package from its JSON format.
+// See MarshalJSON for details about the format accepted.
+func (p *Package) UnmarshalJSON(b []byte) error {
+ flat := &flatPackage{}
+ if err := json.Unmarshal(b, &flat); err != nil {
+ return err
+ }
+ *p = Package{
+ ID: flat.ID,
+ Name: flat.Name,
+ PkgPath: flat.PkgPath,
+ Errors: flat.Errors,
+ GoFiles: flat.GoFiles,
+ CompiledGoFiles: flat.CompiledGoFiles,
+ OtherFiles: flat.OtherFiles,
+ EmbedFiles: flat.EmbedFiles,
+ EmbedPatterns: flat.EmbedPatterns,
+ IgnoredFiles: flat.IgnoredFiles,
+ ExportFile: flat.ExportFile,
+ }
+ if len(flat.Imports) > 0 {
+ p.Imports = make(map[string]*Package, len(flat.Imports))
+ for path, id := range flat.Imports {
+ p.Imports[path] = &Package{ID: id}
+ }
+ }
+ return nil
+}
+
+func (p *Package) String() string { return p.ID }
+
+// loaderPackage augments Package with state used during the loading phase
+type loaderPackage struct {
+ *Package
+ importErrors map[string]error // maps each bad import to its error
+ preds []*loaderPackage // packages that import this one
+ unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded
+ color uint8 // for cycle detection
+ needsrc bool // load from source (Mode >= LoadTypes)
+ needtypes bool // type information is either requested or depended on
+ initial bool // package was matched by a pattern
+ goVersion int // minor version number of go command on PATH
+}
+
+// loader holds the working state of a single call to load.
+type loader struct {
+ pkgs map[string]*loaderPackage // keyed by Package.ID
+ Config
+ sizes types.Sizes // non-nil if needed by mode
+ parseCache map[string]*parseValue
+ parseCacheMu sync.Mutex
+ exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
+
+ // Config.Mode contains the implied mode (see impliedLoadMode).
+ // The implied mode covers all the fields for which we need data.
+ // requestedMode records the fields that were actually requested;
+ // fields not in requestedMode are zeroed out before the packages are
+ // returned to the user. This makes it easier to get right the
+ // conditions under which we need certain modes.
+ requestedMode LoadMode
+}
+
+type parseValue struct {
+ f *ast.File
+ err error
+ ready chan struct{}
+}
+
+func newLoader(cfg *Config) *loader {
+ ld := &loader{
+ parseCache: map[string]*parseValue{},
+ }
+ if cfg != nil {
+ ld.Config = *cfg
+ // If the user has provided a logger, use it.
+ ld.Config.Logf = cfg.Logf
+ }
+ if ld.Config.Logf == nil {
+ // If the GOPACKAGESDEBUG environment variable is set to true,
+ // but the user has not provided a logger, default to log.Printf.
+ if debug {
+ ld.Config.Logf = log.Printf
+ } else {
+ ld.Config.Logf = func(format string, args ...any) {}
+ }
+ }
+ if ld.Config.Mode == 0 {
+ ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
+ }
+ if ld.Config.Env == nil {
+ ld.Config.Env = os.Environ()
+ }
+ if ld.Context == nil {
+ ld.Context = context.Background()
+ }
+ if ld.Dir == "" {
+ if dir, err := os.Getwd(); err == nil {
+ ld.Dir = dir
+ }
+ }
+
+ // Save the actually requested fields. We'll zero them out before returning packages to the user.
+ ld.requestedMode = ld.Mode
+ ld.Mode = impliedLoadMode(ld.Mode)
+
+ if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+ if ld.Fset == nil {
+ ld.Fset = token.NewFileSet()
+ }
+
+ // ParseFile is required even in LoadTypes mode
+ // because we load source if export data is missing.
+ if ld.ParseFile == nil {
+ ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ // We implicitly promise to keep doing ast.Object resolution. :(
+ const mode = parser.AllErrors | parser.ParseComments
+ return parser.ParseFile(fset, filename, src, mode)
+ }
+ }
+ }
+
+ return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
+ roots := response.Roots
+ rootMap := make(map[string]int, len(roots))
+ for i, root := range roots {
+ rootMap[root] = i
+ }
+ ld.pkgs = make(map[string]*loaderPackage)
+ // first pass, fixup and build the map and roots
+ var initial = make([]*loaderPackage, len(roots))
+ for _, pkg := range response.Packages {
+ rootIndex := -1
+ if i, found := rootMap[pkg.ID]; found {
+ rootIndex = i
+ }
+
+ // Overlays can invalidate export data.
+ // TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+ exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+ // This package needs type information if the caller requested types and the package is
+ // either a root, or it's a non-root and the user requested dependencies ...
+ needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+ // This package needs source if the call requested source (or types info, which implies source)
+ // and the package is either a root, or it's a non-root and the user requested dependencies ...
+ needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+ // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+ // typechecking packages from source if they fail to compile.
+ (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
+ lpkg := &loaderPackage{
+ Package: pkg,
+ needtypes: needtypes,
+ needsrc: needsrc,
+ goVersion: response.GoVersion,
+ }
+ ld.pkgs[lpkg.ID] = lpkg
+ if rootIndex >= 0 {
+ initial[rootIndex] = lpkg
+ lpkg.initial = true
+ }
+ }
+ for i, root := range roots {
+ if initial[i] == nil {
+ return nil, fmt.Errorf("root package %v is missing", root)
+ }
+ }
+
+ // Materialize the import graph if it is needed (NeedImports),
+ // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}).
+ var leaves []*loaderPackage // packages with no unfinished successors
+ if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+ const (
+ white = 0 // new
+ grey = 1 // in progress
+ black = 2 // complete
+ )
+
+ // visit traverses the import graph, depth-first,
+ // and materializes the graph as Package.Imports.
+ //
+ // Valid imports are saved in the Package.Imports map.
+ // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+ // Thus, even in the presence of both kinds of errors,
+ // the Import graph remains a DAG.
+ //
+ // visit returns whether the package needs src or has a transitive
+ // dependency on a package that does. These are the only packages
+ // for which we load source code.
+ var stack []*loaderPackage
+ var visit func(from, lpkg *loaderPackage) bool
+ visit = func(from, lpkg *loaderPackage) bool {
+ if lpkg.color == grey {
+ panic("internal error: grey node")
+ }
+ if lpkg.color == white {
+ lpkg.color = grey
+ stack = append(stack, lpkg) // push
+ stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
+ lpkg.Imports = make(map[string]*Package, len(stubs))
+ for importPath, ipkg := range stubs {
+ var importErr error
+ imp := ld.pkgs[ipkg.ID]
+ if imp == nil {
+ // (includes package "C" when DisableCgo)
+ importErr = fmt.Errorf("missing package: %q", ipkg.ID)
+ } else if imp.color == grey {
+ importErr = fmt.Errorf("import cycle: %s", stack)
+ }
+ if importErr != nil {
+ if lpkg.importErrors == nil {
+ lpkg.importErrors = make(map[string]error)
+ }
+ lpkg.importErrors[importPath] = importErr
+ continue
+ }
+
+ if visit(lpkg, imp) {
+ lpkg.needsrc = true
+ }
+ lpkg.Imports[importPath] = imp.Package
+ }
+
+ // -- postorder --
+
+ // Complete type information is required for the
+ // immediate dependencies of each source package.
+ if lpkg.needsrc && ld.Mode&NeedTypes != 0 {
+ for _, ipkg := range lpkg.Imports {
+ ld.pkgs[ipkg.ID].needtypes = true
+ }
+ }
+
+ // NeedTypesSizes causes TypesSizes to be set even
+ // on packages for which types aren't needed.
+ if ld.Mode&NeedTypesSizes != 0 {
+ lpkg.TypesSizes = ld.sizes
+ }
+
+ // Add packages with no imports directly to the queue of leaves.
+ if len(lpkg.Imports) == 0 {
+ leaves = append(leaves, lpkg)
+ }
+
+ stack = stack[:len(stack)-1] // pop
+ lpkg.color = black
+ }
+
+ // Add edge from predecessor.
+ if from != nil {
+ from.unfinishedSuccs.Add(+1) // incref
+ lpkg.preds = append(lpkg.preds, from)
+ }
+
+ return lpkg.needsrc
+ }
+
+ // For each initial package, create its import DAG.
+ for _, lpkg := range initial {
+ visit(nil, lpkg)
+ }
+
+ } else {
+ // !NeedImports: drop the stub (ID-only) import packages
+ // that we are not even going to try to resolve.
+ for _, lpkg := range initial {
+ lpkg.Imports = nil
+ }
+ }
+
+ // Load type data and syntax if needed, starting at
+ // the initial packages (roots of the import DAG).
+ if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 {
+
+ // We avoid using g.SetLimit to limit concurrency as
+ // it makes g.Go stop accepting work, which prevents
+ // workers from enqueuing, and thus finishing, and thus
+ // allowing the group to make progress: deadlock.
+ //
+ // Instead we use the ioLimit and cpuLimit semaphores.
+ g, _ := errgroup.WithContext(ld.Context)
+
+ // enqueues adds a package to the type-checking queue.
+ // It must have no unfinished successors.
+ var enqueue func(*loaderPackage)
+ enqueue = func(lpkg *loaderPackage) {
+ g.Go(func() error {
+ // Parse and type-check.
+ ld.loadPackage(lpkg)
+
+ // Notify each waiting predecessor,
+ // and enqueue it when it becomes a leaf.
+ for _, pred := range lpkg.preds {
+ if pred.unfinishedSuccs.Add(-1) == 0 { // decref
+ enqueue(pred)
+ }
+ }
+
+ return nil
+ })
+ }
+
+ // Load leaves first, adding new packages
+ // to the queue as they become leaves.
+ for _, leaf := range leaves {
+ enqueue(leaf)
+ }
+
+ if err := g.Wait(); err != nil {
+ return nil, err // cancelled
+ }
+ }
+
+ // If the context is done, return its error and
+ // throw out [likely] incomplete packages.
+ if err := ld.Context.Err(); err != nil {
+ return nil, err
+ }
+
+ result := make([]*Package, len(initial))
+ for i, lpkg := range initial {
+ result[i] = lpkg.Package
+ }
+ for i := range ld.pkgs {
+ // Clear all unrequested fields,
+ // to catch programs that use more than they request.
+ if ld.requestedMode&NeedName == 0 {
+ ld.pkgs[i].Name = ""
+ ld.pkgs[i].PkgPath = ""
+ }
+ if ld.requestedMode&NeedFiles == 0 {
+ ld.pkgs[i].GoFiles = nil
+ ld.pkgs[i].OtherFiles = nil
+ ld.pkgs[i].IgnoredFiles = nil
+ }
+ if ld.requestedMode&NeedEmbedFiles == 0 {
+ ld.pkgs[i].EmbedFiles = nil
+ }
+ if ld.requestedMode&NeedEmbedPatterns == 0 {
+ ld.pkgs[i].EmbedPatterns = nil
+ }
+ if ld.requestedMode&NeedCompiledGoFiles == 0 {
+ ld.pkgs[i].CompiledGoFiles = nil
+ }
+ if ld.requestedMode&NeedImports == 0 {
+ ld.pkgs[i].Imports = nil
+ }
+ if ld.requestedMode&NeedExportFile == 0 {
+ ld.pkgs[i].ExportFile = ""
+ }
+ if ld.requestedMode&NeedTypes == 0 {
+ ld.pkgs[i].Types = nil
+ ld.pkgs[i].IllTyped = false
+ }
+ if ld.requestedMode&NeedSyntax == 0 {
+ ld.pkgs[i].Syntax = nil
+ }
+ if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 {
+ ld.pkgs[i].Fset = nil
+ }
+ if ld.requestedMode&NeedTypesInfo == 0 {
+ ld.pkgs[i].TypesInfo = nil
+ }
+ if ld.requestedMode&NeedTypesSizes == 0 {
+ ld.pkgs[i].TypesSizes = nil
+ }
+ if ld.requestedMode&NeedModule == 0 {
+ ld.pkgs[i].Module = nil
+ }
+ }
+
+ return result, nil
+}
+
+// loadPackage loads/parses/typechecks the specified package.
+// It must be called only once per Package,
+// after immediate dependencies are loaded.
+// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0.
+func (ld *loader) loadPackage(lpkg *loaderPackage) {
+ if lpkg.PkgPath == "unsafe" {
+ // Fill in the blanks to avoid surprises.
+ lpkg.Types = types.Unsafe
+ lpkg.Fset = ld.Fset
+ lpkg.Syntax = []*ast.File{}
+ lpkg.TypesInfo = new(types.Info)
+ lpkg.TypesSizes = ld.sizes
+ return
+ }
+
+ // Call NewPackage directly with explicit name.
+ // This avoids skew between golist and go/types when the files'
+ // package declarations are inconsistent.
+ lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
+ lpkg.Fset = ld.Fset
+
+ // Start shutting down if the context is done and do not load
+ // source or export data files.
+ // Packages that import this one will have ld.Context.Err() != nil.
+ // ld.Context.Err() will be returned later by refine.
+ if ld.Context.Err() != nil {
+ return
+ }
+
+ // Subtle: we populate all Types fields with an empty Package
+ // before loading export data so that export data processing
+ // never has to create a types.Package for an indirect dependency,
+ // which would then require that such created packages be explicitly
+ // inserted back into the Import graph as a final step after export data loading.
+ // (Hence this return is after the Types assignment.)
+ // The Diamond test exercises this case.
+ if !lpkg.needtypes && !lpkg.needsrc {
+ return
+ }
+
+ // TODO(adonovan): this condition looks wrong:
+ // I think it should be lpkg.needtypes && !lpkg.needsrc,
+ // so that NeedSyntax without NeedTypes can be satisfied by export data.
+ if !lpkg.needsrc {
+ if err := ld.loadFromExportData(lpkg); err != nil {
+ lpkg.Errors = append(lpkg.Errors, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError, // e.g. can't find/open/parse export data
+ })
+ }
+ return // not a source package, don't get syntax trees
+ }
+
+ appendError := func(err error) {
+ // Convert various error types into the one true Error.
+ var errs []Error
+ switch err := err.(type) {
+ case Error:
+ // from driver
+ errs = append(errs, err)
+
+ case *os.PathError:
+ // from parser
+ errs = append(errs, Error{
+ Pos: err.Path + ":1",
+ Msg: err.Err.Error(),
+ Kind: ParseError,
+ })
+
+ case scanner.ErrorList:
+ // from parser
+ for _, err := range err {
+ errs = append(errs, Error{
+ Pos: err.Pos.String(),
+ Msg: err.Msg,
+ Kind: ParseError,
+ })
+ }
+
+ case types.Error:
+ // from type checker
+ lpkg.TypeErrors = append(lpkg.TypeErrors, err)
+ errs = append(errs, Error{
+ Pos: err.Fset.Position(err.Pos).String(),
+ Msg: err.Msg,
+ Kind: TypeError,
+ })
+
+ default:
+ // unexpected impoverished error from parser?
+ errs = append(errs, Error{
+ Pos: "-",
+ Msg: err.Error(),
+ Kind: UnknownError,
+ })
+
+ // If you see this error message, please file a bug.
+ log.Printf("internal error: error %q (%T) without position", err, err)
+ }
+
+ lpkg.Errors = append(lpkg.Errors, errs...)
+ }
+
+ // If the go command on the PATH is newer than the runtime,
+ // then the go/{scanner,ast,parser,types} packages from the
+ // standard library may be unable to process the files
+ // selected by go list.
+ //
+ // There is currently no way to downgrade the effective
+ // version of the go command (see issue 52078), so we proceed
+ // with the newer go command but, in case of parse or type
+ // errors, we emit an additional diagnostic.
+ //
+ // See:
+ // - golang.org/issue/52078 (flag to set release tags)
+ // - golang.org/issue/50825 (gopls legacy version support)
+ // - golang.org/issue/55883 (go/packages confusing error)
+ //
+ // Should we assert a hard minimum of (currently) go1.16 here?
+ var runtimeVersion int
+ if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion {
+ defer func() {
+ if len(lpkg.Errors) > 0 {
+ appendError(Error{
+ Pos: "-",
+ Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion),
+ Kind: UnknownError,
+ })
+ }
+ }()
+ }
+
+ if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
+ // The config requested loading sources and types, but sources are missing.
+ // Add an error to the package and fall back to loading from export data.
+ appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
+ _ = ld.loadFromExportData(lpkg) // ignore any secondary errors
+
+ return // can't get syntax trees for this package
+ }
+
+ files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
+ for _, err := range errs {
+ appendError(err)
+ }
+
+ lpkg.Syntax = files
+ if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 {
+ return
+ }
+
+ // Start shutting down if the context is done and do not type check.
+ // Packages that import this one will have ld.Context.Err() != nil.
+ // ld.Context.Err() will be returned later by refine.
+ if ld.Context.Err() != nil {
+ return
+ }
+
+ // Populate TypesInfo only if needed, as it
+ // causes the type checker to work much harder.
+ if ld.Config.Mode&NeedTypesInfo != 0 {
+ lpkg.TypesInfo = &types.Info{
+ Types: make(map[ast.Expr]types.TypeAndValue),
+ Defs: make(map[*ast.Ident]types.Object),
+ Uses: make(map[*ast.Ident]types.Object),
+ Implicits: make(map[ast.Node]types.Object),
+ Instances: make(map[*ast.Ident]types.Instance),
+ Scopes: make(map[ast.Node]*types.Scope),
+ Selections: make(map[*ast.SelectorExpr]*types.Selection),
+ FileVersions: make(map[*ast.File]string),
+ }
+ }
+ lpkg.TypesSizes = ld.sizes
+
+ importer := importerFunc(func(path string) (*types.Package, error) {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+
+ // The imports map is keyed by import path.
+ ipkg := lpkg.Imports[path]
+ if ipkg == nil {
+ if err := lpkg.importErrors[path]; err != nil {
+ return nil, err
+ }
+ // There was skew between the metadata and the
+ // import declarations, likely due to an edit
+ // race, or because the ParseFile feature was
+ // used to supply alternative file contents.
+ return nil, fmt.Errorf("no metadata for %s", path)
+ }
+
+ if ipkg.Types != nil && ipkg.Types.Complete() {
+ return ipkg.Types, nil
+ }
+ log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
+ panic("unreachable")
+ })
+
+ // type-check
+ tc := &types.Config{
+ Importer: importer,
+
+ // Type-check bodies of functions only in initial packages.
+ // Example: for import graph A->B->C and initial packages {A,C},
+ // we can ignore function bodies in B.
+ IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
+
+ Error: appendError,
+ Sizes: ld.sizes, // may be nil
+ }
+ if lpkg.Module != nil && lpkg.Module.GoVersion != "" {
+ tc.GoVersion = "go" + lpkg.Module.GoVersion
+ }
+ if (ld.Mode & typecheckCgo) != 0 {
+ if !typesinternal.SetUsesCgo(tc) {
+ appendError(Error{
+ Msg: "typecheckCgo requires Go 1.15+",
+ Kind: ListError,
+ })
+ return
+ }
+ }
+
+ // Type-checking is CPU intensive.
+ cpuLimit <- unit{} // acquire a token
+ defer func() { <-cpuLimit }() // release a token
+
+ typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
+ lpkg.importErrors = nil // no longer needed
+
+ // In go/types go1.21 and go1.22, Checker.Files failed fast with a
+ // "too new" error, without calling tc.Error and without
+ // proceeding to type-check the package (#66525).
+ // We rely on the runtimeVersion error to give the suggested remedy.
+ if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 {
+ if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") {
+ appendError(types.Error{
+ Fset: ld.Fset,
+ Pos: lpkg.Syntax[0].Package,
+ Msg: msg,
+ })
+ }
+ }
+
+ // If !Cgo, the type-checker uses FakeImportC mode, so
+ // it doesn't invoke the importer for import "C",
+ // nor report an error for the import,
+ // or for any undefined C.f reference.
+ // We must detect this explicitly and correctly
+ // mark the package as IllTyped (by reporting an error).
+ // TODO(adonovan): if these errors are annoying,
+ // we could just set IllTyped quietly.
+ if tc.FakeImportC {
+ outer:
+ for _, f := range lpkg.Syntax {
+ for _, imp := range f.Imports {
+ if imp.Path.Value == `"C"` {
+ err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
+ appendError(err)
+ break outer
+ }
+ }
+ }
+ }
+
+ // If types.Checker.Files had an error that was unreported,
+ // make sure to report the unknown error so the package is illTyped.
+ if typErr != nil && len(lpkg.Errors) == 0 {
+ appendError(typErr)
+ }
+
+ // Record accumulated errors.
+ illTyped := len(lpkg.Errors) > 0
+ if !illTyped {
+ for _, imp := range lpkg.Imports {
+ if imp.IllTyped {
+ illTyped = true
+ break
+ }
+ }
+ }
+ lpkg.IllTyped = illTyped
+}
+
+// An importFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls or CPU threads per process.
+var (
+ ioLimit = make(chan unit, 20)
+ cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))
+)
+
+func (ld *loader) parseFile(filename string) (*ast.File, error) {
+ ld.parseCacheMu.Lock()
+ v, ok := ld.parseCache[filename]
+ if ok {
+ // cache hit
+ ld.parseCacheMu.Unlock()
+ <-v.ready
+ } else {
+ // cache miss
+ v = &parseValue{ready: make(chan struct{})}
+ ld.parseCache[filename] = v
+ ld.parseCacheMu.Unlock()
+
+ var src []byte
+ for f, contents := range ld.Config.Overlay {
+ // TODO(adonovan): Inefficient for large overlays.
+ // Do an exact name-based map lookup
+ // (for nonexistent files) followed by a
+ // FileID-based map lookup (for existing ones).
+ if sameFile(f, filename) {
+ src = contents
+ break
+ }
+ }
+ var err error
+ if src == nil {
+ ioLimit <- unit{} // acquire a token
+ src, err = os.ReadFile(filename)
+ <-ioLimit // release a token
+ }
+ if err != nil {
+ v.err = err
+ } else {
+ // Parsing is CPU intensive.
+ cpuLimit <- unit{} // acquire a token
+ v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
+ <-cpuLimit // release a token
+ }
+
+ close(v.ready)
+ }
+ return v.f, v.err
+}
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
+ var (
+ n = len(filenames)
+ parsed = make([]*ast.File, n)
+ errors = make([]error, n)
+ )
+ var g errgroup.Group
+ for i, filename := range filenames {
+ // This creates goroutines unnecessarily in the
+ // cache-hit case, but that case is uncommon.
+ g.Go(func() error {
+ parsed[i], errors[i] = ld.parseFile(filename)
+ return nil
+ })
+ }
+ g.Wait()
+
+ // Eliminate nils, preserving order.
+ var o int
+ for _, f := range parsed {
+ if f != nil {
+ parsed[o] = f
+ o++
+ }
+ }
+ parsed = parsed[:o]
+
+ o = 0
+ for _, err := range errors {
+ if err != nil {
+ errors[o] = err
+ o++
+ }
+ }
+ errors = errors[:o]
+
+ return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+func sameFile(x, y string) bool {
+ if x == y {
+ // It could be the case that y doesn't exist.
+ // For instance, it may be an overlay file that
+ // hasn't been written to disk. To handle that case
+ // let x == y through. (We added the exact absolute path
+ // string to the CompiledGoFiles list, so the unwritten
+ // overlay case implies x==y.)
+ return true
+ }
+ if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+ if xi, err := os.Stat(x); err == nil {
+ if yi, err := os.Stat(y); err == nil {
+ return os.SameFile(xi, yi)
+ }
+ }
+ }
+ return false
+}
+
+// loadFromExportData ensures that type information is present for the specified
+// package, loading it from an export data file on the first request.
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
+ if lpkg.PkgPath == "" {
+ log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
+ }
+
+ // Because gcexportdata.Read has the potential to create or
+ // modify the types.Package for each node in the transitive
+ // closure of dependencies of lpkg, all exportdata operations
+ // must be sequential. (Finer-grained locking would require
+ // changes to the gcexportdata API.)
+ //
+ // The exportMu lock guards the lpkg.Types field and the
+ // types.Package it points to, for each loaderPackage in the graph.
+ //
+ // Not all accesses to Package.Pkg need to be protected by exportMu:
+ // graph ordering ensures that direct dependencies of source
+ // packages are fully loaded before the importer reads their Pkg field.
+ ld.exportMu.Lock()
+ defer ld.exportMu.Unlock()
+
+ if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
+ return nil // cache hit
+ }
+
+ lpkg.IllTyped = true // fail safe
+
+ if lpkg.ExportFile == "" {
+ // Errors while building export data will have been printed to stderr.
+ return fmt.Errorf("no export data file")
+ }
+ f, err := os.Open(lpkg.ExportFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Read gc export data.
+ //
+ // We don't currently support gccgo export data because all
+ // underlying workspaces use the gc toolchain. (Even build
+ // systems that support gccgo don't use it for workspace
+ // queries.)
+ r, err := gcexportdata.NewReader(f)
+ if err != nil {
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+
+ // Build the view.
+ //
+ // The gcexportdata machinery has no concept of package ID.
+ // It identifies packages by their PkgPath, which although not
+ // globally unique is unique within the scope of one invocation
+ // of the linker, type-checker, or gcexportdata.
+ //
+ // So, we must build a PkgPath-keyed view of the global
+ // (conceptually ID-keyed) cache of packages and pass it to
+ // gcexportdata. The view must contain every existing
+ // package that might possibly be mentioned by the
+ // current package---its transitive closure.
+ //
+ // In loadPackage, we unconditionally create a types.Package for
+ // each dependency so that export data loading does not
+ // create new ones.
+ //
+ // TODO(adonovan): it would be simpler and more efficient
+ // if the export data machinery invoked a callback to
+ // get-or-create a package instead of a map.
+ //
+ view := make(map[string]*types.Package) // view seen by gcexportdata
+ seen := make(map[*loaderPackage]bool) // all visited packages
+ var visit func(pkgs map[string]*Package)
+ visit = func(pkgs map[string]*Package) {
+ for _, p := range pkgs {
+ lpkg := ld.pkgs[p.ID]
+ if !seen[lpkg] {
+ seen[lpkg] = true
+ view[lpkg.PkgPath] = lpkg.Types
+ visit(lpkg.Imports)
+ }
+ }
+ }
+ visit(lpkg.Imports)
+
+ viewLen := len(view) + 1 // adding the self package
+ // Parse the export data.
+ // (May modify incomplete packages in view but not create new ones.)
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
+ if err != nil {
+ return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+ }
+ if _, ok := view["go.shape"]; ok {
+ // Account for the pseudopackage "go.shape" that gets
+ // created by generic code.
+ viewLen++
+ }
+ if viewLen != len(view) {
+ log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath)
+ }
+
+ lpkg.Types = tpkg
+ lpkg.IllTyped = false
+ return nil
+}
+
+// impliedLoadMode returns loadMode with its dependencies.
+func impliedLoadMode(loadMode LoadMode) LoadMode {
+ if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 {
+ // All these things require knowing the import graph.
+ loadMode |= NeedImports
+ }
+ if loadMode&NeedTypes != 0 {
+ // Types require the GoVersion from Module.
+ loadMode |= NeedModule
+ }
+
+ return loadMode
+}
+
+func usesExportData(cfg *Config) bool {
+ return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
+}
+
+type unit struct{}
diff --git a/operator/vendor/golang.org/x/tools/go/packages/visit.go b/operator/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 00000000..af6a60d7
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,133 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+ "cmp"
+ "fmt"
+ "iter"
+ "os"
+ "slices"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+//
+// Example:
+//
+// pkgs, err := Load(...)
+// if err != nil { ... }
+// Visit(pkgs, nil, func(pkg *Package) {
+// log.Println(pkg)
+// })
+//
+// In most cases, it is more convenient to use [Postorder]:
+//
+// for pkg := range Postorder(pkgs) {
+// log.Println(pkg)
+// }
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package)
+ visit = func(pkg *Package) {
+ if !seen[pkg] {
+ seen[pkg] = true
+
+ if pre == nil || pre(pkg) {
+ for _, imp := range sorted(pkg.Imports) { // for determinism
+ visit(imp)
+ }
+ }
+
+ if post != nil {
+ post(pkg)
+ }
+ }
+ }
+ for _, pkg := range pkgs {
+ visit(pkg)
+ }
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+ var n int
+ errModules := make(map[*Module]bool)
+ for pkg := range Postorder(pkgs) {
+ for _, err := range pkg.Errors {
+ fmt.Fprintln(os.Stderr, err)
+ n++
+ }
+
+ // Print pkg.Module.Error once if present.
+ mod := pkg.Module
+ if mod != nil && mod.Error != nil && !errModules[mod] {
+ errModules[mod] = true
+ fmt.Fprintln(os.Stderr, mod.Error.Err)
+ n++
+ }
+ }
+ return n
+}
+
+// Postorder returns an iterator over the packages in
+// the import graph whose roots are pkgs.
+// Packages are enumerated in dependencies-first order.
+func Postorder(pkgs []*Package) iter.Seq[*Package] {
+ return func(yield func(*Package) bool) {
+ seen := make(map[*Package]bool)
+ var visit func(*Package) bool
+ visit = func(pkg *Package) bool {
+ if !seen[pkg] {
+ seen[pkg] = true
+ for _, imp := range sorted(pkg.Imports) { // for determinism
+ if !visit(imp) {
+ return false
+ }
+ }
+ if !yield(pkg) {
+ return false
+ }
+ }
+ return true
+ }
+ for _, pkg := range pkgs {
+ if !visit(pkg) {
+ break
+ }
+ }
+ }
+}
+
+// -- copied from golang.org/x/tools/gopls/internal/util/moremaps --
+
+// sorted returns an iterator over the entries of m in key order.
+func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] {
+ // TODO(adonovan): use maps.Sorted if proposal #68598 is accepted.
+ return func(yield func(K, V) bool) {
+ keys := keySlice(m)
+ slices.Sort(keys)
+ for _, k := range keys {
+ if !yield(k, m[k]) {
+ break
+ }
+ }
+ }
+}
+
+// keySlice returns the keys of the map m, like slices.Collect(maps.Keys(m)).
+func keySlice[M ~map[K]V, K comparable, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
diff --git a/operator/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/operator/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
new file mode 100644
index 00000000..6c0c7496
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -0,0 +1,820 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package objectpath defines a naming scheme for types.Objects
+// (that is, named entities in Go programs) relative to their enclosing
+// package.
+//
+// Type-checker objects are canonical, so they are usually identified by
+// their address in memory (a pointer), but a pointer has meaning only
+// within one address space. By contrast, objectpath names allow the
+// identity of an object to be sent from one program to another,
+// establishing a correspondence between types.Object variables that are
+// distinct but logically equivalent.
+//
+// A single object may have multiple paths. In this example,
+//
+// type A struct{ X int }
+// type B A
+//
+// the field X has two paths due to its membership of both A and B.
+// The For(obj) function always returns one of these paths, arbitrarily
+// but consistently.
+package objectpath
+
+import (
+ "fmt"
+ "go/types"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// TODO(adonovan): think about generic aliases.
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+// PO package->object Package.Scope.Lookup
+// OT object->type Object.Type
+// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
+// TO type->object Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+// objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+// which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTrCa];
+// two of these ({,Recv}TypeParams) require an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+// three of these (At,Field,Method) require an integer operand,
+// which is encoded as a string of decimal digits.
+// These indices are stable across different representations
+// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
+//
+// In the example below,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+// p.Lookup("T") T
+// .Type().Underlying().Method(0). f
+// .Type().Results().At(1) b
+// .Type().Field(0) X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+const (
+ // object->type operators
+ opType = '.' // .Type() (Object)
+
+ // type->type operators
+ opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+ opKey = 'K' // .Key() (Map)
+ opParams = 'P' // .Params() (Signature)
+ opResults = 'R' // .Results() (Signature)
+ opUnderlying = 'U' // .Underlying() (Named)
+ opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
+ opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
+ opConstraint = 'C' // .Constraint() (TypeParam)
+ opRhs = 'a' // .Rhs() (Alias)
+
+ // type->object operators
+ opAt = 'A' // .At(i) (Tuple)
+ opField = 'F' // .Field(i) (Struct)
+ opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+ opObj = 'O' // .Obj() (Named, TypeParam)
+)
+
+// For is equivalent to new(Encoder).For(obj).
+//
+// It may be more efficient to reuse a single Encoder across several calls.
+func For(obj types.Object) (Path, error) {
+ return new(Encoder).For(obj)
+}
+
+// An Encoder amortizes the cost of encoding the paths of multiple objects.
+// The zero value of an Encoder is ready to use.
+type Encoder struct {
+ scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects
+}
+
+// For returns the path to an object relative to its package,
+// or an error if the object is not accessible from the package's Scope.
+//
+// The For function guarantees to return a path only for the following objects:
+// - package-level types
+// - exported package-level non-types
+// - methods
+// - parameter and result variables
+// - struct fields
+// These objects are sufficient to define the API of their package.
+// The objects described by a package's export data are drawn from this set.
+//
+// The set of objects accessible from a package's Scope depends on
+// whether the package was produced by type-checking syntax, or
+// reading export data; the latter may have a smaller Scope since
+// export data trims objects that are not reachable from an exported
+// declaration. For example, the For function will return a path for
+// an exported method of an unexported type that is not reachable
+// from any public declaration; this path will cause the Object
+// function to fail if called on a package loaded from export data.
+// TODO(adonovan): is this a bug or feature? Should this package
+// compute accessibility in the same way?
+//
+// For does not return a path for predeclared names, imported package
+// names, local names, and unexported package-level names (except
+// types).
+//
+// Example: given this definition,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// For(X) would return a path that denotes the following sequence of operations:
+//
+// p.Scope().Lookup("T") (TypeName T)
+// .Type().Underlying().Method(0). (method Func f)
+// .Type().Results().At(1) (field Var b)
+// .Type().Field(0) (field Var X)
+//
+// where p is the package (*types.Package) to which X belongs.
+func (enc *Encoder) For(obj types.Object) (Path, error) {
+ pkg := obj.Pkg()
+
+ // This table lists the cases of interest.
+ //
+ // Object Action
+ // ------ ------
+ // nil reject
+ // builtin reject
+ // pkgname reject
+ // label reject
+ // var
+ // package-level accept
+ // func param/result accept
+ // local reject
+ // struct field accept
+ // const
+ // package-level accept
+ // local reject
+ // func
+ // package-level accept
+ // init functions reject
+ // concrete method accept
+ // interface method accept
+ // type
+ // package-level accept
+ // local reject
+ //
+ // The only accessible package-level objects are members of pkg itself.
+ //
+ // The cases are handled in four steps:
+ //
+ // 1. reject nil and builtin
+ // 2. accept package-level objects
+ // 3. reject obviously invalid objects
+ // 4. search the API for the path to the param/result/field/method.
+
+ // 1. reference to nil or builtin?
+ if pkg == nil {
+ return "", fmt.Errorf("predeclared %s has no path", obj)
+ }
+ scope := pkg.Scope()
+
+ // 2. package-level object?
+ if scope.Lookup(obj.Name()) == obj {
+ // Only exported objects (and non-exported types) have a path.
+ // Non-exported types may be referenced by other objects.
+ if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
+ return "", fmt.Errorf("no path for non-exported %v", obj)
+ }
+ return Path(obj.Name()), nil
+ }
+
+ // 3. Not a package-level object.
+ // Reject obviously non-viable cases.
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
+ // With the exception of type parameters, only package-level type names
+ // have a path.
+ return "", fmt.Errorf("no path for %v", obj)
+ }
+ case *types.Const, // Only package-level constants have a path.
+ *types.Label, // Labels are function-local.
+ *types.PkgName: // PkgNames are file-local.
+ return "", fmt.Errorf("no path for %v", obj)
+
+ case *types.Var:
+ // Could be:
+ // - a field (obj.IsField())
+ // - a func parameter or result
+ // - a local var.
+ // Sadly there is no way to distinguish
+ // a param/result from a local
+ // so we must proceed to the find.
+
+ case *types.Func:
+ // A func, if not package-level, must be a method.
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
+ return "", fmt.Errorf("func is not a method: %v", obj)
+ }
+
+ if path, ok := enc.concreteMethod(obj); ok {
+ // Fast path for concrete methods that avoids looping over scope.
+ return path, nil
+ }
+
+ default:
+ panic(obj)
+ }
+
+ // 4. Search the API for the path to the var (field/param/result) or method.
+
+ // First inspect package-level named types.
+ // In the presence of path aliases, these give
+ // the best paths because non-types may
+ // refer to types, but not the reverse.
+ empty := make([]byte, 0, 48) // initial space
+ objs := enc.scopeObjects(scope)
+ for _, o := range objs {
+ tname, ok := o.(*types.TypeName)
+ if !ok {
+ continue // handle non-types in second pass
+ }
+
+ path := append(empty, o.Name()...)
+ path = append(path, opType)
+
+ T := o.Type()
+ if alias, ok := T.(*types.Alias); ok {
+ if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
+ return Path(r), nil
+ }
+
+ } else if tname.IsAlias() {
+ // legacy alias
+ if r := find(obj, T, path); r != nil {
+ return Path(r), nil
+ }
+
+ } else if named, ok := T.(*types.Named); ok {
+ // defined (named) type
+ if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+
+ // Then inspect everything else:
+ // non-types, and declared methods of defined types.
+ for _, o := range objs {
+ path := append(empty, o.Name()...)
+ if _, ok := o.(*types.TypeName); !ok {
+ if o.Exported() {
+ // exported non-type (const, var, func)
+ if r := find(obj, o.Type(), append(path, opType)); r != nil {
+ return Path(r), nil
+ }
+ }
+ continue
+ }
+
+ // Inspect declared methods of defined types.
+ if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
+ path = append(path, opType)
+ // The method index here is always with respect
+ // to the underlying go/types data structures,
+ // which ultimately derives from source order
+ // and must be preserved by export data.
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return Path(path2), nil // found declared method
+ }
+ if r := find(obj, m.Type(), append(path2, opType)); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
+}
+
+func appendOpArg(path []byte, op byte, arg int) []byte {
+ path = append(path, op)
+ path = strconv.AppendInt(path, int64(arg), 10)
+ return path
+}
+
+// concreteMethod returns the path for meth, which must have a non-nil receiver.
+// The second return value indicates success and may be false if the method is
+// an interface method or if it is an instantiated method.
+//
+// This function is just an optimization that avoids the general scope walking
+// approach. You are expected to fall back to the general approach if this
+// function fails.
+func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
+ // Concrete methods can only be declared on package-scoped named types. For
+ // that reason we can skip the expensive walk over the package scope: the
+ // path will always be package -> named type -> method. We can trivially get
+ // the type name from the receiver, and only have to look over the type's
+ // methods to find the method index.
+ //
+ // Methods on generic types require special consideration, however. Consider
+ // the following package:
+ //
+ // L1: type S[T any] struct{}
+ // L2: func (recv S[A]) Foo() { recv.Bar() }
+ // L3: func (recv S[B]) Bar() { }
+ // L4: type Alias = S[int]
+ // L5: func _[T any]() { var s S[int]; s.Foo() }
+ //
+ // The receivers of methods on generic types are instantiations. L2 and L3
+ // instantiate S with the type-parameters A and B, which are scoped to the
+ // respective methods. L4 and L5 each instantiate S with int. Each of these
+ // instantiations has its own method set, full of methods (and thus objects)
+ // with receivers whose types are the respective instantiations. In other
+ // words, we have
+ //
+ // S[A].Foo, S[A].Bar
+ // S[B].Foo, S[B].Bar
+ // S[int].Foo, S[int].Bar
+ //
+ // We may thus be trying to produce object paths for any of these objects.
+ //
+ // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
+ // and S.Bar, which are the paths that this function naturally produces.
+ //
+ // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
+ // don't correspond to the origin methods. For S[int], this is significant.
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo,
+ // not S.Foo. Our function, however, would produce S.Foo, which would
+ // resolve to a different object.
+ //
+ // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
+ // still the correct paths, since only the origin methods have meaningful
+ // paths. But this is likely only true for trivial cases and has edge cases.
+ // Since this function is only an optimization, we err on the side of giving
+ // up, deferring to the slower but definitely correct algorithm. Most users
+ // of objectpath will only be giving us origin methods, anyway, as referring
+ // to instantiated methods is usually not useful.
+
+ if meth.Origin() != meth {
+ return "", false
+ }
+
+ _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv())
+ if named == nil {
+ return "", false
+ }
+
+ if types.IsInterface(named) {
+ // Named interfaces don't have to be package-scoped
+ //
+ // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
+ // methods, too, I think.
+ return "", false
+ }
+
+ // Preallocate space for the name, opType, opMethod, and some digits.
+ name := named.Obj().Name()
+ path := make([]byte, 0, len(name)+8)
+ path = append(path, name...)
+ path = append(path, opType)
+
+ // Method indices are w.r.t. the go/types data structures,
+ // ultimately deriving from source order,
+ // which is preserved by export data.
+ for i := 0; i < named.NumMethods(); i++ {
+ if named.Method(i) == meth {
+ path = appendOpArg(path, opMethod, i)
+ return Path(path), true
+ }
+ }
+
+ // Due to golang/go#59944, go/types fails to associate the receiver with
+ // certain methods on cgo types.
+ //
+ // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
+ // versions gopls supports.
+ return "", false
+ // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
+}
+
+// find finds obj within type T, returning the path to it, or nil if not found.
+//
+// The seen map is used to short circuit cycles through type parameters. If
+// nil, it will be allocated as necessary.
+//
+// The seenMethods map is used internally to short circuit cycles through
+// interface methods, such as occur in the following example:
+//
+// type I interface { f() interface{I} }
+//
+// See golang/go#68046 for details.
+func find(obj types.Object, T types.Type, path []byte) []byte {
+ return (&finder{obj: obj}).find(T, path)
+}
+
+// finder closes over search state for a call to find.
+type finder struct {
+ obj types.Object // the sought object
+ seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
+ seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
+}
+
+func (f *finder) find(T types.Type, path []byte) []byte {
+ switch T := T.(type) {
+ case *types.Alias:
+ return f.find(types.Unalias(T), path)
+ case *types.Basic, *types.Named:
+ // Named types belonging to pkg were handled already,
+ // so T must belong to another package. No path.
+ return nil
+ case *types.Pointer:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Slice:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Array:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Chan:
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Map:
+ if r := f.find(T.Key(), append(path, opKey)); r != nil {
+ return r
+ }
+ return f.find(T.Elem(), append(path, opElem))
+ case *types.Signature:
+ if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
+ return r
+ }
+ if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
+ return r
+ }
+ if r := f.find(T.Params(), append(path, opParams)); r != nil {
+ return r
+ }
+ return f.find(T.Results(), append(path, opResults))
+ case *types.Struct:
+ for i := 0; i < T.NumFields(); i++ {
+ fld := T.Field(i)
+ path2 := appendOpArg(path, opField, i)
+ if fld == f.obj {
+ return path2 // found field var
+ }
+ if r := f.find(fld.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Tuple:
+ for i := 0; i < T.Len(); i++ {
+ v := T.At(i)
+ path2 := appendOpArg(path, opAt, i)
+ if v == f.obj {
+ return path2 // found param/result var
+ }
+ if r := f.find(v.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Interface:
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ if f.seenMethods[m] {
+ return nil
+ }
+ path2 := appendOpArg(path, opMethod, i)
+ if m == f.obj {
+ return path2 // found interface method
+ }
+ if f.seenMethods == nil {
+ f.seenMethods = make(map[*types.Func]bool)
+ }
+ f.seenMethods[m] = true
+ if r := f.find(m.Type(), append(path2, opType)); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.TypeParam:
+ name := T.Obj()
+ if f.seenTParamNames[name] {
+ return nil
+ }
+ if name == f.obj {
+ return append(path, opObj)
+ }
+ if f.seenTParamNames == nil {
+ f.seenTParamNames = make(map[*types.TypeName]bool)
+ }
+ f.seenTParamNames[name] = true
+ if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
+ return r
+ }
+ return nil
+ }
+ panic(T)
+}
+
+func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
+ return (&finder{obj: obj}).findTypeParam(list, path, op)
+}
+
+func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ path2 := appendOpArg(path, op, i)
+ if r := f.find(tparam, path2); r != nil {
+ return r
+ }
+ }
+ return nil
+}
+
+// Object returns the object denoted by path p within the package pkg.
+func Object(pkg *types.Package, p Path) (types.Object, error) {
+ pathstr := string(p)
+ if pathstr == "" {
+ return nil, fmt.Errorf("empty path")
+ }
+
+ var pkgobj, suffix string
+ if dot := strings.IndexByte(pathstr, opType); dot < 0 {
+ pkgobj = pathstr
+ } else {
+ pkgobj = pathstr[:dot]
+ suffix = pathstr[dot:] // suffix starts with "."
+ }
+
+ obj := pkg.Scope().Lookup(pkgobj)
+ if obj == nil {
+ return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
+ }
+
+ // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
+ type hasElem interface {
+ Elem() types.Type
+ }
+ // abstraction of *types.{Named,Signature}
+ type hasTypeParams interface {
+ TypeParams() *types.TypeParamList
+ }
+ // abstraction of *types.{Alias,Named,TypeParam}
+ type hasObj interface {
+ Obj() *types.TypeName
+ }
+
+ // The loop state is the pair (t, obj),
+ // exactly one of which is non-nil, initially obj.
+ // All suffixes start with '.' (the only object->type operation),
+ // followed by optional type->type operations,
+ // then a type->object operation.
+ // The cycle then repeats.
+ var t types.Type
+ for suffix != "" {
+ code := suffix[0]
+ suffix = suffix[1:]
+
+ // Codes [AFMTr] have an integer operand.
+ var index int
+ switch code {
+ case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
+ rest := strings.TrimLeft(suffix, "0123456789")
+ numerals := suffix[:len(suffix)-len(rest)]
+ suffix = rest
+ i, err := strconv.Atoi(numerals)
+ if err != nil {
+ return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+ }
+ index = int(i)
+ case opObj:
+ // no operand
+ default:
+ // The suffix must end with a type->object operation.
+ if suffix == "" {
+ return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+ }
+ }
+
+ if code == opType {
+ if t != nil {
+ return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+ }
+ t = obj.Type()
+ obj = nil
+ continue
+ }
+
+ if t == nil {
+ return nil, fmt.Errorf("invalid path: code %q in object context", code)
+ }
+
+ // Inv: t != nil, obj == nil
+
+ t = types.Unalias(t)
+ switch code {
+ case opElem:
+ hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+ }
+ t = hasElem.Elem()
+
+ case opKey:
+ mapType, ok := t.(*types.Map)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+ }
+ t = mapType.Key()
+
+ case opParams:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Params()
+
+ case opResults:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Results()
+
+ case opUnderlying:
+ named, ok := t.(*types.Named)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+ }
+ t = named.Underlying()
+
+ case opRhs:
+ if alias, ok := t.(*types.Alias); ok {
+ t = aliases.Rhs(alias)
+ } else if false && aliases.Enabled() {
+ // The Enabled check is too expensive, so for now we
+ // simply assume that aliases are not enabled.
+ //
+ // Now that go1.24 is assured, we should be able to
+ // replace this with "if true {", but it causes tests
+ // to fail. TODO(adonovan): investigate.
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
+ }
+
+ case opTypeParam:
+ hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+ }
+ tparams := hasTypeParams.TypeParams()
+ if n := tparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = tparams.At(index)
+
+ case opRecvTypeParam:
+ sig, ok := t.(*types.Signature) // Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ rtparams := sig.RecvTypeParams()
+ if n := rtparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = rtparams.At(index)
+
+ case opConstraint:
+ tparam, ok := t.(*types.TypeParam)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+ }
+ t = tparam.Constraint()
+
+ case opAt:
+ tuple, ok := t.(*types.Tuple)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+ }
+ if n := tuple.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ obj = tuple.At(index)
+ t = nil
+
+ case opField:
+ structType, ok := t.(*types.Struct)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+ }
+ if n := structType.NumFields(); index >= n {
+ return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+ }
+ obj = structType.Field(index)
+ t = nil
+
+ case opMethod:
+ switch t := t.(type) {
+ case *types.Interface:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index) // Id-ordered
+
+ case *types.Named:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index)
+
+ default:
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+ }
+ t = nil
+
+ case opObj:
+ hasObj, ok := t.(hasObj)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+ }
+ obj = hasObj.Obj()
+ t = nil
+
+ default:
+ return nil, fmt.Errorf("invalid path: unknown code %q", code)
+ }
+ }
+
+ if obj == nil {
+ panic(p) // path does not end in an object-valued operator
+ }
+
+ if obj.Pkg() != pkg {
+ return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+ }
+
+ return obj, nil // success
+}
+
+// scopeObjects is a memoization of scope objects.
+// Callers must not modify the result.
+func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object {
+ m := enc.scopeMemo
+ if m == nil {
+ m = make(map[*types.Scope][]types.Object)
+ enc.scopeMemo = m
+ }
+ objs, ok := m[scope]
+ if !ok {
+ names := scope.Names() // allocates and sorts
+ objs = make([]types.Object, len(names))
+ for i, name := range names {
+ objs[i] = scope.Lookup(name)
+ }
+ m[scope] = objs
+ }
+ return objs
+}
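
For context on the encoding described in the vendored objectpath package above, here is a minimal sketch of the For/Object round trip, assuming a tiny package p type-checked in memory; the package source, the chosen field, and the expected path "T.UF0" are illustrative only, not part of this change:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T struct{ X int }
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Encode the path of field X relative to package p:
	// p.Scope().Lookup("T").Type().Underlying().Field(0), encoded as "T.UF0".
	field := pkg.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)
	path, err := objectpath.For(field)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // T.UF0

	// Decode the same path back to the identical object.
	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(obj == field) // true
}
```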
diff --git a/operator/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/operator/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 00000000..5f10f56c
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+ _ "unsafe" // for linkname
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
+//
+// Note: for calls of instantiated functions and methods, Callee returns
+// the corresponding generic function or method on the generic type.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ if obj == nil {
+ return nil
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ fn, _ := obj.(*types.Func)
+ if fn == nil || interfaceMethod(fn) {
+ return nil
+ }
+ return fn
+}
+
+// usedIdent is the implementation of [internal/typesinternal.UsedIdent].
+// It returns the identifier associated with e.
+// See typesinternal.UsedIdent for a fuller description.
+// This function should live in typesinternal, but cannot because it would
+// create an import cycle.
+//
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ if info.Types == nil || info.Uses == nil {
+ panic("one of info.Types or info.Uses is nil; both must be populated")
+ }
+ // Look through type instantiation if necessary.
+ switch d := ast.Unparen(e).(type) {
+ case *ast.IndexExpr:
+ if info.Types[d.Index].IsType() {
+ e = d.X
+ }
+ case *ast.IndexListExpr:
+ e = d.X
+ }
+
+ switch e := ast.Unparen(e).(type) {
+ // info.Uses always has the object we want, even for selector expressions.
+ // We don't need info.Selections.
+ // See go/types/recording.go:recordSelection.
+ case *ast.Ident:
+ return e
+ case *ast.SelectorExpr:
+ return e.Sel
+ }
+ return nil
+}
+
+// interfaceMethod reports whether its argument is a method of an interface.
+// This function should live in typesinternal, but cannot because it would create an import cycle.
+//
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Signature().Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
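
A small, hypothetical sketch of how StaticCallee resolves a call site, assuming a trivial package p with a direct call to f; note that both info.Types and info.Uses must be populated, since the linknamed usedIdent panics otherwise:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p

func f() {}
func g() { f() }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
		Uses:  make(map[*ast.Ident]types.Object),
	}
	if _, err := new(types.Config).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}

	// Walk the AST and resolve the static callee of each call expression.
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println(fn.FullName()) // p.f
			}
		}
		return true
	})
}
```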
diff --git a/operator/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/operator/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 00000000..b81ce0c3
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
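
A minimal sketch of the ordering guarantee, using hand-built packages wired together with (*types.Package).SetImports; the package paths are made up for illustration:

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	a := types.NewPackage("example.com/a", "a")
	b := types.NewPackage("example.com/b", "b")
	c := types.NewPackage("example.com/c", "c")
	b.SetImports([]*types.Package{a})
	c.SetImports([]*types.Package{a, b})

	// Imports come before importers: a, b, c.
	for _, p := range typeutil.Dependencies(c) {
		fmt.Println(p.Path())
	}
}
```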
diff --git a/operator/vendor/golang.org/x/tools/go/types/typeutil/map.go b/operator/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 00000000..f035a0b6
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,460 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as [Map],
+// a hash table that maps [types.Type] to any value.
+package typeutil
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "hash/maphash"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Read-only map operations ([Map.At], [Map.Len], and so on) may
+// safely be called concurrently.
+//
+// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
+// and 69559, if the latter proposals for a generic hash-map type and
+// a types.Hash function are accepted.
+type Map struct {
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value any
+}
+
+// SetHasher has no effect.
+//
+// It is a relic of an optimization that is no longer profitable. Do
+// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
+func (m *Map) SetHasher(Hasher) {}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+func (m *Map) At(key types.Type) any {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value any) (prev any) {
+ if m.table != nil {
+ hash := hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ hash := hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+func (m *Map) Iterate(f func(key types.Type, value any)) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ any) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value any) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+// -- Hasher --
+
+// hash returns the hash of type t.
+// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
+func hash(t types.Type) uint32 {
+ return theHasher.Hash(t)
+}
+
+// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
+// Hashers are stateless, and all are equivalent.
+type Hasher struct{}
+
+var theHasher Hasher
+
+// MakeHasher returns Hasher{}.
+// Hashers are stateless; all are equivalent.
+func MakeHasher() Hasher { return theHasher }
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ return hasher{inGenericSig: false}.hash(t)
+}
+
+// hasher holds the state of a single Hash traversal: whether we are
+// inside the signature of a generic function; this is used to
+// optimize [hasher.hashTypeParam].
+type hasher struct{ inGenericSig bool }
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hash computes the hash of t.
+func (h hasher) hash(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Alias:
+ return h.hash(types.Unalias(t))
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+
+ tparams := t.TypeParams()
+ if n := tparams.Len(); n > 0 {
+ h.inGenericSig = true // affects constraints, params, and results
+
+ for i := range n {
+ tparam := tparams.At(i)
+ hash += 7 * h.hash(tparam.Constraint())
+ }
+ }
+
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Union:
+ return h.hashUnion(t)
+
+ case *types.Interface:
+ // Interfaces are identical if they have the same set of methods, with
+ // identical names and types, and they have the same set of type
+ // restrictions. See go/types.identical for more details.
+ var hash uint32 = 9103
+
+ // Hash methods.
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ // Use shallow hash on method signature to
+ // avoid anonymous interface cycles.
+ hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
+ }
+
+ // Hash type restrictions.
+ terms, err := typeparams.InterfaceTermSet(t)
+ // if err != nil t has invalid type restrictions.
+ if err == nil {
+ hash += h.hashTermSet(terms)
+ }
+
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
+
+ case *types.Named:
+ hash := h.hashTypeName(t.Obj())
+ targs := t.TypeArgs()
+ for i := 0; i < targs.Len(); i++ {
+ targ := targs.At(i)
+ hash += 2 * h.hash(targ)
+ }
+ return hash
+
+ case *types.TypeParam:
+ return h.hashTypeParam(t)
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+
+ panic(fmt.Sprintf("%T: %v", t, t))
+}
+
+func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := range n {
+ hash += 3 * h.hash(tuple.At(i).Type())
+ }
+ return hash
+}
+
+func (h hasher) hashUnion(t *types.Union) uint32 {
+ // Hash type restrictions.
+ terms, err := typeparams.UnionTermSet(t)
+ // if err != nil t has invalid type restrictions. Fall back on a non-zero
+ // hash.
+ if err != nil {
+ return 9151
+ }
+ return h.hashTermSet(terms)
+}
+
+func (h hasher) hashTermSet(terms []*types.Term) uint32 {
+ hash := 9157 + 2*uint32(len(terms))
+ for _, term := range terms {
+ // term order is not significant.
+ termHash := h.hash(term.Type())
+ if term.Tilde() {
+ termHash *= 9161
+ }
+ hash += 3 * termHash
+ }
+ return hash
+}
+
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+ // Within the signature of a generic function, TypeParams are
+ // identical if they have the same index and constraint, so we
+ // hash them based on index.
+ //
+ // When we are outside a generic function, free TypeParams are
+ // identical iff they are the same object, so we can use a
+ // more discriminating hash consistent with object identity.
+ // This optimization saves [Map] about 4% when hashing all the
+ // types.Info.Types in the forward closure of net/http.
+ if !h.inGenericSig {
+ // Optimization: outside a generic function signature,
+		// use a more discriminating hash consistent with object identity.
+ return h.hashTypeName(t.Obj())
+ }
+ return 9173 + 3*uint32(t.Index())
+}
+
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+ // Since types.Identical uses == to compare TypeNames,
+ // the Hash function uses maphash.Comparable.
+ hash := maphash.Comparable(theSeed, tname)
+ return uint32(hash ^ (hash >> 32))
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+// type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h hasher) shallowHash(t types.Type) uint32 {
+ // t is the type of an interface method (Signature),
+ // its params or results (Tuples), or their immediate
+ // elements (mostly Slice, Pointer, Basic, Named),
+ // so there's no need to optimize anything else.
+ switch t := t.(type) {
+ case *types.Alias:
+ return h.shallowHash(types.Unalias(t))
+
+ case *types.Signature:
+ var hash uint32 = 604171
+ if t.Variadic() {
+ hash *= 971767
+ }
+ // The Signature/Tuple recursion is always finite
+ // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+ case *types.Tuple:
+ n := t.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := range n {
+ hash += 53471161 * h.shallowHash(t.At(i).Type())
+ }
+ return hash
+
+ case *types.Basic:
+ return 45212177 * uint32(t.Kind())
+
+ case *types.Array:
+ return 1524181 + 2*uint32(t.Len())
+
+ case *types.Slice:
+ return 2690201
+
+ case *types.Struct:
+ return 3326489
+
+ case *types.Pointer:
+ return 4393139
+
+ case *types.Union:
+ return 562448657
+
+ case *types.Interface:
+ return 2124679 // no recursion here
+
+ case *types.Map:
+ return 9109
+
+ case *types.Chan:
+ return 9127
+
+ case *types.Named:
+ return h.hashTypeName(t.Obj())
+
+ case *types.TypeParam:
+ return h.hashTypeParam(t)
+ }
+ panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
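
A minimal sketch of why Map exists: two structurally identical types built separately are distinct pointers, so a plain Go map would give them separate entries, whereas Map keys on types.Identical plus the structural hash above:

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	t1 := types.NewSlice(types.Typ[types.Int]) // []int
	t2 := types.NewSlice(types.Typ[types.Int]) // another, distinct []int value
	fmt.Println(t1 == t2) // false: different pointers

	var m typeutil.Map // the zero value is an empty, ready-to-use map
	m.Set(t1, "slice of int")
	fmt.Println(m.At(t2)) // slice of int: identical types share one entry
	fmt.Println(m.Len())  // 1
}
```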
diff --git a/operator/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/operator/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 00000000..f7666028
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+ others map[types.Type]*types.MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := types.Unalias(T).(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
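
A minimal sketch of the cache, assuming a small hypothetical package p with one value-receiver and one pointer-receiver method; the zero value is used directly, and repeated queries return the cached method sets instead of recomputing them:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p

type T struct{}

func (T) Value()    {}
func (*T) Pointer() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	var cache typeutil.MethodSetCache
	fmt.Println(cache.MethodSet(T).Len())                   // 1: Value only
	fmt.Println(cache.MethodSet(types.NewPointer(T)).Len()) // 2: Value and Pointer
}
```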
diff --git a/operator/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/operator/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 00000000..9dda6a25
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import (
+ "go/types"
+)
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := types.Unalias(T).(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+
+ }
+ return result
+}
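
A self-contained sketch, assuming the same kind of hypothetical package p as above: the intuitive method set of the value type T also reports the pointer-receiver method, while the strict method set does not:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

const src = `package p

type T struct{}

func (T) Value()    {}
func (*T) Pointer() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	// The strict method set of T has only Value; the intuitive set adds
	// Pointer, since both can be called on an addressable value of type T.
	fmt.Println(types.NewMethodSet(T).Len()) // 1
	for _, sel := range typeutil.IntuitiveMethodSet(T, nil) {
		fmt.Println(sel.Obj().Name()) // Pointer and Value
	}
}
```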
diff --git a/operator/vendor/golang.org/x/tools/internal/aliases/aliases.go b/operator/vendor/golang.org/x/tools/internal/aliases/aliases.go
new file mode 100644
index 00000000..b9425f5a
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -0,0 +1,38 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aliases
+
+import (
+ "go/token"
+ "go/types"
+)
+
+// Package aliases defines backward compatible shims
+// for the types.Alias type representation added in 1.22.
+// This defines placeholders for x/tools until 1.26.
+
+// NewAlias creates a new TypeName in Package pkg that
+// is an alias for the type rhs.
+//
+// The enabled parameter determines whether the resulting [TypeName]'s
+// type is an [types.Alias]. Its value must be the result of a call to
+// [Enabled], which computes the effective value of
+// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
+// function is expensive and should be called once per task (e.g.
+// package import), not once per call to NewAlias.
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
+ if enabled {
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ SetTypeParams(types.NewAlias(tname, rhs), tparams)
+ return tname
+ }
+ if len(tparams) > 0 {
+ panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+ }
+ return types.NewTypeName(pos, pkg, name, rhs)
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/operator/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
new file mode 100644
index 00000000..7716a333
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package aliases
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+)
+
+// Rhs returns the type on the right-hand side of the alias declaration.
+func Rhs(alias *types.Alias) types.Type {
+ if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
+ return alias.Rhs() // go1.23+
+ }
+
+ // go1.22's Alias didn't have the Rhs method,
+ // so Unalias is the best we can do.
+ return types.Unalias(alias)
+}
+
+// TypeParams returns the type parameter list of the alias.
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+ if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+ return alias.TypeParams() // go1.23+
+ }
+ return nil
+}
+
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+ if alias, ok := any(alias).(interface {
+ SetTypeParams(tparams []*types.TypeParam)
+ }); ok {
+ alias.SetTypeParams(tparams) // go1.23+
+ } else if len(tparams) > 0 {
+ panic("cannot set type parameters of an Alias type in go1.22")
+ }
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+ if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+ return alias.TypeArgs() // go1.23+
+ }
+ return nil // empty (go1.22)
+}
+
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+ if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+ return alias.Origin() // go1.23+
+ }
+ return alias // not an instance of a generic alias (go1.22)
+}
+
+// Enabled reports whether [NewAlias] should create [types.Alias] types.
+//
+// This function is expensive! Call it sparingly.
+func Enabled() bool {
+ // The only reliable way to compute the answer is to invoke go/types.
+ // We don't parse the GODEBUG environment variable, because
+ // (a) it's tricky to do so in a manner that is consistent
+ // with the godebug package; in particular, a simple
+ // substring check is not good enough. The value is a
+ // rightmost-wins list of options. But more importantly:
+ // (b) it is impossible to detect changes to the effective
+ // setting caused by os.Setenv("GODEBUG"), as happens in
+ // many tests. Therefore any attempt to cache the result
+ // is just incorrect.
+ fset := token.NewFileSet()
+ f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
+ pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
+ _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
+ return enabled
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/event/core/event.go b/operator/vendor/golang.org/x/tools/internal/event/core/event.go
new file mode 100644
index 00000000..ade5d1e7
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -0,0 +1,80 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package core provides support for event based telemetry.
+package core
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Event holds the information about an event of note that occurred.
+type Event struct {
+ at time.Time
+
+ // As events are often on the stack, storing the first few labels directly
+ // in the event can avoid an allocation at all for the very common cases of
+ // simple events.
+ // The length needs to be large enough to cope with the majority of events
+	// but not so large as to cause undue stack pressure.
+ // A log message with two values will use 3 labels (one for each value and
+ // one for the message itself).
+
+ static [3]label.Label // inline storage for the first few labels
+ dynamic []label.Label // dynamically sized storage for remaining labels
+}
+
+func (ev Event) At() time.Time { return ev.at }
+
+func (ev Event) Format(f fmt.State, r rune) {
+ if !ev.at.IsZero() {
+ fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
+ }
+ for index := 0; ev.Valid(index); index++ {
+ if l := ev.Label(index); l.Valid() {
+ fmt.Fprintf(f, "\n\t%v", l)
+ }
+ }
+}
+
+func (ev Event) Valid(index int) bool {
+ return index >= 0 && index < len(ev.static)+len(ev.dynamic)
+}
+
+func (ev Event) Label(index int) label.Label {
+ if index < len(ev.static) {
+ return ev.static[index]
+ }
+ return ev.dynamic[index-len(ev.static)]
+}
+
+func (ev Event) Find(key label.Key) label.Label {
+ for _, l := range ev.static {
+ if l.Key() == key {
+ return l
+ }
+ }
+ for _, l := range ev.dynamic {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return label.Label{}
+}
+
+func MakeEvent(static [3]label.Label, labels []label.Label) Event {
+ return Event{
+ static: static,
+ dynamic: labels,
+ }
+}
+
+// CloneEvent returns a copy of the event with the time adjusted to at.
+func CloneEvent(ev Event, at time.Time) Event {
+ ev.at = at
+ return ev
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/event/core/export.go b/operator/vendor/golang.org/x/tools/internal/event/core/export.go
new file mode 100644
index 00000000..05f3a9a5
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/core/export.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, Event, label.Map) context.Context
+
+var (
+ exporter unsafe.Pointer
+)
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ p := unsafe.Pointer(&e)
+ if e == nil {
+ // &e is always valid, and so p is always valid, but for the early abort
+ // of ProcessEvent to be efficient it needs to make the nil check on the
+ // pointer without having to dereference it, so we make the nil function
+ // also a nil pointer
+ p = nil
+ }
+ atomic.StorePointer(&exporter, p)
+}
+
+// deliver is called to deliver an event to the supplied exporter.
+// it will fill in the time.
+func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
+ // add the current time to the event
+ ev.at = time.Now()
+ // hand the event off to the current exporter
+ return exporter(ctx, ev, ev)
+}
+
+// Export is called to deliver an event to the global exporter if set.
+func Export(ctx context.Context, ev Event) context.Context {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx
+ }
+ return deliver(ctx, *exporterPtr, ev)
+}
+
+// ExportPair is called to deliver a start event to the supplied exporter.
+// It also returns a function that will deliver the end event to the same
+// exporter.
+// It will fill in the time.
+func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
+ // get the global exporter and abort early if there is not one
+ exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
+ if exporterPtr == nil {
+ return ctx, func() {}
+ }
+ ctx = deliver(ctx, *exporterPtr, begin)
+ return ctx, func() { deliver(ctx, *exporterPtr, end) }
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/event/core/fast.go b/operator/vendor/golang.org/x/tools/internal/event/core/fast.go
new file mode 100644
index 00000000..06c1d461
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/core/fast.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package core
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Log1 takes a message and one label and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log1(ctx context.Context, message string, t1 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ }, nil))
+}
+
+// Log2 takes a message and two labels and delivers a log event to the exporter.
+// It is a customized version of Print that is faster and does no allocation.
+func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
+ Export(ctx, MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Metric1 sends a label event to the exporter with the supplied labels.
+func Metric1(ctx context.Context, t1 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ }, nil))
+}
+
+// Metric2 sends a label event to the exporter with the supplied labels.
+func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
+ return Export(ctx, MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ t1,
+ t2,
+ }, nil))
+}
+
+// Start1 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// Start2 sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
+ return ExportPair(ctx,
+ MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ t1,
+ t2,
+ }, nil),
+ MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/event/doc.go b/operator/vendor/golang.org/x/tools/internal/event/doc.go
new file mode 100644
index 00000000..5dc6e6ba
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package event provides a set of packages that cover the main
+// concepts of telemetry in an implementation agnostic way.
+package event
diff --git a/operator/vendor/golang.org/x/tools/internal/event/event.go b/operator/vendor/golang.org/x/tools/internal/event/event.go
new file mode 100644
index 00000000..4d55e577
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/event.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package event
+
+import (
+ "context"
+
+ "golang.org/x/tools/internal/event/core"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Exporter is a function that handles events.
+// It may return a modified context and event.
+type Exporter func(context.Context, core.Event, label.Map) context.Context
+
+// SetExporter sets the global exporter function that handles all events.
+// The exporter is called synchronously from the event call site, so it should
+// return quickly so as not to hold up user code.
+func SetExporter(e Exporter) {
+ core.SetExporter(core.Exporter(e))
+}
+
+// Log takes a message and a label list and combines them into a single event
+// before delivering them to the exporter.
+func Log(ctx context.Context, message string, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ }, labels))
+}
+
+// IsLog returns true if the event was built by the Log function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLog(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg
+}
+
+// Error takes a message and a label list and combines them into a single event
+// before delivering them to the exporter. It captures the error in the
+// delivered event.
+func Error(ctx context.Context, message string, err error, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Msg.Of(message),
+ keys.Err.Of(err),
+ }, labels))
+}
+
+// IsError returns true if the event was built by the Error function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsError(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Msg &&
+ ev.Label(1).Key() == keys.Err
+}
+
+// Metric sends a metric event to the exporter with the supplied labels.
+func Metric(ctx context.Context, labels ...label.Label) {
+ core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Metric.New(),
+ }, labels))
+}
+
+// IsMetric returns true if the event was built by the Metric function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsMetric(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Metric
+}
+
+// Label sends a label event to the exporter with the supplied labels.
+func Label(ctx context.Context, labels ...label.Label) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Label.New(),
+ }, labels))
+}
+
+// IsLabel returns true if the event was built by the Label function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsLabel(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Label
+}
+
+// Start sends a span start event with the supplied label list to the exporter.
+// It also returns a function that will end the span, which should normally be
+// deferred.
+func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
+ return core.ExportPair(ctx,
+ core.MakeEvent([3]label.Label{
+ keys.Start.Of(name),
+ }, labels),
+ core.MakeEvent([3]label.Label{
+ keys.End.New(),
+ }, nil))
+}
+
+// IsStart returns true if the event was built by the Start function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsStart(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Start
+}
+
+// IsEnd returns true if the event was built by the End function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsEnd(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.End
+}
+
+// Detach returns a context without an associated span.
+// This allows the creation of spans that are not children of the current span.
+func Detach(ctx context.Context) context.Context {
+ return core.Export(ctx, core.MakeEvent([3]label.Label{
+ keys.Detach.New(),
+ }, nil))
+}
+
+// IsDetach returns true if the event was built by the Detach function.
+// It is intended to be used in exporters to identify the semantics of the
+// event when deciding what to do with it.
+func IsDetach(ev core.Event) bool {
+ return ev.Label(0).Key() == keys.Detach
+}
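
For orientation, here is a minimal usage sketch of the API above. It is hedged: the event packages are internal to x/tools, so code like this only compiles inside that module, and the "hits" key is invented purely for illustration. It installs an exporter, opens a span, and emits a log event.

	package main

	import (
		"context"
		"fmt"

		"golang.org/x/tools/internal/event"
		"golang.org/x/tools/internal/event/core"
		"golang.org/x/tools/internal/event/keys"
		"golang.org/x/tools/internal/event/label"
	)

	// hits is an illustrative key; any typed key from the keys package works here.
	var hits = keys.NewInt("hits", "number of cache hits")

	func main() {
		// The exporter runs synchronously at every call site, so it must be cheap.
		event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
			switch {
			case event.IsLog(ev):
				// IsLog guarantees label 0 carries keys.Msg; hits rides along as an extra label.
				fmt.Println("log:", keys.Msg.From(ev.Label(0)), "hits:", hits.Get(lm))
			case event.IsStart(ev):
				fmt.Println("span start:", keys.Start.From(ev.Label(0)))
			case event.IsEnd(ev):
				fmt.Println("span end")
			}
			return ctx
		})

		ctx, done := event.Start(context.Background(), "rebuild")
		defer done()
		event.Log(ctx, "cache probed", hits.Of(3))
	}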
diff --git a/operator/vendor/golang.org/x/tools/internal/event/keys/keys.go b/operator/vendor/golang.org/x/tools/internal/event/keys/keys.go
new file mode 100644
index 00000000..4cfa51b6
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -0,0 +1,564 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+
+ "golang.org/x/tools/internal/event/label"
+)
+
+// Value represents a key for untyped values.
+type Value struct {
+ name string
+ description string
+}
+
+// New creates a new Key for untyped values.
+func New(name, description string) *Value {
+ return &Value{name: name, description: description}
+}
+
+func (k *Value) Name() string { return k.name }
+func (k *Value) Description() string { return k.description }
+
+func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
+ fmt.Fprint(w, k.From(l))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Value) Get(lm label.Map) any {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Value) From(t label.Label) any { return t.UnpackValue() }
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) }
+
+// Tag represents a key for tagging labels that have no value.
+// These are used when the existence of the label is the entire information it
+// carries, such as marking events to be of a specific kind, or from a specific
+// package.
+type Tag struct {
+ name string
+ description string
+}
+
+// NewTag creates a new Key for tagging labels.
+func NewTag(name, description string) *Tag {
+ return &Tag{name: name, description: description}
+}
+
+func (k *Tag) Name() string { return k.name }
+func (k *Tag) Description() string { return k.description }
+
+func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
+
+// New creates a new Label with this key.
+func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
+
+// Int represents a key
+type Int struct {
+ name string
+ description string
+}
+
+// NewInt creates a new Key for int values.
+func NewInt(name, description string) *Int {
+ return &Int{name: name, description: description}
+}
+
+func (k *Int) Name() string { return k.name }
+func (k *Int) Description() string { return k.description }
+
+func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int) Get(lm label.Map) int {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
+
+// Int8 represents a key
+type Int8 struct {
+ name string
+ description string
+}
+
+// NewInt8 creates a new Key for int8 values.
+func NewInt8(name, description string) *Int8 {
+ return &Int8{name: name, description: description}
+}
+
+func (k *Int8) Name() string { return k.name }
+func (k *Int8) Description() string { return k.description }
+
+func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int8) Get(lm label.Map) int8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
+
+// Int16 represents a key
+type Int16 struct {
+ name string
+ description string
+}
+
+// NewInt16 creates a new Key for int16 values.
+func NewInt16(name, description string) *Int16 {
+ return &Int16{name: name, description: description}
+}
+
+func (k *Int16) Name() string { return k.name }
+func (k *Int16) Description() string { return k.description }
+
+func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int16) Get(lm label.Map) int16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
+
+// Int32 represents a key
+type Int32 struct {
+ name string
+ description string
+}
+
+// NewInt32 creates a new Key for int32 values.
+func NewInt32(name, description string) *Int32 {
+ return &Int32{name: name, description: description}
+}
+
+func (k *Int32) Name() string { return k.name }
+func (k *Int32) Description() string { return k.description }
+
+func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int32) Get(lm label.Map) int32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
+
+// Int64 represents a key
+type Int64 struct {
+ name string
+ description string
+}
+
+// NewInt64 creates a new Key for int64 values.
+func NewInt64(name, description string) *Int64 {
+ return &Int64{name: name, description: description}
+}
+
+func (k *Int64) Name() string { return k.name }
+func (k *Int64) Description() string { return k.description }
+
+func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendInt(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Int64) Get(lm label.Map) int64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
+
+// UInt represents a key
+type UInt struct {
+ name string
+ description string
+}
+
+// NewUInt creates a new Key for uint values.
+func NewUInt(name, description string) *UInt {
+ return &UInt{name: name, description: description}
+}
+
+func (k *UInt) Name() string { return k.name }
+func (k *UInt) Description() string { return k.description }
+
+func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt) Get(lm label.Map) uint {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
+
+// UInt8 represents a key
+type UInt8 struct {
+ name string
+ description string
+}
+
+// NewUInt8 creates a new Key for uint8 values.
+func NewUInt8(name, description string) *UInt8 {
+ return &UInt8{name: name, description: description}
+}
+
+func (k *UInt8) Name() string { return k.name }
+func (k *UInt8) Description() string { return k.description }
+
+func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt8) Get(lm label.Map) uint8 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
+
+// UInt16 represents a key
+type UInt16 struct {
+ name string
+ description string
+}
+
+// NewUInt16 creates a new Key for uint16 values.
+func NewUInt16(name, description string) *UInt16 {
+ return &UInt16{name: name, description: description}
+}
+
+func (k *UInt16) Name() string { return k.name }
+func (k *UInt16) Description() string { return k.description }
+
+func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt16) Get(lm label.Map) uint16 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
+
+// UInt32 represents a key
+type UInt32 struct {
+ name string
+ description string
+}
+
+// NewUInt32 creates a new Key for uint32 values.
+func NewUInt32(name, description string) *UInt32 {
+ return &UInt32{name: name, description: description}
+}
+
+func (k *UInt32) Name() string { return k.name }
+func (k *UInt32) Description() string { return k.description }
+
+func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt32) Get(lm label.Map) uint32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
+
+// UInt64 represents a key
+type UInt64 struct {
+ name string
+ description string
+}
+
+// NewUInt64 creates a new Key for uint64 values.
+func NewUInt64(name, description string) *UInt64 {
+ return &UInt64{name: name, description: description}
+}
+
+func (k *UInt64) Name() string { return k.name }
+func (k *UInt64) Description() string { return k.description }
+
+func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendUint(buf, k.From(l), 10))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *UInt64) Get(lm label.Map) uint64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
+
+// Float32 represents a key
+type Float32 struct {
+ name string
+ description string
+}
+
+// NewFloat32 creates a new Key for float32 values.
+func NewFloat32(name, description string) *Float32 {
+ return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+ return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+ return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+ name string
+ description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+ return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+ return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+ return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+ name string
+ description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+ return &String{name: name, description: description}
+}
+
+func (k *String) Name() string { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+ name string
+ description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+ return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+ w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Boolean) Of(v bool) label.Label {
+ if v {
+ return label.Of64(k, 1)
+ }
+ return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+ name string
+ description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+ return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+ io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+ if t := lm.Find(k); t.Valid() {
+ return k.From(t)
+ }
+ return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+ err, _ := t.UnpackValue().(error)
+ return err
+}
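
A small usage sketch of the typed keys above (hedged: keys is internal to x/tools, and the key names here are invented): define keys once, pack values into labels with Of, and read them back through a label.Map with Get, which returns the zero value when the key is absent.

	package main

	import (
		"fmt"

		"golang.org/x/tools/internal/event/keys"
		"golang.org/x/tools/internal/event/label"
	)

	var (
		attempts = keys.NewInt("attempts", "number of attempts")
		path     = keys.NewString("path", "file being processed")
		cached   = keys.NewBoolean("cached", "whether the result came from cache")
	)

	func main() {
		lm := label.NewMap(attempts.Of(3), path.Of("go.mod"), cached.Of(true))

		fmt.Println(attempts.Get(lm)) // 3
		fmt.Println(path.Get(lm))     // go.mod
		fmt.Println(cached.Get(lm))   // true
	}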
diff --git a/operator/vendor/golang.org/x/tools/internal/event/keys/standard.go b/operator/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 00000000..7e958665
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+ // Msg is a key used to add message strings to label lists.
+ Msg = NewString("message", "a readable message")
+ // Label is a key used to indicate an event adds labels to the context.
+ Label = NewTag("label", "a label context marker")
+ // Start is used for things like traces that have a name.
+ Start = NewString("start", "span start")
+ // End is a key used to mark the end of a span.
+ End = NewTag("end", "a span end marker")
+ // Detach is a key used to mark an event that detaches a context from its span.
+ Detach = NewTag("detach", "a span detach marker")
+ // Err is a key used to add error values to label lists.
+ Err = NewError("error", "an error that occurred")
+ // Metric is a key used to indicate an event records metrics.
+ Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/operator/vendor/golang.org/x/tools/internal/event/keys/util.go b/operator/vendor/golang.org/x/tools/internal/event/keys/util.go
new file mode 100644
index 00000000..c0e8e731
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/keys/util.go
@@ -0,0 +1,21 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+import (
+ "sort"
+ "strings"
+)
+
+// Join returns a canonical join of the keys in S:
+// a sorted comma-separated string list.
+func Join[S ~[]T, T ~string](s S) string {
+ strs := make([]string, 0, len(s))
+ for _, v := range s {
+ strs = append(strs, string(v))
+ }
+ sort.Strings(strs)
+ return strings.Join(strs, ",")
+}
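
A tiny illustration of Join (the build-tag strings are made up): because the input is sorted before joining, the result is independent of input order and can serve as a stable map or cache key.

	package main

	import (
		"fmt"

		"golang.org/x/tools/internal/event/keys"
	)

	func main() {
		fmt.Println(keys.Join([]string{"linux", "amd64", "cgo"})) // amd64,cgo,linux
	}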
diff --git a/operator/vendor/golang.org/x/tools/internal/event/label/label.go b/operator/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 00000000..92a39105
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,214 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "slices"
+ "unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only, the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+ // Name returns the key name.
+ Name() string
+ // Description returns a string that can be used to describe the value.
+ Description() string
+
+ // Format is used in formatting to append the value of the label to the
+ // supplied buffer.
+ // The formatter may use the supplied buf as a scratch area to avoid
+ // allocations.
+ Format(w io.Writer, buf []byte, l Label)
+}
+
+// Label holds a key and value pair.
+// It is normally used when passing around lists of labels.
+type Label struct {
+ key Key
+ packed uint64
+ untyped any
+}
+
+// Map is the interface to a collection of Labels indexed by key.
+type Map interface {
+ // Find returns the label that matches the supplied key.
+ Find(key Key) Label
+}
+
+// List is the interface to something that provides an iterable
+// list of labels.
+// Iteration should start from 0 and continue until Valid returns false.
+type List interface {
+ // Valid returns true if the index is within range for the list.
+ // It does not imply the label at that index will itself be valid.
+ Valid(index int) bool
+ // Label returns the label at the given index.
+ Label(index int) Label
+}
+
+// list implements LabelList for a list of Labels.
+type list struct {
+ labels []Label
+}
+
+// filter wraps a LabelList filtering out specific labels.
+type filter struct {
+ keys []Key
+ underlying List
+}
+
+// listMap implements LabelMap for a simple list of labels.
+type listMap struct {
+ labels []Label
+}
+
+// mapChain implements LabelMap for a list of underlying LabelMap.
+type mapChain struct {
+ maps []Map
+}
+
+// OfValue creates a new label from the key and value.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} }
+
+// UnpackValue assumes the label was built using LabelOfValue and returns the value
+// that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackValue() any { return t.untyped }
+
+// Of64 creates a new label from a key and a uint64. This is often
+// used for non uint64 values that can be packed into a uint64.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
+
+// Unpack64 assumes the label was built using LabelOf64 and returns the value that
+// was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) Unpack64() uint64 { return t.packed }
+
+type stringptr unsafe.Pointer
+
+// OfString creates a new label from a key and a string.
+// This method is for implementing new key types, label creation should
+// normally be done with the Of method of the key.
+func OfString(k Key, v string) Label {
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ return Label{
+ key: k,
+ packed: uint64(hdr.Len),
+ untyped: stringptr(hdr.Data),
+ }
+}
+
+// UnpackString assumes the label was built using LabelOfString and returns the
+// value that was passed to that constructor.
+// This method is for implementing new key types, for type safety normal
+// access should be done with the From method of the key.
+func (t Label) UnpackString() string {
+ var v string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ hdr.Data = uintptr(t.untyped.(stringptr))
+ hdr.Len = int(t.packed)
+ return v
+}
+
+// Valid returns true if the Label is a valid one (it has a key).
+func (t Label) Valid() bool { return t.key != nil }
+
+// Key returns the key of this Label.
+func (t Label) Key() Key { return t.key }
+
+// Format is used for debug printing of labels.
+func (t Label) Format(f fmt.State, r rune) {
+ if !t.Valid() {
+ io.WriteString(f, `nil`)
+ return
+ }
+ io.WriteString(f, t.Key().Name())
+ io.WriteString(f, "=")
+ var buf [128]byte
+ t.Key().Format(f, buf[:0], t)
+}
+
+func (l *list) Valid(index int) bool {
+ return index >= 0 && index < len(l.labels)
+}
+
+func (l *list) Label(index int) Label {
+ return l.labels[index]
+}
+
+func (f *filter) Valid(index int) bool {
+ return f.underlying.Valid(index)
+}
+
+func (f *filter) Label(index int) Label {
+ l := f.underlying.Label(index)
+ if slices.Contains(f.keys, l.Key()) {
+ return Label{}
+ }
+ return l
+}
+
+func (lm listMap) Find(key Key) Label {
+ for _, l := range lm.labels {
+ if l.Key() == key {
+ return l
+ }
+ }
+ return Label{}
+}
+
+func (c mapChain) Find(key Key) Label {
+ for _, src := range c.maps {
+ l := src.Find(key)
+ if l.Valid() {
+ return l
+ }
+ }
+ return Label{}
+}
+
+var emptyList = &list{}
+
+func NewList(labels ...Label) List {
+ if len(labels) == 0 {
+ return emptyList
+ }
+ return &list{labels: labels}
+}
+
+func Filter(l List, keys ...Key) List {
+ if len(keys) == 0 {
+ return l
+ }
+ return &filter{keys: keys, underlying: l}
+}
+
+func NewMap(labels ...Label) Map {
+ return listMap{labels: labels}
+}
+
+func MergeMaps(srcs ...Map) Map {
+ var nonNil []Map
+ for _, src := range srcs {
+ if src != nil {
+ nonNil = append(nonNil, src)
+ }
+ }
+ if len(nonNil) == 1 {
+ return nonNil[0]
+ }
+ return mapChain{maps: nonNil}
+}
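
A brief sketch of the Map and List helpers above (hedged: in-module code with invented keys): MergeMaps consults its arguments in order, so earlier maps take priority, and Filter hides labels for the given keys while leaving the rest of the list intact.

	package main

	import (
		"fmt"

		"golang.org/x/tools/internal/event/keys"
		"golang.org/x/tools/internal/event/label"
	)

	var (
		user = keys.NewString("user", "requesting user")
		role = keys.NewString("role", "granted role")
	)

	func main() {
		defaults := label.NewMap(user.Of("anonymous"), role.Of("viewer"))
		request := label.NewMap(user.Of("alice"))

		// Find walks the chain in order, so request wins over defaults for "user".
		merged := label.MergeMaps(request, defaults)
		fmt.Println(user.Get(merged), role.Get(merged)) // alice viewer

		// Filter returns an invalid Label for filtered keys, so callers must check Valid.
		filtered := label.Filter(label.NewList(user.Of("alice"), role.Of("viewer")), user)
		for i := 0; filtered.Valid(i); i++ {
			if l := filtered.Label(i); l.Valid() {
				fmt.Println(l) // role="viewer"
			}
		}
	}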
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
new file mode 100644
index 00000000..734c4619
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains the remaining vestiges of
+// $GOROOT/src/go/internal/gcimporter/bimport.go.
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sync"
+)
+
+func errorf(format string, args ...any) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*fileInfo
+}
+
+type fileInfo struct {
+ file *token.File
+ lastline int
+}
+
+const maxlines = 64 * 1024
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we reserve maxlines
+ // positions per file. We delay calling token.File.SetLines until all
+ // positions have been calculated (by way of fakeFileSet.setLines), so that
+ // we can avoid setting unnecessary lines. See also golang/go#46586.
+ f := s.files[file]
+ if f == nil {
+ f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
+ s.files[file] = f
+ }
+ if line > maxlines {
+ line = 1
+ }
+ if line > f.lastline {
+ f.lastline = line
+ }
+
+ // Return a fake position assuming that f.file consists only of newlines.
+ return token.Pos(f.file.Base() + line - 1)
+}
+
+func (s *fakeFileSet) setLines() {
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ for _, f := range s.files {
+ f.file.SetLines(fakeLines[:f.lastline])
+ }
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types.RecvOnly
+ case 2 /* Csend */ :
+ return types.SendOnly
+ case 3 /* Cboth */ :
+ return types.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
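
The fakeFileSet machinery above is easiest to see with a sketch (hypothetical, and necessarily inside package gcimporter itself since the types are unexported): synthesize every needed position first, then call setLines once to commit the sparse line tables.

	func exampleFakePositions() {
		fset := token.NewFileSet()
		fake := fakeFileSet{fset: fset, files: make(map[string]*fileInfo)}

		pA := fake.pos("a.go", 10, 1) // the column argument is currently unused
		pB := fake.pos("b.go", 3, 1)
		fake.setLines() // must run after all positions have been requested

		fmt.Println(fset.Position(pA)) // a.go:10:1
		fmt.Println(fset.Position(pB)) // b.go:3:1
	}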
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
new file mode 100644
index 00000000..5662a311
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -0,0 +1,421 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
+
+package gcimporter
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "go/build"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying cmd/compile created archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+// This returns the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+ arsize, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ size = int64(arsize)
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ size -= int64(len(objapi))
+ for _, h := range headers {
+ size -= int64(len(h))
+ }
+
+ // Check for the binary export data section header "$$B\n".
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ hdr := string(line)
+ if hdr != "$$B\n" {
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+ size -= int64(len(hdr))
+
+ // For files with a binary export data header "$$B\n",
+ // these are always terminated by an end-of-section marker "\n$$\n".
+ // So the last bytes must always be this constant.
+ //
+ // The end-of-section marker is not a part of the export data itself.
+ // Do not include these in size.
+ //
+ // It would be nice to have a sanity check that the final bytes after
+ // the export data are indeed the end-of-section marker. The split
+ // of gcexportdata.NewReader and gcexportdata.Read makes checking this
+ // ugly, so gcimporter gives up enforcing this. The compiler and go/types
+ // importer do enforce this, which seems good enough.
+ const endofsection = "\n$$\n"
+ size -= int64(len(endofsection))
+
+ if size < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ return
+ }
+
+ return
+}
+
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+// - An archive file containing a package definition file.
+// - The package definition file contains headers followed by a data section.
+// Headers are lines (≤ 4kb) that do not start with "$$".
+// - The data section starts with "$$B\n" followed by export data followed
+// by an end of section marker "\n$$\n". (The section start "$$\n" is no
+// longer supported.)
+// - The export data starts with a format byte ('u') followed by the export
+// data in the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive files are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+// | \n | ar file signature
+// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+// | go object <...>\n | objabi header
+// | \n | other headers such as build id
+// | $$B\n | binary format marker
+// | u\n | unified export
+// | $$\n | end-of-section marker
+// | [optional padding] | padding byte (0x0A) if size is odd
+// | [ar file header] | other ar files
+// | [ar file data] |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+ // We historically guaranteed headers at the default buffer size (4096) work.
+ // This ensures we can use ReadSlice throughout.
+ const minBufferSize = 4096
+ r = bufio.NewReaderSize(r, minBufferSize)
+
+ size, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ n := size
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ n -= len(objapi)
+ for _, h := range headers {
+ n -= len(h)
+ }
+
+ hdrlen, err := ReadExportDataHeader(r)
+ if err != nil {
+ return
+ }
+ n -= hdrlen
+
+ // size also includes the end of section marker. Remove that many bytes from the end.
+ const marker = "\n$$\n"
+ n -= len(marker)
+
+ if n < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+ return
+ }
+
+ // Read n bytes from buf.
+ data = make([]byte, n)
+ _, err = io.ReadFull(r, data)
+ if err != nil {
+ return
+ }
+
+ // Check for marker at the end.
+ var suffix [len(marker)]byte
+ _, err = io.ReadFull(r, suffix[:])
+ if err != nil {
+ return
+ }
+ if s := string(suffix[:]); s != marker {
+ err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+ return
+ }
+
+ return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+ // Uses ReadSlice to limit risk of malformed inputs.
+
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ // Is the first line an archive file signature?
+ if string(line) != "!\n" {
+ err = fmt.Errorf("not the start of an archive file (%q)", line)
+ return
+ }
+
+ // package export block should be first
+ size = readArchiveHeader(r, "__.PKGDEF")
+ if size <= 0 {
+ err = fmt.Errorf("not a package file")
+ return
+ }
+
+ return
+}
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+ // line is a temporary buffer for headers.
+ // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+ var line []byte
+
+ // objapi header should be the first line
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ objapi = string(line)
+
+ // objapi header begins with "go object ".
+ if !strings.HasPrefix(objapi, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", objapi)
+ return
+ }
+
+ // process remaining object header lines
+ for {
+ // check for an end of section marker "$$"
+ line, err = r.Peek(2)
+ if err != nil {
+ return
+ }
+ if string(line) == "$$" {
+ return // stop
+ }
+
+ // read next header
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ headers = append(headers, string(line))
+ }
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+ // Read export data header.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+
+ hdr := string(line)
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+ return
+
+ case "$$B\n":
+ var format byte
+ format, err = r.ReadByte()
+ if err != nil {
+ return
+ }
+ // The unified export format starts with a 'u'.
+ switch format {
+ case 'u':
+ default:
+ // Older no longer supported export formats include:
+ // indexed export format which started with an 'i'; and
+ // the older binary export format which started with a 'c',
+ // 'd', or 'v' (from "version").
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+ return
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+
+ n = len(hdr) + 1 // + 1 is for 'u'
+ return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+ // TODO(taking): Move internal/exportdata.FindPkg into its own file,
+ // and then this copy into a _test package.
+ if path == "" {
+ return "", "", errors.New("path is empty")
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ if bp.Goroot && bp.Dir != "" {
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
+ }
+ goto notfound
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ }
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
+ }
+ }
+
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+//
+// lookupGorootExport is only used in tests within x/tools.
+func lookupGorootExport(pkgDir string) (string, error) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ err error
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+ listOnce.Do(func() {
+ cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+ var output []byte
+ output, err = cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, err
+ })
+ }
+
+ return f.(func() (string, error))()
+}
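
The functions above are the plumbing behind the public golang.org/x/tools/go/gcexportdata reader. As a hedged sketch of that public entry point (note that gcexportdata.Find relies on GOPATH-era go/build lookup and may return an empty filename under modules):

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
		"log"
		"os"

		"golang.org/x/tools/go/gcexportdata"
	)

	func main() {
		// Locate the archive (.a) or object file holding export data for "fmt".
		filename, path := gcexportdata.Find("fmt", ".")
		if filename == "" {
			log.Fatalf("no export data found for %s", path)
		}

		f, err := os.Open(filename)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// NewReader skips the archive and object headers described above and
		// positions the reader at the export data section.
		r, err := gcexportdata.NewReader(f)
		if err != nil {
			log.Fatal(err)
		}

		fset := token.NewFileSet()
		imports := make(map[string]*types.Package)
		pkg, err := gcexportdata.Read(r, fset, imports, path)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(pkg.Path(), "exports", len(pkg.Scope().Names()), "names")
	}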
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
new file mode 100644
index 00000000..3dbd21d1
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -0,0 +1,108 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
+
+// Package gcimporter provides various functions for reading
+// gc-generated object files that can be used to implement the
+// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
+package gcimporter // import "golang.org/x/tools/internal/gcimporter"
+
+import (
+ "bufio"
+ "fmt"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+)
+
+const (
+ // Enable debug during development: it adds some additional checks, and
+ // prevents errors from being recovered.
+ debug = false
+
+ // If trace is set, debugging output is printed to std out.
+ trace = false
+)
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+// Import is only used in tests.
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+ var rc io.ReadCloser
+ var id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ var filename string
+ filename, id, err = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types.Unsafe, nil
+ }
+ return nil, err
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ buf := bufio.NewReader(rc)
+ data, err := ReadUnified(buf)
+ if err != nil {
+ err = fmt.Errorf("import %q: %v", path, err)
+ return
+ }
+
+ // unified: emitted by cmd/compile since go1.20.
+ _, pkg, err = UImportData(fset, packages, data, id)
+
+ return
+}
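
Import above is test-only, so a usage sketch has to live in a test inside x/tools itself (the internal import path cannot be used elsewhere); the test name and skip behavior below are illustrative.

	package gcimporter_test

	import (
		"go/token"
		"go/types"
		"testing"

		"golang.org/x/tools/internal/gcimporter"
	)

	func TestImportFmt(t *testing.T) {
		fset := token.NewFileSet()
		packages := make(map[string]*types.Package)

		// With lookup == nil, Import resolves "fmt" via FindPkg, so this only
		// works where export data is available on disk.
		pkg, err := gcimporter.Import(fset, packages, "fmt", ".", nil)
		if err != nil {
			t.Skipf("no export data available: %v", err)
		}
		if !pkg.Complete() {
			t.Errorf("package %s is not complete", pkg.Path())
		}
	}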
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
new file mode 100644
index 00000000..4a4357d2
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -0,0 +1,1595 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A' or 'B'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'B'
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // unionType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "reflect"
+ "slices"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+)
+
+// IExportShallow encodes "shallow" export data for the specified package.
+//
+// For types, we use "shallow" export data. Historically, the Go
+// compiler always produced a summary of the types for a given package
+// that included types from other packages that it indirectly
+// referenced: "deep" export data. This had the advantage that the
+// compiler (and analogous tools such as gopls) need only load one
+// file per direct import. However, it meant that the files tended to
+// get larger based on the level of the package in the import
+// graph. For example, higher-level packages in the kubernetes module
+// have over 1MB of "deep" export data, even when they have almost no
+// content of their own, merely because they mention a major type that
+// references many others. In pathological cases the export data was
+// 300x larger than the source for a package due to this quadratic
+// growth.
+//
+// "Shallow" export data means that the serialized types describe only
+// a single package. If those types mention types from other packages,
+// the type checker may need to request additional packages beyond
+// just the direct imports. Type information for the entire transitive
+// closure of imports is provided (lazily) by the DAG.
+//
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IExportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it is used for reporting
+// bugs (e.g. recovered panics) encountered during export, enabling us
+// to obtain via telemetry the stack that would otherwise be lost by
+// merely returning an error.
+func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
+ // In principle this operation can only fail if out.Write fails,
+ // but that's impossible for bytes.Buffer---and as a matter of
+ // fact iexportCommon doesn't even check for I/O errors.
+ // TODO(adonovan): handle I/O errors properly.
+ // TODO(adonovan): use byte slices throughout, avoiding copying.
+ const bundle, shallow = false, true
+ var out bytes.Buffer
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf)
+ return out.Bytes(), err
+}
+
+// IImportShallow decodes "shallow" types.Package data encoded by
+// [IExportShallow] in the same executable. This function cannot import data
+// from cmd/compile or gcexportdata.Write.
+//
+// The importer calls getPackages to obtain package symbols for all
+// packages mentioned in the export data, including the one being
+// decoded.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during import.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) {
+ const bundle = false
+ const shallow = true
+ pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf)
+ if err != nil {
+ return nil, err
+ }
+ return pkgs[0], nil
+}
+
+// ReportFunc is the type of a function used to report formatted bugs.
+type ReportFunc = func(string, ...any)
+
+// Current bundled export format version. Increase with each format change.
+// 0: initial implementation
+const bundleVersion = 0
+
+// IExportData writes indexed export data for pkg to out.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
+func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
+ const bundle, shallow = false, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil)
+}
+
+// IExportBundle writes an indexed export bundle for pkgs to out.
+func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
+ const bundle, shallow = true, false
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil)
+}
+
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) {
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ // Report the stack via telemetry (see #71067).
+ if reportf != nil {
+ reportf("panic in exporter")
+ }
+ if ierr, ok := e.(internalError); ok {
+ // internalError usually means we exported a
+ // bad go/types data structure: a violation
+ // of an implicit precondition of Export.
+ err = ierr
+ return
+ }
+ // Not an internal error; panic again.
+ panic(e)
+ }
+ }()
+ }
+
+ p := iexporter{
+ fset: fset,
+ version: version,
+ shallow: shallow,
+ allPkgs: map[*types.Package]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[types.Object]uint64{},
+ tparamNames: map[types.Object]string{},
+ typIndex: map[types.Type]uint64{},
+ }
+ if !bundle {
+ p.localpkg = pkgs[0]
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, pkg := range pkgs {
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ if token.IsExported(name) {
+ p.pushDecl(scope.Lookup(name))
+ }
+ }
+
+ if bundle {
+ // Ensure pkg and its imports are included in the index.
+ p.allPkgs[pkg] = true
+ for _, imp := range pkg.Imports() {
+ p.allPkgs[imp] = true
+ }
+ }
+ }
+
+ // Loop until no more work.
+ for !p.declTodo.empty() {
+ p.doDecl(p.declTodo.popHead())
+ }
+
+ // Produce index of offset of each file record in files.
+ var files intWriter
+ var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+ if p.shallow {
+ fileOffset = make([]uint64, len(p.fileInfos))
+ for i, info := range p.fileInfos {
+ fileOffset[i] = uint64(files.Len())
+ p.encodeFile(&files, info.file, info.needed)
+ }
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex)
+
+ if bundle {
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.pkg(pkg)
+ imps := pkg.Imports()
+ w.uint64(uint64(len(imps)))
+ for _, imp := range imps {
+ w.pkg(imp)
+ }
+ }
+ }
+ w.flush()
+
+ // Assemble header.
+ var hdr intWriter
+ if bundle {
+ hdr.uint64(bundleVersion)
+ }
+ hdr.uint64(uint64(p.version))
+ hdr.uint64(uint64(p.strings.Len()))
+ if p.shallow {
+ hdr.uint64(uint64(files.Len()))
+ hdr.uint64(uint64(len(fileOffset)))
+ for _, offset := range fileOffset {
+ hdr.uint64(offset)
+ }
+ }
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ io.Copy(out, &hdr)
+ io.Copy(out, &p.strings)
+ if p.shallow {
+ io.Copy(out, &files)
+ }
+ io.Copy(out, &p.data0)
+
+ return nil
+}
+
+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+ _ = needed[0] // precondition: needed is non-empty
+
+ w.uint64(p.stringOff(file.Name()))
+
+ size := uint64(file.Size())
+ w.uint64(size)
+
+ // Sort the set of needed offsets. Duplicates are harmless.
+ slices.Sort(needed)
+
+ lines := file.Lines() // byte offset of each line start
+ w.uint64(uint64(len(lines)))
+
+ // Rather than record the entire array of line start offsets,
+ // we save only a sparse list of (index, offset) pairs for
+ // the start of each line that contains a needed position.
+ var sparse [][2]int // (index, offset) pairs
+outer:
+ for i, lineStart := range lines {
+ lineEnd := size
+ if i < len(lines)-1 {
+ lineEnd = uint64(lines[i+1])
+ }
+ // Does this line contain a needed offset?
+ if needed[0] < lineEnd {
+ sparse = append(sparse, [2]int{i, lineStart})
+ for needed[0] < lineEnd {
+ needed = needed[1:]
+ if len(needed) == 0 {
+ break outer
+ }
+ }
+ }
+ }
+
+ // Delta-encode the (index, offset) pairs.
+ w.uint64(uint64(len(sparse)))
+ var prev [2]int
+ for _, pair := range sparse {
+ w.uint64(uint64(pair[0] - prev[0]))
+ w.uint64(uint64(pair[1] - prev[1]))
+ prev = pair
+ }
+}
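+
+// Editorial worked example (not part of the upstream sources): for a 100-byte
+// file with line starts at offsets [0, 10, 40, 70] and needed offsets
+// {12, 45}, only lines 1 and 2 contain needed positions, so
+// sparse = [(1,10), (2,40)]. encodeFile therefore emits: name, size=100,
+// len(lines)=4, len(sparse)=2, then the delta pairs (1,10) and (1,30)
+// (index delta 2-1=1, offset delta 40-10=30).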
+
+// writeIndex writes out the object index: for each referenced package, a
+// complete description (name and height) followed by the names and data
+// offsets of its indexed objects. The index is also read by non-compiler
+// tools.
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+ type pkgObj struct {
+ obj types.Object
+ name string // qualified name; differs from obj.Name for type params
+ }
+ // Build a map from packages to objects from that package.
+ pkgObjs := map[*types.Package][]pkgObj{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if w.p.localpkg != nil {
+ pkgObjs[w.p.localpkg] = nil
+ }
+ for pkg := range w.p.allPkgs {
+ pkgObjs[pkg] = nil
+ }
+
+ for obj := range index {
+ name := w.p.exportName(obj)
+ pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name})
+ }
+
+ var pkgs []*types.Package
+ for pkg, objs := range pkgObjs {
+ pkgs = append(pkgs, pkg)
+
+ sort.Slice(objs, func(i, j int) bool {
+ return objs[i].name < objs[j].name
+ })
+ }
+
+ sort.Slice(pkgs, func(i, j int) bool {
+ return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(w.exportPath(pkg))
+ w.string(pkg.Name())
+ w.uint64(uint64(0)) // package height is not needed for go/types
+
+ objs := pkgObjs[pkg]
+ w.uint64(uint64(len(objs)))
+ for _, obj := range objs {
+ w.string(obj.name)
+ w.uint64(index[obj.obj])
+ }
+ }
+}
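+
+// Editorial note (not part of the upstream sources): the resulting index
+// layout is, per package (in sorted order):
+//
+//	path, name, height (always 0 here)
+//	nObjs
+//	  objName, declOffset   // offset of the object's record in data0
+//
+// preceded by the total package count, so decoding can locate any exported
+// declaration without reading the whole data section.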
+
+// exportName returns the 'exported' name of an object. It differs from
+// obj.Name() only for type parameters (see tparamExportName for details).
+func (p *iexporter) exportName(obj types.Object) (res string) {
+ if name := p.tparamNames[obj]; name != "" {
+ return name
+ }
+ return obj.Name()
+}
+
+type iexporter struct {
+ fset *token.FileSet
+ version int
+
+ shallow bool // don't put types from other packages in the index
+ objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated
+ localpkg *types.Package // (nil in bundle mode)
+
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Package]bool
+
+ declTodo objQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ // In shallow mode, object positions are encoded as (file, offset).
+ // Each file is recorded as a line-number table.
+ // Only the lines of needed positions are saved faithfully.
+ fileInfo map[*token.File]uint64 // value is index in fileInfos
+ fileInfos []*filePositions
+
+ data0 intWriter
+ declIndex map[types.Object]uint64
+ tparamNames map[types.Object]string // typeparam->exported name
+ typIndex map[types.Type]uint64
+
+ indent int // for tracing support
+}
+
+type filePositions struct {
+ file *token.File
+ needed []uint64 // unordered list of needed file offsets
+}
+
+func (p *iexporter) trace(format string, args ...any) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+// objectpathEncoder returns the lazily allocated objectpath.Encoder to use
+// when encoding objects in other packages during shallow export.
+//
+// Using a shared Encoder amortizes some of the cost of objectpath search.
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder {
+ if p.objEncoder == nil {
+ p.objEncoder = new(objectpath.Encoder)
+ }
+ return p.objEncoder
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ p.strings.uint64(uint64(len(s)))
+ p.strings.WriteString(s)
+ }
+ return off
+}
+
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+ index, ok := p.fileInfo[file]
+ if !ok {
+ index = uint64(len(p.fileInfo))
+ p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+ if p.fileInfo == nil {
+ p.fileInfo = make(map[*token.File]uint64)
+ }
+ p.fileInfo[file] = index
+ }
+ // Record each needed offset.
+ info := p.fileInfos[index]
+ offset := uint64(file.Offset(pos))
+ info.needed = append(info.needed, offset)
+
+ return index, offset
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(obj types.Object) {
+ // Package unsafe is known to the compiler and predeclared.
+ // Callers should not ask us to export it.
+ if obj.Pkg() == types.Unsafe {
+ panic("cannot export package unsafe")
+ }
+
+ // Shallow export data: don't index decls from other packages.
+ if p.shallow && obj.Pkg() != p.localpkg {
+ return
+ }
+
+ if _, ok := p.declIndex[obj]; ok {
+ return
+ }
+
+ p.declIndex[obj] = ^uint64(0) // mark obj present in work queue
+ p.declTodo.pushTail(obj)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+func (w *exportWriter) exportPath(pkg *types.Package) string {
+ if pkg == w.p.localpkg {
+ return ""
+ }
+ return pkg.Path()
+}
+
+func (p *iexporter) doDecl(obj types.Object) {
+ if trace {
+ p.trace("exporting decl %v (%T)", obj, obj)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", obj)
+ }()
+ }
+ w := p.newWriter()
+
+ switch obj := obj.(type) {
+ case *types.Var:
+ w.tag(varTag)
+ w.pos(obj.Pos())
+ w.typ(obj.Type(), obj.Pkg())
+
+ case *types.Func:
+ sig, _ := obj.Type().(*types.Signature)
+ if sig.Recv() != nil {
+ // We shouldn't see methods in the package scope,
+ // but the type checker may repair "func () F() {}"
+ // to "func (Invalid) F()" and then treat it like "func F()",
+ // so allow that. See golang/go#57729.
+ if sig.Recv().Type() != types.Typ[types.Invalid] {
+ panic(internalErrorf("unexpected method: %v", sig))
+ }
+ }
+
+ // Function.
+ if sig.TypeParams().Len() == 0 {
+ w.tag(funcTag)
+ } else {
+ w.tag(genericFuncTag)
+ }
+ w.pos(obj.Pos())
+ // The tparam list of the function type is the declaration of the type
+ // params. So, write out the type params right now. Then those type params
+ // will be referenced via their type offset (via typOff) in all other
+ // places in the signature and function where they are used.
+ //
+ // While exporting the type parameters, tparamList computes and records
+ // their export names, so that they can later be used when writing the index.
+ if tparams := sig.TypeParams(); tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ w.signature(sig)
+
+ case *types.Const:
+ w.tag(constTag)
+ w.pos(obj.Pos())
+ w.value(obj.Type(), obj.Val())
+
+ case *types.TypeName:
+ t := obj.Type()
+
+ if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
+ w.tag(typeParamTag)
+ w.pos(obj.Pos())
+ constraint := tparam.Constraint()
+ if p.version >= iexportVersionGo1_18 {
+ implicit := false
+ if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
+ implicit = iface.IsImplicit()
+ }
+ w.bool(implicit)
+ }
+ w.typ(constraint, obj.Pkg())
+ break
+ }
+
+ if obj.IsAlias() {
+ alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
+
+ var tparams *types.TypeParamList
+ if materialized {
+ tparams = aliases.TypeParams(alias)
+ }
+ if tparams.Len() == 0 {
+ w.tag(aliasTag)
+ } else {
+ w.tag(genericAliasTag)
+ }
+ w.pos(obj.Pos())
+ if tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ if materialized {
+ // Preserve materialized aliases,
+ // even of non-exported types.
+ t = aliases.Rhs(alias)
+ }
+ w.typ(t, obj.Pkg())
+ break
+ }
+
+ // Defined type.
+ named, ok := t.(*types.Named)
+ if !ok {
+ panic(internalErrorf("%s is not a defined type", t))
+ }
+
+ if named.TypeParams().Len() == 0 {
+ w.tag(typeTag)
+ } else {
+ w.tag(genericTypeTag)
+ }
+ w.pos(obj.Pos())
+
+ if named.TypeParams().Len() > 0 {
+ // While exporting the type parameters, tparamList computes and records
+ // their export names, so that they can later be used when writing the index.
+ w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg())
+ }
+
+ underlying := named.Underlying()
+ w.typ(underlying, obj.Pkg())
+
+ if types.IsInterface(t) {
+ break
+ }
+
+ n := named.NumMethods()
+ w.uint64(uint64(n))
+ for i := range n {
+ m := named.Method(i)
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+
+ // Receiver type parameters are type arguments of the receiver type, so
+ // their name must be qualified before exporting recv.
+ if rparams := sig.RecvTypeParams(); rparams.Len() > 0 {
+ prefix := obj.Name() + "." + m.Name()
+ for i := 0; i < rparams.Len(); i++ {
+ rparam := rparams.At(i)
+ name := tparamExportName(prefix, rparam)
+ w.p.tparamNames[rparam.Obj()] = name
+ }
+ }
+ w.param(sig.Recv())
+ w.signature(sig)
+ }
+
+ default:
+ panic(internalErrorf("unexpected object: %v", obj))
+ }
+
+ p.declIndex[obj] = w.flush()
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) pos(pos token.Pos) {
+ if w.p.shallow {
+ w.posV2(pos)
+ } else if w.p.version >= iexportVersionPosCol {
+ w.posV1(pos)
+ } else {
+ w.posV0(pos)
+ }
+}
+
+// posV2 encoding (used only in shallow mode) records positions as
+// (file, offset), where file is the index in the token.File table
+// (which records the file name and newline offsets) and offset is a
+// byte offset. It effectively ignores //line directives.
+func (w *exportWriter) posV2(pos token.Pos) {
+ if pos == token.NoPos {
+ w.uint64(0)
+ return
+ }
+ file := w.p.fset.File(pos) // fset must be non-nil
+ index, offset := w.p.fileIndexAndOffset(file, pos)
+ w.uint64(1 + index)
+ w.uint64(offset)
+}
+
+func (w *exportWriter) posV1(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+ column := int64(p.Column)
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
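+
+// Editorial worked example (not part of the upstream sources): moving from
+// ("a.go", line 10, col 5) to ("a.go", line 12, col 8) gives
+// deltaColumn = 3<<1 = 6 and deltaLine = 2<<1 = 4; the file is unchanged, so
+// deltaLine keeps an even low bit, and because deltaLine != 0 the low bit of
+// deltaColumn is set. The encoder writes the varints 7 and 4, and no file
+// string.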
+
+func (w *exportWriter) posV0(pos token.Pos) {
+ if w.p.fset == nil {
+ w.int64(0)
+ return
+ }
+
+ p := w.p.fset.Position(pos)
+ file := p.Filename
+ line := int64(p.Line)
+
+ // When file is the same as the last position (common case),
+ // we can save a few bytes by delta encoding just the line
+ // number.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile and
+ // prevLine as fields of exportWriter.
+
+ if file == w.prevFile {
+ delta := line - w.prevLine
+ w.int64(delta)
+ if delta == deltaNewFile {
+ w.int64(-1)
+ }
+ } else {
+ w.int64(deltaNewFile)
+ w.int64(line) // line >= 0
+ w.string(file)
+ w.prevFile = file
+ }
+ w.prevLine = line
+}
+
+func (w *exportWriter) pkg(pkg *types.Package) {
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(w.exportPath(pkg))
+}
+
+func (w *exportWriter) qualifiedType(obj *types.TypeName) {
+ name := w.p.exportName(obj)
+
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(obj)
+ w.string(name)
+ w.pkg(obj.Pkg())
+}
+
+// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass
+// it in explicitly into signatures and structs that may use it for
+// constructing fields.
+func (w *exportWriter) typ(t types.Type, pkg *types.Package) {
+ w.data.uint64(w.p.typOff(t, pkg))
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t, pkg)
+ off = predeclReserved + w.flush()
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
+ if trace {
+ w.p.trace("exporting type %s (%T)", t, t)
+ w.p.indent++
+ defer func() {
+ w.p.indent--
+ w.p.trace("=> %s", t)
+ }()
+ }
+ switch t := t.(type) {
+ case *types.Alias:
+ if targs := aliases.TypeArgs(t); targs.Len() > 0 {
+ w.startType(instanceType)
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(aliases.Origin(t), pkg)
+ return
+ }
+ w.startType(aliasType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Named:
+ if targs := t.TypeArgs(); targs.Len() > 0 {
+ w.startType(instanceType)
+ // TODO(rfindley): investigate if this position is correct, and if it
+ // matters.
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(t.Origin(), pkg)
+ return
+ }
+ w.startType(definedType)
+ w.qualifiedType(t.Obj())
+
+ case *types.TypeParam:
+ w.startType(typeParamType)
+ w.qualifiedType(t.Obj())
+
+ case *types.Pointer:
+ w.startType(pointerType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Slice:
+ w.startType(sliceType)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Array:
+ w.startType(arrayType)
+ w.uint64(uint64(t.Len()))
+ w.typ(t.Elem(), pkg)
+
+ case *types.Chan:
+ w.startType(chanType)
+ // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+ var dir uint64
+ switch t.Dir() {
+ case types.RecvOnly:
+ dir = 1
+ case types.SendOnly:
+ dir = 2
+ case types.SendRecv:
+ dir = 3
+ }
+ w.uint64(dir)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Map:
+ w.startType(mapType)
+ w.typ(t.Key(), pkg)
+ w.typ(t.Elem(), pkg)
+
+ case *types.Signature:
+ w.startType(signatureType)
+ w.pkg(pkg)
+ w.signature(t)
+
+ case *types.Struct:
+ w.startType(structType)
+ n := t.NumFields()
+ // Even for struct{} we must emit some qualifying package, because that's
+ // what the compiler does, and thus that's what the importer expects.
+ fieldPkg := pkg
+ if n > 0 {
+ fieldPkg = t.Field(0).Pkg()
+ }
+ if fieldPkg == nil {
+ // TODO(rfindley): improve this very hacky logic.
+ //
+ // The importer expects a package to be set for all struct types, even
+ // those with no fields. A better encoding might be to set NumFields
+ // before pkg. setPkg panics with a nil package, which may be possible
+ // to reach with invalid packages (and perhaps valid packages, too?), so
+ // (arbitrarily) set the localpkg if available.
+ //
+ // Alternatively, we may be able to simply guarantee that pkg != nil, by
+ // reconsidering the encoding of constant values.
+ if w.p.shallow {
+ fieldPkg = w.p.localpkg
+ } else {
+ panic(internalErrorf("no package to set for empty struct"))
+ }
+ }
+ w.pkg(fieldPkg)
+ w.uint64(uint64(n))
+
+ for i := range n {
+ f := t.Field(i)
+ if w.p.shallow {
+ w.objectPath(f)
+ }
+ w.pos(f.Pos())
+ w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg
+ w.typ(f.Type(), fieldPkg)
+ w.bool(f.Anonymous())
+ w.string(t.Tag(i)) // note (or tag)
+ }
+
+ case *types.Interface:
+ w.startType(interfaceType)
+ w.pkg(pkg)
+
+ n := t.NumEmbeddeds()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ ft := t.EmbeddedType(i)
+ tPkg := pkg
+ if named, _ := types.Unalias(ft).(*types.Named); named != nil {
+ w.pos(named.Obj().Pos())
+ } else {
+ w.pos(token.NoPos)
+ }
+ w.typ(ft, tPkg)
+ }
+
+ // See comment for struct fields. In shallow mode we change the encoding
+ // for interface methods that are promoted from other packages.
+
+ n = t.NumExplicitMethods()
+ w.uint64(uint64(n))
+ for i := 0; i < n; i++ {
+ m := t.ExplicitMethod(i)
+ if w.p.shallow {
+ w.objectPath(m)
+ }
+ w.pos(m.Pos())
+ w.string(m.Name())
+ sig, _ := m.Type().(*types.Signature)
+ w.signature(sig)
+ }
+
+ case *types.Union:
+ w.startType(unionType)
+ nt := t.Len()
+ w.uint64(uint64(nt))
+ for i := range nt {
+ term := t.Term(i)
+ w.bool(term.Tilde())
+ w.typ(term.Type(), pkg)
+ }
+
+ default:
+ panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t)))
+ }
+}
+
+// objectPath writes the package and objectPath to use to look up obj in a
+// different package, when encoding in "shallow" mode.
+//
+// When doing a shallow import, the importer creates only the local package,
+// and requests package symbols for dependencies from the client.
+// However, certain types defined in the local package may hold objects defined
+// (perhaps deeply) within another package.
+//
+// For example, consider the following:
+//
+// package a
+// func F() chan * map[string] struct { X int }
+//
+// package b
+// import "a"
+// var B = a.F()
+//
+// In this example, the type of b.B holds fields defined in package a.
+// In order to have the correct canonical objects for the field defined in the
+// type of B, they are encoded as objectPaths and later looked up in the
+// importer. The same problem applies to interface methods.
+func (w *exportWriter) objectPath(obj types.Object) {
+ if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg {
+ // obj.Pkg() may be nil for the builtin error.Error.
+ // In this case, or if obj is declared in the local package, no need to
+ // encode.
+ w.string("")
+ return
+ }
+ objectPath, err := w.p.objectpathEncoder().For(obj)
+ if err != nil {
+ // Fall back to the empty string, which will cause the importer to create a
+ // new object, which matches earlier behavior. Creating a new object is
+ // sufficient for many purposes (such as type checking), but causes certain
+ // references algorithms to fail (golang/go#60819). However, we didn't
+ // notice this problem during months of gopls@v0.12.0 testing.
+ //
+ // TODO(golang/go#61674): this workaround is insufficient, as in the case
+ // where the field is forwarded from an instantiated type and may not appear
+ // in the export data of the original package:
+ //
+ // // package a
+ // type A[P any] struct{ F P }
+ //
+ // // package b
+ // type B a.A[int]
+ //
+ // We need to update references algorithms not to depend on this
+ // de-duplication, at which point we may want to simply remove the
+ // workaround here.
+ w.string("")
+ return
+ }
+ w.string(string(objectPath))
+ w.pkg(obj.Pkg())
+}
+
+func (w *exportWriter) signature(sig *types.Signature) {
+ w.paramList(sig.Params())
+ w.paramList(sig.Results())
+ if sig.Params().Len() > 0 {
+ w.bool(sig.Variadic())
+ }
+}
+
+func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) {
+ w.uint64(uint64(ts.Len()))
+ for i := 0; i < ts.Len(); i++ {
+ w.typ(ts.At(i), pkg)
+ }
+}
+
+func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) {
+ ll := uint64(list.Len())
+ w.uint64(ll)
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ // Set the type parameter exportName before exporting its type.
+ exportName := tparamExportName(prefix, tparam)
+ w.p.tparamNames[tparam.Obj()] = exportName
+ w.typ(list.At(i), pkg)
+ }
+}
+
+const blankMarker = "$"
+
+// tparamExportName returns the 'exported' name of a type parameter, which
+// differs from its actual object name: it is prefixed with a qualifier, and
+// blank type parameter names are disambiguated by their index in the type
+// parameter list.
+func tparamExportName(prefix string, tparam *types.TypeParam) string {
+ assert(prefix != "")
+ name := tparam.Obj().Name()
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(tparam.Index())
+ }
+ return prefix + "." + name
+}
+
+// tparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See tparamExportName
+// for details.
+func tparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ errorf("malformed type parameter export name %s: missing prefix", exportName)
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
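+
+// Editorial example (not part of the upstream sources): for a declaration
+// func F[T any, _ any](), the exported type parameter names are "F.T" and
+// "F.$1" (the blank parameter at index 1); tparamName recovers "T" and "_"
+// respectively.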
+
+func (w *exportWriter) paramList(tup *types.Tuple) {
+ n := tup.Len()
+ w.uint64(uint64(n))
+ for i := range n {
+ w.param(tup.At(i))
+ }
+}
+
+func (w *exportWriter) param(obj types.Object) {
+ w.pos(obj.Pos())
+ w.localIdent(obj)
+ w.typ(obj.Type(), obj.Pkg())
+}
+
+func (w *exportWriter) value(typ types.Type, v constant.Value) {
+ w.typ(typ, nil)
+ if w.p.version >= iexportVersionGo1_18 {
+ w.int64(int64(v.Kind()))
+ }
+
+ if v.Kind() == constant.Unknown {
+ // golang/go#60605: treat unknown constant values as if they have invalid type
+ //
+ // This loses some fidelity over the package type-checked from source, but that
+ // is acceptable.
+ //
+ // TODO(rfindley): we should switch on the recorded constant kind rather
+ // than the constant type
+ return
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ w.bool(constant.BoolVal(v))
+ case types.IsInteger:
+ var i big.Int
+ if i64, exact := constant.Int64Val(v); exact {
+ i.SetInt64(i64)
+ } else if ui64, exact := constant.Uint64Val(v); exact {
+ i.SetUint64(ui64)
+ } else {
+ i.SetString(v.ExactString(), 10)
+ }
+ w.mpint(&i, typ)
+ case types.IsFloat:
+ f := constantToFloat(v)
+ w.mpfloat(f, typ)
+ case types.IsComplex:
+ w.mpfloat(constantToFloat(constant.Real(v)), typ)
+ w.mpfloat(constantToFloat(constant.Imag(v)), typ)
+ case types.IsString:
+ w.string(constant.StringVal(v))
+ default:
+ if b.Kind() == types.Invalid {
+ // package contains type errors
+ break
+ }
+ panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying()))
+ }
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+ x = constant.ToFloat(x)
+ // Use the same floating-point precision (512) as cmd/compile
+ // (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+ const mpprec = 512
+ var f big.Float
+ f.SetPrec(mpprec)
+ if v, exact := constant.Float64Val(x); exact {
+ // float64
+ f.SetFloat64(v)
+ } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+ // TODO(gri): add big.Rat accessor to constant.Value.
+ n := valueToRat(num)
+ d := valueToRat(denom)
+ f.SetRat(n.Quo(n, d))
+ } else {
+ // Value too large to represent as a fraction => inaccessible.
+ // TODO(gri): add big.Float accessor to constant.Value.
+ _, ok := f.SetString(x.ExactString())
+ assert(ok)
+ }
+ return &f
+}
+
+func valueToRat(x constant.Value) *big.Rat {
+ // Convert little-endian to big-endian.
+ // I can't believe this is necessary.
+ bytes := constant.Bytes(x)
+ for i := 0; i < len(bytes)/2; i++ {
+ bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
+ }
+ return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and the
+// bottom bit of the length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+ basic, ok := typ.Underlying().(*types.Basic)
+ if !ok {
+ panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+ }
+
+ signed, maxBytes := intSize(basic)
+
+ negative := x.Sign() < 0
+ if !signed && negative {
+ panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+ }
+
+ b := x.Bytes()
+ if len(b) > 0 && b[0] == 0 {
+ panic(internalErrorf("leading zeros"))
+ }
+ if uint(len(b)) > maxBytes {
+ panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
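+
+// Editorial worked example (not part of the upstream sources), for a signed
+// int32 constant (maxBytes = 4, so maxSmall = 248): the value -3 is zig-zag
+// encoded as the single byte 5 (3<<1 - 1), while 100000 needs the 3-byte
+// big-endian string 0x01 0x86 0xA0 and is therefore written as the prefix
+// byte 256 - 2*3 = 250 (low bit clear, since the value is non-negative)
+// followed by those three bytes.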
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
+func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) {
+ if f.IsInf() {
+ panic("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc))
+ }
+ w.mpint(manti, typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
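+
+// Editorial worked example (not part of the upstream sources): the constant
+// 6.25 decomposes as 0.78125 × 2**3; scaling the mantissa by its minimal
+// precision of 5 bits yields the integer 25 and exponent 3-5 = -2, so the
+// value is written as mpint(25) followed by int64(-2), i.e. 25 × 2**-2.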
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+func (w *exportWriter) localIdent(obj types.Object) {
+ // Anonymous parameters.
+ if obj == nil {
+ w.string("")
+ return
+ }
+
+ name := obj.Name()
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+ w.string(name)
+}
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func assert(cond bool) {
+ if !cond {
+ panic("internal error: assertion failed")
+ }
+}
+
+// The below is copied from go/src/cmd/compile/internal/gc/syntax.go.
+
+// objQueue is a FIFO queue of types.Object. The zero value of objQueue is
+// a ready-to-use empty queue.
+type objQueue struct {
+ ring []types.Object
+ head, tail int
+}
+
+// empty reports whether q contains no objects.
+func (q *objQueue) empty() bool {
+ return q.head == q.tail
+}
+
+// pushTail appends obj to the tail of the queue.
+func (q *objQueue) pushTail(obj types.Object) {
+ if len(q.ring) == 0 {
+ q.ring = make([]types.Object, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]types.Object, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = obj
+ q.tail++
+}
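+
+// Editorial worked example (not part of the upstream sources): with a ring of
+// length 16, head=5 and tail=21 (full), pushTail allocates a ring of length
+// 32, copies ring[5:16] followed by ring[0:5] to its front, and resets
+// head=0, tail=16 before storing the new object at slot 16.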
+
+// popHead pops an object from the head of the queue. It panics if q is empty.
+func (q *objQueue) popHead() types.Object {
+ if q.empty() {
+ panic("dequeue empty")
+ }
+ obj := q.ring[q.head%len(q.ring)]
+ q.head++
+ return obj
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+// TODO(adonovan): make this call panic, so that it's symmetric with errorf.
+// Otherwise it's easy to forget to do anything with the error.
+//
+// TODO(adonovan): also, consider switching the names "errorf" and
+// "internalErrorf" as the former is used for bugs, whose cause is
+// internal inconsistency, whereas the latter is used for ordinary
+// situations like bad input, whose cause is external.
+func internalErrorf(format string, args ...any) error {
+ return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
new file mode 100644
index 00000000..82e6c9d2
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -0,0 +1,1120 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package gcimporter
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "go/types"
+ "io"
+ "math/big"
+ "slices"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/types/objectpath"
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+type intReader struct {
+ *bytes.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGo1_18 = 2
+ iexportVersionGenerics = 2
+ iexportVersion = iexportVersionGenerics
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+ aliasType
+)
+
+// Object tags
+const (
+ varTag = 'V'
+ funcTag = 'F'
+ genericFuncTag = 'G'
+ constTag = 'C'
+ aliasTag = 'A'
+ genericAliasTag = 'B'
+ typeParamTag = 'P'
+ typeTag = 'T'
+ genericTypeTag = 'U'
+)
+
+// IImportData imports a package from the serialized package data
+// and returns 0 and a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
+ pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
+ if err != nil {
+ return 0, nil, err
+ }
+ return 0, pkgs[0], nil
+}
+
+// IImportBundle imports a set of packages from the serialized package bundle.
+func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
+ return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
+}
+
+// A GetPackagesFunc function obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. Deliberately returning an error can be
+// used to quickly read just the import manifest of an export data file
+// without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+ Name, Path string
+ Pkg *types.Package // to be filled in by GetPackagesFunc call
+
+ // private importer state
+ pathOffset uint64
+ nameIndex map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc {
+ return func(items []GetPackagesItem) error {
+ for i, item := range items {
+ pkg, ok := m[item.Path]
+ if !ok {
+ pkg = types.NewPackage(item.Path, item.Name)
+ m[item.Path] = pkg
+ }
+ items[i].Pkg = pkg
+ }
+ return nil
+ }
+}
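+
+// Editorial note (not part of the upstream sources): the returned closure both
+// reads and extends m, so successive imports that share one map also share a
+// single *types.Package per import path. For example (dataA and dataB are
+// hypothetical deep export blobs):
+//
+//	imports := make(map[string]*types.Package)
+//	_, a, _ := IImportData(fset, imports, dataA, "example.com/a")
+//	_, b, _ := IImportData(fset, imports, dataB, "example.com/b")
+//	// a and b reuse identical packages for any common dependencies.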
+
+func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ if !debug {
+ defer func() {
+ if e := recover(); e != nil {
+ if bundle {
+ err = fmt.Errorf("%v", e)
+ } else if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e)
+ }
+ }
+ }()
+ }
+
+ r := &intReader{bytes.NewReader(data), path}
+
+ if bundle {
+ if v := r.uint64(); v != bundleVersion {
+ errorf("unknown bundle format version %d", v)
+ }
+ }
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ if version > iexportVersionGo1_18 {
+ errorf("unstable iexport format version %d, just rebuild compiler and std library", version)
+ } else {
+ errorf("unknown iexport format version %d", version)
+ }
+ }
+
+ sLen := int64(r.uint64())
+ var fLen int64
+ var fileOffset []uint64
+ if shallow {
+ // Shallow mode uses a different position encoding.
+ fLen = int64(r.uint64())
+ fileOffset = make([]uint64, r.uint64())
+ for i := range fileOffset {
+ fileOffset[i] = r.uint64()
+ }
+ }
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io.SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ fileData := data[whence+sLen : whence+sLen+fLen]
+ declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+ r.Seek(sLen+fLen+dLen, io.SeekCurrent)
+
+ p := iimporter{
+ version: int(version),
+ ipath: path,
+ aliases: aliases.Enabled(),
+ shallow: shallow,
+ reportf: reportf,
+
+ stringData: stringData,
+ stringCache: make(map[uint64]string),
+ fileOffset: fileOffset,
+ fileData: fileData,
+ fileCache: make([]*token.File, len(fileOffset)),
+ pkgCache: make(map[uint64]*types.Package),
+
+ declData: declData,
+ pkgIndex: make(map[*types.Package]map[string]uint64),
+ typCache: make(map[uint64]types.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name.
+ tparamIndex: make(map[ident]types.Type),
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+ }
+ defer p.fake.setLines() // set lines for files in fset
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Gather the relevant packages from the manifest.
+ items := make([]GetPackagesItem, r.uint64())
+ uniquePkgPaths := make(map[string]bool)
+ for i := range items {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ _ = r.uint64() // package height; unused by go/types
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ items[i].Name = pkgName
+ items[i].Path = pkgPath
+ items[i].pathOffset = pkgPathOff
+
+ // Read index for package.
+ nameIndex := make(map[string]uint64)
+ nSyms := r.uint64()
+ // In shallow mode, only the current package (i=0) has an index.
+ assert(!(shallow && i > 0 && nSyms != 0))
+ for ; nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ items[i].nameIndex = nameIndex
+
+ uniquePkgPaths[pkgPath] = true
+ }
+ // Debugging #63822; hypothesis: there are duplicate PkgPaths.
+ if len(uniquePkgPaths) != len(items) {
+ reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
+ }
+
+ // Request packages all at once from the client,
+ // enabling a parallel implementation.
+ if err := getPackages(items); err != nil {
+ return nil, err // don't wrap this error
+ }
+
+ // Check the results and complete the index.
+ pkgList := make([]*types.Package, len(items))
+ for i, item := range items {
+ pkg := item.Pkg
+ if pkg == nil {
+ errorf("internal error: getPackages returned nil package for %q", item.Path)
+ } else if pkg.Path() != item.Path {
+ errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path)
+ } else if pkg.Name() != item.Name {
+ errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name)
+ }
+ p.pkgCache[item.pathOffset] = pkg
+ p.pkgIndex[pkg] = item.nameIndex
+ pkgList[i] = pkg
+ }
+
+ if bundle {
+ pkgs = make([]*types.Package, r.uint64())
+ for i := range pkgs {
+ pkg := p.pkgAt(r.uint64())
+ imps := make([]*types.Package, r.uint64())
+ for j := range imps {
+ imps[j] = p.pkgAt(r.uint64())
+ }
+ pkg.SetImports(imps)
+ pkgs[i] = pkg
+ }
+ } else {
+ if len(pkgList) == 0 {
+ errorf("no packages found for %s", path)
+ panic("unreachable")
+ }
+ pkgs = pkgList[:1]
+
+ // record all referenced packages as imports
+ list := slices.Clone(pkgList[1:])
+ sort.Sort(byPath(list))
+ pkgs[0].SetImports(list)
+ }
+
+ for _, pkg := range pkgs {
+ if pkg.Complete() {
+ continue
+ }
+
+ names := make([]string, 0, len(p.pkgIndex[pkg]))
+ for name := range p.pkgIndex[pkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(pkg, name)
+ }
+
+ // package was imported completely and without errors
+ pkg.MarkComplete()
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the typeParamTag case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+
+ for _, typ := range p.interfaceList {
+ typ.Complete()
+ }
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ for _, typ := range p.instanceList {
+ if iface, _ := typ.Underlying().(*types.Interface); iface != nil {
+ iface.Complete()
+ }
+ }
+
+ return pkgs, nil
+}
+
+type setConstraintArgs struct {
+ t *types.TypeParam
+ constraint types.Type
+}
+
+type iimporter struct {
+ version int
+ ipath string
+
+ aliases bool
+ shallow bool
+ reportf ReportFunc // if non-nil, used to report bugs
+
+ stringData []byte
+ stringCache map[uint64]string
+ fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+ fileData []byte
+ fileCache []*token.File // memoized decoding of file encoded as i
+ pkgCache map[uint64]*types.Package
+
+ declData []byte
+ pkgIndex map[*types.Package]map[string]uint64
+ typCache map[uint64]types.Type
+ tparamIndex map[ident]types.Type
+
+ fake fakeFileSet
+ interfaceList []*types.Interface
+
+ // Workaround for the go/types bug golang/go#61561: instances produced during
+ // instantiation may contain incomplete interfaces. Here we only complete the
+ // underlying type of the instance, which is the most common case but doesn't
+ // handle parameterized interface literals defined deeper in the type.
+ instanceList []types.Type // instances for later completion (see golang/go#61561)
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+
+ indent int // for tracing support
+}
+
+func (p *iimporter) trace(format string, args ...any) {
+ if !trace {
+ // Call sites should also be guarded, but having this check here allows
+ // easily enabling/disabling debug trace statements.
+ return
+ }
+ fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...)
+}
+
+func (p *iimporter) doDecl(pkg *types.Package, name string) {
+ if debug {
+ p.trace("import decl %s", name)
+ p.indent++
+ defer func() {
+ p.indent--
+ p.trace("=> %s", name)
+ }()
+ }
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ // In deep mode, the index should be complete. In shallow
+ // mode, we should have already recursively loaded necessary
+ // dependencies so the above Lookup succeeds.
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ r.declReader.Reset(p.declData[off:])
+
+ r.obj(name)
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ if s, ok := p.stringCache[off]; ok {
+ return s
+ }
+
+ slen, n := binary.Uvarint(p.stringData[off:])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ s := string(p.stringData[spos : spos+slen])
+ p.stringCache[off] = s
+ return s
+}
+
+func (p *iimporter) fileAt(index uint64) *token.File {
+ file := p.fileCache[index]
+ if file == nil {
+ off := p.fileOffset[index]
+ file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+ p.fileCache[index] = file
+ }
+ return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+ filename := p.stringAt(rd.uint64())
+ size := int(rd.uint64())
+ file := p.fake.fset.AddFile(filename, -1, size)
+
+ // SetLines requires a nondecreasing sequence.
+ // Because it is common for clients to derive the interval
+ // [start, start+len(name)] from a start position, and we
+ // want to ensure that the end offset is on the same line,
+ // we fill in the gaps of the sparse encoding with values
+ // that strictly increase by the largest possible amount.
+ // This allows us to avoid having to record the actual end
+ // offset of each needed line.
+
+ lines := make([]int, int(rd.uint64()))
+ var index, offset int
+ for i, n := 0, int(rd.uint64()); i < n; i++ {
+ index += int(rd.uint64())
+ offset += int(rd.uint64())
+ lines[index] = offset
+
+ // Ensure monotonicity between points.
+ for j := index - 1; j > 0 && lines[j] == 0; j-- {
+ lines[j] = lines[j+1] - 1
+ }
+ }
+
+ // Ensure monotonicity after last point.
+ for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+ size--
+ lines[j] = size
+ }
+
+ if !file.SetLines(lines) {
+ errorf("SetLines failed: %d", lines) // can't happen
+ }
+ return file
+}
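+
+// Editorial worked example (not part of the upstream sources), continuing the
+// encodeFile example in iexport.go: decoding the delta pairs (1,10) and
+// (1,30) for a 4-line, 100-byte file first gives lines = [0, 10, 40, 0]; the
+// trailing gap is then filled with the largest strictly increasing values,
+// yielding [0, 10, 40, 99], which SetLines accepts.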
+
+func (p *iimporter) pkgAt(off uint64) *types.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ r.declReader.Reset(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types.Named, rhs types.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := types.Unalias(rhs).(*types.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader bytes.Reader
+ currPkg *types.Package
+ prevFile string
+ prevLine int64
+ prevColumn int64
+}
+
+// markBlack is redefined in iimport_go123.go, to work around golang/go#69912.
+//
+// If TypeNames are not marked black (in the sense of go/types cycle
+// detection), they may be mutated when dot-imported. Fix this by punching a
+// hole through the type, when compiling with Go 1.23. (The bug has been fixed
+// for 1.24, but the fix was not worth back-porting).
+var markBlack = func(name *types.TypeName) {}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case aliasTag, genericAliasTag:
+ var tparams []*types.TypeParam
+ if tag == genericAliasTag {
+ tparams = r.tparamList()
+ }
+ typ := r.typ()
+ obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+ markBlack(obj) // workaround for golang/go#69912
+ r.declare(obj)
+
+ case constTag:
+ typ, val := r.value()
+
+ r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
+
+ case funcTag, genericFuncTag:
+ var tparams []*types.TypeParam
+ if tag == genericFuncTag {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types.NewFunc(pos, r.currPkg, name, sig))
+
+ case typeTag, genericTypeTag:
+ // Types can be recursive. We need to set up a stub
+ // declaration before recursing.
+ obj := types.NewTypeName(pos, r.currPkg, name, nil)
+ named := types.NewNamed(obj, nil, nil)
+
+ markBlack(obj) // workaround for golang/go#69912
+
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == genericTypeTag {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ _, recvNamed := typesinternal.ReceiverNamed(recv)
+ targs := recvNamed.TypeArgs()
+ var rparams []*types.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case typeParamTag:
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := tparamName(name)
+ tn := types.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types.NewTypeParam(tn, nil)
+
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+ var implicit bool
+ if r.p.version >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := types.Unalias(constraint).(*types.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case varTag:
+ typ := r.typ()
+
+ v := types.NewVar(pos, r.currPkg, name, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ r.declare(v)
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.version >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind.
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
+ case types.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types.IsString:
+ val = constant.MakeString(r.string())
+
+ case types.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types.IsFloat:
+ val = r.mpfloat(b)
+
+ case types.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ if b.Kind() == types.Invalid {
+ val = constant.MakeUnknown()
+ return
+ }
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types.IsUntyped) != 0 {
+ return true, 64
+ }
+
+ switch b.Kind() {
+ case types.Float32, types.Complex64:
+ return true, 3
+ case types.Float64, types.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types.IsUnsigned) == 0
+ switch b.Kind() {
+ case types.Int8, types.Uint8:
+ maxBytes = 1
+ case types.Int16, types.Uint16:
+ maxBytes = 2
+ case types.Int32, types.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() token.Pos {
+ if r.p.shallow {
+ // precise offsets are encoded only in shallow mode
+ return r.posv2()
+ }
+ if r.p.version >= iexportVersionPosCol {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 {
+ return token.NoPos
+ }
+ return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn))
+}
+
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevFile = r.string()
+ r.prevLine = l
+ }
+}
+
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevFile = r.string()
+ }
+ }
+}
+
+func (r *importReader) posv2() token.Pos {
+ file := r.uint64()
+ if file == 0 {
+ return token.NoPos
+ }
+ tf := r.p.fileAt(file - 1)
+ return tf.Pos(int(r.uint64()))
+}
+
+func (r *importReader) typ() types.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+
+func (r *importReader) doType(base *types.Named) (res types.Type) {
+ k := r.kind()
+ if debug {
+ r.p.trace("importing type %d (base: %v)", k, base)
+ r.p.indent++
+ defer func() {
+ r.p.indent--
+ r.p.trace("=> %s", res)
+ }()
+ }
+ switch k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case aliasType, definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types.TypeName).Type()
+ case pointerType:
+ return types.NewPointer(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types.NewChan(dir, r.typ())
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ var field *types.Var
+ if r.p.shallow {
+ field, _ = r.objectPathObject().(*types.Var)
+ }
+
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ // Either this is not a shallow import, the field is local, or the
+ // encoded objectPath failed to produce an object (a bug).
+ //
+ // Even in this last, buggy case, fall back on creating a new field. As
+ // discussed in iexport.go, this is not correct, but mostly works and is
+ // preferable to failing (for now at least).
+ if field == nil {
+ field = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ }
+
+ fields[i] = field
+ tags[i] = tag
+ }
+ return types.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types.Func, r.uint64())
+ for i := range methods {
+ var method *types.Func
+ if r.p.shallow {
+ method, _ = r.objectPathObject().(*types.Func)
+ }
+
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types.Var
+ if base != nil {
+ recv = types.NewVar(token.NoPos, r.currPkg, "", base)
+ }
+ msig := r.signature(recv, nil, nil)
+
+ if method == nil {
+ method = types.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+ methods[i] = method
+ }
+
+ typ := types.NewInterfaceType(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Environment
+ t, _ := types.Instantiate(nil, baseType, targs, false)
+
+ // Workaround for golang/go#61561. See the doc for instanceList for details.
+ r.p.instanceList = append(r.p.instanceList, t)
+ return t
+
+ case unionType:
+ if r.p.version < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+// objectPathObject is the inverse of exportWriter.objectPath.
+//
+// In shallow mode, certain fields and methods may need to be looked up in an
+// imported package. See the doc for exportWriter.objectPath for a full
+// explanation.
+func (r *importReader) objectPathObject() types.Object {
+ objPath := objectpath.Path(r.string())
+ if objPath == "" {
+ return nil
+ }
+ pkg := r.pkg()
+ obj, err := objectpath.Object(pkg, objPath)
+ if err != nil {
+ if r.p.reportf != nil {
+ r.p.reportf("failed to find object for objectPath %q: %v", objPath, err)
+ }
+ }
+ return obj
+}
+
+func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types.TypeParam, n)
+ for i := range xs {
+ // Note: the standard library importer is tolerant of nil types here,
+ // though would panic in SetTypeParams.
+ xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types.Tuple {
+ xs := make([]*types.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
new file mode 100644
index 00000000..907c8557
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
@@ -0,0 +1,91 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "go/types"
+ "sync"
+)
+
+// predecl is a cache for the predeclared types in types.Universe.
+//
+// Cache a distinct result based on the runtime value of any.
+// The pointer value of the any type varies based on GODEBUG settings.
+var predeclMu sync.Mutex
+var predecl map[types.Type][]types.Type
+
+func predeclared() []types.Type {
+ anyt := types.Universe.Lookup("any").Type()
+
+ predeclMu.Lock()
+ defer predeclMu.Unlock()
+
+ if pre, ok := predecl[anyt]; ok {
+ return pre
+ }
+
+ if predecl == nil {
+ predecl = make(map[types.Type][]types.Type)
+ }
+
+ decls := []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ anyt,
+ }
+
+ predecl[anyt] = decls
+ return decls
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/support.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/support.go
new file mode 100644
index 00000000..4af810dc
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/support.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
+func readArchiveHeader(b *bufio.Reader, name string) int {
+ // architecture-independent object file output
+ const HeaderSize = 60
+
+ var buf [HeaderSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/operator/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
new file mode 100644
index 00000000..37b4a39e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -0,0 +1,761 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Derived from go/internal/gcimporter/ureader.go
+
+package gcimporter
+
+import (
+ "fmt"
+ "go/token"
+ "go/types"
+ "sort"
+
+ "golang.org/x/tools/internal/aliases"
+ "golang.org/x/tools/internal/pkgbits"
+ "golang.org/x/tools/internal/typesinternal"
+)
+
+// A pkgReader holds the shared state for reading a unified IR package
+// description.
+type pkgReader struct {
+ pkgbits.PkgDecoder
+
+ fake fakeFileSet
+
+ ctxt *types.Context
+ imports map[string]*types.Package // previously imported packages, indexed by path
+ aliases bool // create types.Alias nodes
+
+ // lazily initialized arrays corresponding to the unified IR
+ // PosBase, Pkg, and Type sections, respectively.
+ posBases []string // position bases (i.e., file names)
+ pkgs []*types.Package
+ typs []types.Type
+
+ // laterFns holds functions that need to be invoked at the end of
+ // import reading.
+ laterFns []func()
+ // laterFors is used in case of 'type A B' to ensure that B is processed before A.
+ laterFors map[types.Type]int
+
+ // ifaces holds a list of constructed Interfaces, which need to have
+ // Complete called after importing is done.
+ ifaces []*types.Interface
+}
+
+// later adds a function to be invoked at the end of import reading.
+func (pr *pkgReader) later(fn func()) {
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// See cmd/compile/internal/noder.derivedInfo.
+type derivedInfo struct {
+ idx pkgbits.Index
+}
+
+// See cmd/compile/internal/noder.typeInfo.
+type typeInfo struct {
+ idx pkgbits.Index
+ derived bool
+}
+
+func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
+ if !debug {
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x)
+ }
+ }()
+ }
+
+ s := string(data)
+ input := pkgbits.NewPkgDecoder(path, s)
+ pkg = readUnifiedPackage(fset, nil, imports, input)
+ return
+}
+
+// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing.
+func (pr *pkgReader) laterFor(t types.Type, fn func()) {
+ if pr.laterFors == nil {
+ pr.laterFors = make(map[types.Type]int)
+ }
+ pr.laterFors[t] = len(pr.laterFns)
+ pr.laterFns = append(pr.laterFns, fn)
+}
+
+// readUnifiedPackage reads a package description from the given
+// unified IR export data decoder.
+func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package {
+ pr := pkgReader{
+ PkgDecoder: input,
+
+ fake: fakeFileSet{
+ fset: fset,
+ files: make(map[string]*fileInfo),
+ },
+
+ ctxt: ctxt,
+ imports: imports,
+ aliases: aliases.Enabled(),
+
+ posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)),
+ pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)),
+ typs: make([]types.Type, input.NumElems(pkgbits.RelocType)),
+ }
+ defer pr.fake.setLines()
+
+ r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
+ pkg := r.pkg()
+ if r.Version().Has(pkgbits.HasInit) {
+ r.Bool()
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.Sync(pkgbits.SyncObject)
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
+ r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ assert(r.Len() == 0)
+ }
+
+ r.Sync(pkgbits.SyncEOF)
+
+ for _, fn := range pr.laterFns {
+ fn()
+ }
+
+ for _, iface := range pr.ifaces {
+ iface.Complete()
+ }
+
+ // Imports() of pkg are all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+// A reader holds the state for reading a single unified IR element
+// within a package.
+type reader struct {
+ pkgbits.Decoder
+
+ p *pkgReader
+
+ dict *readerDict
+}
+
+// A readerDict holds the state for type parameters that parameterize
+// the current unified IR element.
+type readerDict struct {
+ // bounds is a slice of typeInfos corresponding to the underlying
+ // bounds of the element's type parameters.
+ bounds []typeInfo
+
+ // tparams is a slice of the constructed TypeParams for the element.
+ tparams []*types.TypeParam
+
+ // derived is a slice of types derived from tparams, which may be
+ // instantiated while reading the current element.
+ derived []derivedInfo
+ derivedTypes []types.Type // lazily instantiated from derived
+}
+
+func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.NewDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader {
+ return &reader{
+ Decoder: pr.TempDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+func (pr *pkgReader) retireReader(r *reader) {
+ pr.RetireDecoder(&r.Decoder)
+}
+
+// @@@ Positions
+
+func (r *reader) pos() token.Pos {
+ r.Sync(pkgbits.SyncPos)
+ if !r.Bool() {
+ return token.NoPos
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.Uint()
+ col := r.Uint()
+ return r.p.fake.pos(posBase, int(line), int(col))
+}
+
+func (r *reader) posBase() string {
+ return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase))
+}
+
+func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string {
+ if b := pr.posBases[idx]; b != "" {
+ return b
+ }
+
+ var filename string
+ {
+ r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase)
+
+ // Within types2, position bases have a lot more details (e.g.,
+ // keeping track of where //line directives appeared exactly).
+ //
+ // For go/types, we just track the file name.
+
+ filename = r.String()
+
+ if r.Bool() { // file base
+ // Was: "b = token.NewTrimmedFileBase(filename, true)"
+ } else { // line base
+ pos := r.pos()
+ line := r.Uint()
+ col := r.Uint()
+
+ // Was: "b = token.NewLineBase(pos, filename, true, line, col)"
+ _, _, _ = pos, line, col
+ }
+ pr.retireReader(r)
+ }
+ b := filename
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Package {
+ r.Sync(pkgbits.SyncPkg)
+ return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Package {
+ path := r.String()
+ switch path {
+ // cmd/compile emits path="main" for main packages because
+ // that's the linker symbol prefix it used; but we need
+ // the package's path as it would be reported by go list,
+ // hence "main" below.
+ // See test at go/packages.TestMainPackagePathInModeTypes.
+ case "", "main":
+ path = r.p.PkgPath()
+ case "builtin":
+ return nil // universe
+ case "unsafe":
+ return types.Unsafe
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.String()
+
+ pkg := types.NewPackage(path, name)
+ r.p.imports[path] = pkg
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.Sync(pkgbits.SyncType)
+ if r.Bool() {
+ return typeInfo{idx: pkgbits.Index(r.Len()), derived: true}
+ }
+ return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type {
+ idx := info.idx
+ var where *types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ var typ types.Type
+ {
+ r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx)
+ r.dict = dict
+
+ typ = r.doTyp()
+ assert(typ != nil)
+ pr.retireReader(r)
+ }
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader) doTyp() (res types.Type) {
+ switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag {
+ default:
+ errorf("unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case pkgbits.TypeBasic:
+ return types.Typ[r.Len()]
+
+ case pkgbits.TypeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types.TypeName)
+ if len(targs) != 0 {
+ t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case pkgbits.TypeTypeParam:
+ return r.dict.tparams[r.Len()]
+
+ case pkgbits.TypeArray:
+ len := int64(r.Uint64())
+ return types.NewArray(r.typ(), len)
+ case pkgbits.TypeChan:
+ dir := types.ChanDir(r.Len())
+ return types.NewChan(dir, r.typ())
+ case pkgbits.TypeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case pkgbits.TypePointer:
+ return types.NewPointer(r.typ())
+ case pkgbits.TypeSignature:
+ return r.signature(nil, nil, nil)
+ case pkgbits.TypeSlice:
+ return types.NewSlice(r.typ())
+ case pkgbits.TypeStruct:
+ return r.structType()
+ case pkgbits.TypeInterface:
+ return r.interfaceType()
+ case pkgbits.TypeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader) structType() *types.Struct {
+ fields := make([]*types.Var, r.Len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.String()
+ embedded := r.Bool()
+
+ fields[i] = types.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types.NewStruct(fields, tags)
+}
+
+func (r *reader) unionType() *types.Union {
+ terms := make([]*types.Term, r.Len())
+ for i := range terms {
+ terms[i] = types.NewTerm(r.Bool(), r.typ())
+ }
+ return types.NewUnion(terms)
+}
+
+func (r *reader) interfaceType() *types.Interface {
+ methods := make([]*types.Func, r.Len())
+ embeddeds := make([]types.Type, r.Len())
+ implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool()
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ iface := types.NewInterfaceType(methods, embeddeds)
+ if implicit {
+ iface.MarkImplicit()
+ }
+
+ // We need to call iface.Complete(), but if there are any embedded
+ // defined types, then we may not have set their underlying
+ // interface type yet. So we need to defer calling Complete until
+ // after we've called SetUnderlying everywhere.
+ //
+ // TODO(mdempsky): After CL 424876 lands, it should be safe to call
+ // iface.Complete() immediately.
+ r.p.ifaces = append(r.p.ifaces, iface)
+
+ return iface
+}
+
+func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature {
+ r.Sync(pkgbits.SyncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.Bool()
+
+ return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader) params() *types.Tuple {
+ r.Sync(pkgbits.SyncParams)
+
+ params := make([]*types.Var, r.Len())
+ for i := range params {
+ params[i] = r.param()
+ }
+
+ return types.NewTuple(params...)
+}
+
+func (r *reader) param() *types.Var {
+ r.Sync(pkgbits.SyncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader) obj() (types.Object, []types.Type) {
+ r.Sync(pkgbits.SyncObject)
+
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
+
+ pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
+ obj := pkgScope(pkg).Lookup(name)
+
+ targs := make([]types.Type, r.Len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
+
+ var objPkg *types.Package
+ var objName string
+ var tag pkgbits.CodeObj
+ {
+ rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1)
+
+ objPkg, objName = rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj))
+ pr.retireReader(rname)
+ }
+
+ if tag == pkgbits.ObjStub {
+ assert(objPkg == nil || objPkg == types.Unsafe)
+ return objPkg, objName
+ }
+
+ // Ignore local types promoted to global scope (#55110).
+ if _, suffix := splitVargenSuffix(objName); suffix != "" {
+ return objPkg, objName
+ }
+
+ if objPkg.Scope().Lookup(objName) == nil {
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1)
+ r.dict = dict
+
+ declare := func(obj types.Object) {
+ objPkg.Scope().Insert(obj)
+ }
+
+ switch tag {
+ default:
+ panic("weird")
+
+ case pkgbits.ObjAlias:
+ pos := r.pos()
+ var tparams []*types.TypeParam
+ if r.Version().Has(pkgbits.AliasTypeParamNames) {
+ tparams = r.typeParamNames()
+ }
+ typ := r.typ()
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
+
+ case pkgbits.ObjConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.Value()
+ declare(types.NewConst(pos, objPkg, objName, typ, val))
+
+ case pkgbits.ObjFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ declare(types.NewFunc(pos, objPkg, objName, sig))
+
+ case pkgbits.ObjType:
+ pos := r.pos()
+
+ obj := types.NewTypeName(pos, objPkg, objName, nil)
+ named := types.NewNamed(obj, nil, nil)
+ declare(obj)
+
+ named.SetTypeParams(r.typeParamNames())
+
+ setUnderlying := func(underlying types.Type) {
+ // If the underlying type is an interface, we need to
+ // duplicate its methods so we can replace the receiver
+ // parameter's type (#49906).
+ if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ methods := make([]*types.Func, iface.NumExplicitMethods())
+ for i := range methods {
+ fn := iface.ExplicitMethod(i)
+ sig := fn.Type().(*types.Signature)
+
+ recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ typesinternal.SetVarKind(recv, typesinternal.RecvVar)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
+ }
+
+ embeds := make([]types.Type, iface.NumEmbeddeds())
+ for i := range embeds {
+ embeds[i] = iface.EmbeddedType(i)
+ }
+
+ newIface := types.NewInterfaceType(methods, embeds)
+ r.p.ifaces = append(r.p.ifaces, newIface)
+ underlying = newIface
+ }
+
+ named.SetUnderlying(underlying)
+ }
+
+ // Since go.dev/cl/455279, we can assume rhs.Underlying() will
+ // always be non-nil. However, to temporarily support users of
+ // older snapshot releases, we continue to fallback to the old
+ // behavior for now.
+ //
+ // TODO(mdempsky): Remove fallback code and simplify after
+ // allowing time for snapshot users to upgrade.
+ rhs := r.typ()
+ if underlying := rhs.Underlying(); underlying != nil {
+ setUnderlying(underlying)
+ } else {
+ pk := r.p
+ pk.laterFor(named, func() {
+ // First be sure that the rhs is initialized, if it needs to be initialized.
+ delete(pk.laterFors, named) // prevent cycles
+ if i, ok := pk.laterFors[rhs]; ok {
+ f := pk.laterFns[i]
+ pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op
+ f() // initialize RHS
+ }
+ setUnderlying(rhs.Underlying())
+ })
+ }
+
+ for i, n := 0, r.Len(); i < n; i++ {
+ named.AddMethod(r.method())
+ }
+
+ case pkgbits.ObjVar:
+ pos := r.pos()
+ typ := r.typ()
+ v := types.NewVar(pos, objPkg, objName, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ declare(v)
+ }
+ }
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
+
+ var dict readerDict
+
+ {
+ r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1)
+ if implicits := r.Len(); implicits != 0 {
+ errorf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.Len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.Len())
+ dict.derivedTypes = make([]types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+ if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+ assert(!r.Bool())
+ }
+ }
+
+ pr.retireReader(r)
+ }
+ // function references follow, but reader doesn't need those
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() []*types.TypeParam {
+ r.Sync(pkgbits.SyncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implicit type parameters. This is currently fine, because
+ // reader is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types.NewTypeParam(tname, nil)
+ }
+
+ typs := make([]types.Type, len(r.dict.bounds))
+ for i, bound := range r.dict.bounds {
+ typs[i] = r.p.typIdx(bound, r.dict)
+ }
+
+ // TODO(mdempsky): This is subtle, elaborate further.
+ //
+ // We have to save tparams outside of the closure, because
+ // typeParamNames() can be called multiple times with the same
+ // dictionary instance.
+ //
+ // Also, this needs to happen later to make sure SetUnderlying has
+ // been called.
+ //
+ // TODO(mdempsky): Is it safe to have a single "later" slice or do
+ // we need to have multiple passes? See comments on CL 386002 and
+ // go.dev/issue/52104.
+ tparams := r.dict.tparams
+ r.p.later(func() {
+ for i, typ := range typs {
+ tparams[i].SetConstraint(typ)
+ }
+ })
+
+ return r.dict.tparams
+}
+
+func (r *reader) method() *types.Func {
+ r.Sync(pkgbits.SyncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rparams := r.typeParamNames()
+ sig := r.signature(r.param(), rparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) }
+func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) }
+func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) }
+
+func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) {
+ r.Sync(marker)
+ return r.pkg(), r.String()
+}
+
+// pkgScope returns pkg.Scope().
+// If pkg is nil, it returns types.Universe instead.
+//
+// TODO(mdempsky): Remove after x/tools can depend on Go 1.19.
+func pkgScope(pkg *types.Package) *types.Scope {
+ if pkg != nil {
+ return pkg.Scope()
+ }
+ return types.Universe
+}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
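
Aside (not part of the vendored file): `splitVargenSuffix` above strips the `·N` suffix the compiler appends when a function-local type is promoted to package scope, which is how `objIdx` skips such objects (#55110). A minimal sketch of its behavior, written as if it lived inside the package:

// Hypothetical snippet, not part of the vendored diff.
package gcimporter

import "fmt"

func demoSplitVargenSuffix() {
	base, suffix := splitVargenSuffix("T·3")
	fmt.Println(base, suffix) // prints: T ·3
	base, suffix = splitVargenSuffix("T")
	fmt.Println(base, suffix) // prints: T (empty suffix)
}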
diff --git a/operator/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke.go
new file mode 100644
index 00000000..58721202
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -0,0 +1,567 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gocommand is a helper for calling the go command.
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/tools/internal/event"
+ "golang.org/x/tools/internal/event/keys"
+ "golang.org/x/tools/internal/event/label"
+)
+
+// A Runner will run go command invocations and serialize
+// them if it sees a concurrency error.
+type Runner struct {
+ // once guards the runner initialization.
+ once sync.Once
+
+ // inFlight tracks available workers.
+ inFlight chan struct{}
+
+ // serialized guards the ability to run a go command serially,
+ // to avoid deadlocks when claiming workers.
+ serialized chan struct{}
+}
+
+const maxInFlight = 10
+
+func (runner *Runner) initialize() {
+ runner.once.Do(func() {
+ runner.inFlight = make(chan struct{}, maxInFlight)
+ runner.serialized = make(chan struct{}, 1)
+ })
+}
+
+// 1.13: go: updates to go.mod needed, but contents have changed
+// 1.14: go: updating go.mod: existing contents have changed since last read
+var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
+
+// event keys for go command invocations
+var (
+ verb = keys.NewString("verb", "go command verb")
+ directory = keys.NewString("directory", "")
+)
+
+func invLabels(inv Invocation) []label.Label {
+ return []label.Label{verb.Of(inv.Verb), directory.Of(inv.WorkingDir)}
+}
+
+// Run is a convenience wrapper around RunRaw.
+// It returns only stdout and a "friendly" error.
+func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...)
+ defer done()
+
+ stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
+ return stdout, friendly
+}
+
+// RunPiped runs the invocation serially, always waiting for any concurrent
+// invocations to complete first.
+func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...)
+ defer done()
+
+ _, err := runner.runPiped(ctx, inv, stdout, stderr)
+ return err
+}
+
+// RunRaw runs the invocation, serializing requests only if they fight over
+// go.mod changes.
+// Postcondition: both error results have same nilness.
+func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...)
+ defer done()
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // First, try to run the go command concurrently.
+ stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
+
+ // If we encounter a load concurrency error, we need to retry serially.
+ if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) {
+ event.Error(ctx, "Load concurrency error, will retry serially", err)
+
+ // Run serially by calling runPiped.
+ stdout.Reset()
+ stderr.Reset()
+ friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
+ }
+
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
+ // Wait for 1 worker to become available.
+ select {
+ case <-ctx.Done():
+ return nil, nil, ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ defer func() { <-runner.inFlight }()
+ }
+
+ stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+ friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
+ return stdout, stderr, friendlyErr, err
+}
+
+// Postcondition: both error results have same nilness.
+func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
+ // Make sure the runner is always initialized.
+ runner.initialize()
+
+ // Acquire the serialization lock. This avoids deadlocks between two
+ // runPiped commands.
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.serialized <- struct{}{}:
+ defer func() { <-runner.serialized }()
+ }
+
+ // Wait for all in-progress go commands to return before proceeding,
+ // to avoid load concurrency errors.
+ for range maxInFlight {
+ select {
+ case <-ctx.Done():
+ return ctx.Err(), ctx.Err()
+ case runner.inFlight <- struct{}{}:
+ // Make sure we always "return" any workers we took.
+ defer func() { <-runner.inFlight }()
+ }
+ }
+
+ return inv.runWithFriendlyError(ctx, stdout, stderr)
+}
+
+// An Invocation represents a call to the go command.
+type Invocation struct {
+ Verb string
+ Args []string
+ BuildFlags []string
+
+ // If ModFlag is set, the go command is invoked with -mod=ModFlag.
+ // TODO(rfindley): remove, in favor of Args.
+ ModFlag string
+
+ // If ModFile is set, the go command is invoked with -modfile=ModFile.
+ // TODO(rfindley): remove, in favor of Args.
+ ModFile string
+
+ // Overlay is the name of the JSON overlay file that describes
+ // unsaved editor buffers; see [WriteOverlays].
+ // If set, the go command is invoked with -overlay=Overlay.
+ // TODO(rfindley): remove, in favor of Args.
+ Overlay string
+
+ // If CleanEnv is set, the invocation will run only with the environment
+ // in Env, not starting with os.Environ.
+ CleanEnv bool
+ Env []string
+ WorkingDir string
+ Logf func(format string, args ...any)
+}
+
+// Postcondition: both error results have same nilness.
+func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
+ rawError = i.run(ctx, stdout, stderr)
+ if rawError != nil {
+ friendlyError = rawError
+ // Check for 'go' executable not being found.
+ if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
+ friendlyError = fmt.Errorf("go command required, not found: %v", ee)
+ }
+ if ctx.Err() != nil {
+ friendlyError = ctx.Err()
+ }
+ friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
+ }
+ return
+}
+
+// logf logs if i.Logf is non-nil.
+func (i *Invocation) logf(format string, args ...any) {
+ if i.Logf != nil {
+ i.Logf(format, args...)
+ }
+}
+
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
+ goArgs := []string{i.Verb}
+
+ appendModFile := func() {
+ if i.ModFile != "" {
+ goArgs = append(goArgs, "-modfile="+i.ModFile)
+ }
+ }
+ appendModFlag := func() {
+ if i.ModFlag != "" {
+ goArgs = append(goArgs, "-mod="+i.ModFlag)
+ }
+ }
+ appendOverlayFlag := func() {
+ if i.Overlay != "" {
+ goArgs = append(goArgs, "-overlay="+i.Overlay)
+ }
+ }
+
+ switch i.Verb {
+ case "env", "version":
+ goArgs = append(goArgs, i.Args...)
+ case "mod":
+ // mod needs the sub-verb before flags.
+ goArgs = append(goArgs, i.Args[0])
+ appendModFile()
+ goArgs = append(goArgs, i.Args[1:]...)
+ case "get":
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ goArgs = append(goArgs, i.Args...)
+
+ default: // notably list and build.
+ goArgs = append(goArgs, i.BuildFlags...)
+ appendModFile()
+ appendModFlag()
+ appendOverlayFlag()
+ goArgs = append(goArgs, i.Args...)
+ }
+ cmd := exec.Command("go", goArgs...)
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+
+ // https://go.dev/issue/59541: don't wait forever copying stderr
+ // after the command has exited.
+ // After CL 484741 we copy stdout manually, so we'll stop reading that as
+ // soon as ctx is done. However, we also don't want to wait around forever
+ // for stderr. Give a much-longer-than-reasonable delay and then assume that
+ // something has wedged in the kernel or runtime.
+ cmd.WaitDelay = 30 * time.Second
+
+ // The cwd gets resolved to the real path. On Darwin, where
+ // /tmp is a symlink, this breaks anything that expects the
+ // working directory to keep the original path, including the
+ // go command when dealing with modules.
+ //
+ // os.Getwd has a special feature where if the cwd and the PWD
+ // are the same node then it trusts the PWD, so by setting it
+ // in the env for the child process we fix up all the paths
+ // returned by the go command.
+ if !i.CleanEnv {
+ cmd.Env = os.Environ()
+ }
+ cmd.Env = append(cmd.Env, i.Env...)
+ if i.WorkingDir != "" {
+ cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
+ cmd.Dir = i.WorkingDir
+ }
+
+ debugStr := cmdDebugStr(cmd)
+ i.logf("starting %v", debugStr)
+ start := time.Now()
+ defer func() {
+ i.logf("%s for %v", time.Since(start), debugStr)
+ }()
+
+ return runCmdContext(ctx, cmd)
+}
+
+// DebugHangingGoCommands may be set by tests to enable additional
+// instrumentation (including panics) for debugging hanging Go commands.
+//
+// See golang/go#54461 for details.
+var DebugHangingGoCommands = false
+
+// runCmdContext is like exec.CommandContext except it sends os.Interrupt
+// before os.Kill.
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
+ // If cmd.Stdout is not an *os.File, the exec package will create a pipe and
+ // copy it to the Writer in a goroutine until the process has finished and
+ // either the pipe reaches EOF or command's WaitDelay expires.
+ //
+ // However, the output from 'go list' can be quite large, and we don't want to
+ // keep reading (and allocating buffers) if we've already decided we don't
+ // care about the output. We don't want to wait for the process to finish, and
+ // we don't want to wait for the WaitDelay to expire either.
+ //
+ // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace
+ // it with a pipe (which is an *os.File), which we can close in order to stop
+ // copying output as soon as we realize we don't care about it.
+ var stdoutW *os.File
+ if cmd.Stdout != nil {
+ if _, ok := cmd.Stdout.(*os.File); !ok {
+ var stdoutR *os.File
+ stdoutR, stdoutW, err = os.Pipe()
+ if err != nil {
+ return err
+ }
+ prevStdout := cmd.Stdout
+ cmd.Stdout = stdoutW
+
+ stdoutErr := make(chan error, 1)
+ go func() {
+ _, err := io.Copy(prevStdout, stdoutR)
+ if err != nil {
+ err = fmt.Errorf("copying stdout: %w", err)
+ }
+ stdoutErr <- err
+ }()
+ defer func() {
+ // We started a goroutine to copy a stdout pipe.
+ // Wait for it to finish, or terminate it if need be.
+ var err2 error
+ select {
+ case err2 = <-stdoutErr:
+ stdoutR.Close()
+ case <-ctx.Done():
+ stdoutR.Close()
+ // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close
+ // should cause the Read call in io.Copy to unblock and return
+ // immediately, but we still need to receive from stdoutErr to confirm
+ // that it has happened.
+ <-stdoutErr
+ err2 = ctx.Err()
+ }
+ if err == nil {
+ err = err2
+ }
+ }()
+
+ // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the
+ // same writer, and have a type that can be compared with ==, at most
+ // one goroutine at a time will call Write.”
+ //
+ // Since we're starting a goroutine that writes to cmd.Stdout, we must
+ // also update cmd.Stderr so that it still holds.
+ func() {
+ defer func() { recover() }()
+ if cmd.Stderr == prevStdout {
+ cmd.Stderr = cmd.Stdout
+ }
+ }()
+ }
+ }
+
+ startTime := time.Now()
+ err = cmd.Start()
+ if stdoutW != nil {
+ // The child process has inherited the pipe file,
+ // so close the copy held in this process.
+ stdoutW.Close()
+ stdoutW = nil
+ }
+ if err != nil {
+ return err
+ }
+
+ resChan := make(chan error, 1)
+ go func() {
+ resChan <- cmd.Wait()
+ }()
+
+ // If we're interested in debugging hanging Go commands, stop waiting after a
+ // minute and panic with interesting information.
+ debug := DebugHangingGoCommands
+ if debug {
+ timer := time.NewTimer(1 * time.Minute)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ // HandleHangingGoCommand terminates this process.
+ // Pass off resChan in case we can collect the command error.
+ handleHangingGoCommand(startTime, cmd, resChan)
+ case <-ctx.Done():
+ }
+ } else {
+ select {
+ case err := <-resChan:
+ return err
+ case <-ctx.Done():
+ }
+ }
+
+ // Cancelled. Interrupt and see if it ends voluntarily.
+ if err := cmd.Process.Signal(os.Interrupt); err == nil {
+ // (We used to wait only 1s but this proved
+ // fragile on loaded builder machines.)
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+ select {
+ case err := <-resChan:
+ return err
+ case <-timer.C:
+ }
+ }
+
+ // Didn't shut down in response to interrupt. Kill it hard.
+ if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
+ log.Printf("error killing the Go command: %v", err)
+ }
+
+ return <-resChan
+}
+
+// handleHangingGoCommand outputs debugging information to help diagnose the
+// cause of a hanging Go command, and then exits with log.Fatalf.
+func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
+ switch runtime.GOOS {
+ case "linux", "darwin", "freebsd", "netbsd", "openbsd":
+ fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
+
+ The gopls test runner has detected a hanging go command. In order to debug
+ this, the output of ps and lsof/fstat is printed below.
+
+ See golang/go#54461 for more details.`)
+
+ fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
+ fmt.Fprintln(os.Stderr, "-------------------------")
+ psCmd := exec.Command("ps", "axo", "ppid,pid,command")
+ psCmd.Stdout = os.Stderr
+ psCmd.Stderr = os.Stderr
+ if err := psCmd.Run(); err != nil {
+ log.Printf("Handling hanging Go command: running ps: %v", err)
+ }
+
+ listFiles := "lsof"
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ listFiles = "fstat"
+ }
+
+ fmt.Fprintln(os.Stderr, "\n"+listFiles+":")
+ fmt.Fprintln(os.Stderr, "-----")
+ listFilesCmd := exec.Command(listFiles)
+ listFilesCmd.Stdout = os.Stderr
+ listFilesCmd.Stderr = os.Stderr
+ if err := listFilesCmd.Run(); err != nil {
+ log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
+ }
+ // Try to extract information about the slow go process by issuing a SIGQUIT.
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+ select {
+ case err := <-resChan:
+ stderr := "not a bytes.Buffer"
+ if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+ stderr = buf.String()
+ }
+ log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+ case <-time.After(5 * time.Second):
+ }
+ } else {
+ log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
+ }
+ }
+ log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
+}
+
+func cmdDebugStr(cmd *exec.Cmd) string {
+ env := make(map[string]string)
+ for _, kv := range cmd.Env {
+ split := strings.SplitN(kv, "=", 2)
+ if len(split) == 2 {
+ k, v := split[0], split[1]
+ env[k] = v
+ }
+ }
+
+ var args []string
+ for _, arg := range cmd.Args {
+ quoted := strconv.Quote(arg)
+ if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") {
+ args = append(args, quoted)
+ } else {
+ args = append(args, arg)
+ }
+ }
+ return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " "))
+}
+
+// WriteOverlays writes each value in the overlay (see the Overlay
+// field of go/packages.Config) to a temporary file and returns the name
+// of a JSON file describing the mapping that is suitable for the "go
+// list -overlay" flag.
+//
+// On success, the caller must call the cleanup function exactly once
+// when the files are no longer needed.
+func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(), err error) {
+ // Do nothing if there are no overlays in the config.
+ if len(overlay) == 0 {
+ return "", func() {}, nil
+ }
+
+ dir, err := os.MkdirTemp("", "gocommand-*")
+ if err != nil {
+ return "", nil, err
+ }
+
+ // The caller must clean up this directory,
+ // unless this function returns an error.
+ // (The cleanup operand of each return
+ // statement below is ignored.)
+ defer func() {
+ cleanup = func() {
+ os.RemoveAll(dir)
+ }
+ if err != nil {
+ cleanup()
+ cleanup = nil
+ }
+ }()
+
+ // Write each map entry to a temporary file.
+ overlays := make(map[string]string)
+ for k, v := range overlay {
+ // Use a unique basename for each file (001-foo.go),
+ // to avoid creating nested directories.
+ base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
+ filename := filepath.Join(dir, base)
+ err := os.WriteFile(filename, v, 0666)
+ if err != nil {
+ return "", nil, err
+ }
+ overlays[k] = filename
+ }
+
+ // Write the JSON overlay file that maps logical file names to temp files.
+ //
+ // OverlayJSON is the format overlay files are expected to be in.
+ // The Replace map maps from overlaid paths to replacement paths:
+ // the Go command will forward all reads trying to open
+ // each overlaid path to its replacement path, or consider the overlaid
+ // path not to exist if the replacement path is empty.
+ //
+ // From golang/go#39958.
+ type OverlayJSON struct {
+ Replace map[string]string `json:"replace,omitempty"`
+ }
+ b, err := json.Marshal(OverlayJSON{Replace: overlays})
+ if err != nil {
+ return "", nil, err
+ }
+ filename = filepath.Join(dir, "overlay.json")
+ if err := os.WriteFile(filename, b, 0666); err != nil {
+ return "", nil, err
+ }
+
+ return filename, nil, nil
+}
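
For orientation (not part of the vendored file), here is a hypothetical sketch of how WriteOverlays pairs with Invocation.Overlay. The directory and file contents are invented, and since the package is internal this would only ever live inside x/tools itself:

// Hypothetical usage sketch; paths and contents are made up.
package gocommand

import (
	"context"
	"fmt"
)

func demoOverlayList() error {
	overlay := map[string][]byte{
		"/work/mod/extra.go": []byte("package mod\n\nconst Extra = 1\n"),
	}
	overlayJSON, cleanup, err := WriteOverlays(overlay)
	if err != nil {
		return err
	}
	defer cleanup()

	runner := &Runner{}
	inv := Invocation{
		Verb:       "list",
		Args:       []string{"./..."},
		Overlay:    overlayJSON, // forwarded as -overlay=<file> for list/build-style verbs
		WorkingDir: "/work/mod",
	}
	stdout, err := runner.Run(context.Background(), inv)
	if err != nil {
		return err
	}
	fmt.Print(stdout.String())
	return nil
}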
diff --git a/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 00000000..469c648e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 00000000..169d37c8
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/operator/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/operator/vendor/golang.org/x/tools/internal/gocommand/vendor.go
new file mode 100644
index 00000000..e38d1fb4
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gocommand/vendor.go
@@ -0,0 +1,163 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "golang.org/x/mod/semver"
+)
+
+// ModuleJSON holds information about a module.
+type ModuleJSON struct {
+ Path string // module path
+ Version string // module version
+ Versions []string // available module versions (with -versions)
+ Replace *ModuleJSON // replaced by this module
+ Time *time.Time // time version was created
+ Update *ModuleJSON // available update, if any (with -u)
+ Main bool // is this the main module?
+ Indirect bool // is this module only an indirect dependency of main module?
+ Dir string // directory holding files for this module, if any
+ GoMod string // path to go.mod file used when loading this module, if any
+ GoVersion string // go version used in module
+}
+
+var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
+
+// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go
+func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) {
+ mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
+ if err != nil {
+ return false, nil, err
+ }
+
+ // We check the GOFLAGS to see if there is anything overridden or not.
+ inv.Verb = "env"
+ inv.Args = []string{"GOFLAGS"}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return false, nil, err
+ }
+ goflags := string(bytes.TrimSpace(stdout.Bytes()))
+ matches := modFlagRegexp.FindStringSubmatch(goflags)
+ var modFlag string
+ if len(matches) != 0 {
+ modFlag = matches[1]
+ }
+ // Don't override an explicit '-mod=' argument.
+ if modFlag == "vendor" {
+ return true, mainMod, nil
+ } else if modFlag != "" {
+ return false, nil, nil
+ }
+ if mainMod == nil || !go114 {
+ return false, nil, nil
+ }
+ // Check 1.14's automatic vendor mode.
+ if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
+ if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
+ // The Go version is at least 1.14, and a vendor directory exists.
+ // Set -mod=vendor by default.
+ return true, mainMod, nil
+ }
+ }
+ return false, nil, nil
+}
+
+// getMainModuleAnd114 gets one of the main modules' information and whether the
+// go command in use is 1.14+. This is the information needed to figure out
+// if vendoring should be enabled.
+func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
+ const format = `{{.Path}}
+{{.Dir}}
+{{.GoMod}}
+{{.GoVersion}}
+{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
+`
+ inv.Verb = "list"
+ inv.Args = []string{"-m", "-f", format}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return nil, false, err
+ }
+
+ lines := strings.Split(stdout.String(), "\n")
+ if len(lines) < 5 {
+ return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
+ }
+ mod := &ModuleJSON{
+ Path: lines[0],
+ Dir: lines[1],
+ GoMod: lines[2],
+ GoVersion: lines[3],
+ Main: true,
+ }
+ return mod, lines[4] == "go1.14", nil
+}
+
+// WorkspaceVendorEnabled reports whether workspace vendoring is enabled. It takes a *Runner to execute Go commands
+// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
+// of which only Verb and Args are modified to run the appropriate Go command.
+// Inspired by setDefaultBuildMod in modload/init.go
+func WorkspaceVendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, []*ModuleJSON, error) {
+ inv.Verb = "env"
+ inv.Args = []string{"GOWORK"}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return false, nil, err
+ }
+ goWork := string(bytes.TrimSpace(stdout.Bytes()))
+ if fi, err := os.Stat(filepath.Join(filepath.Dir(goWork), "vendor")); err == nil && fi.IsDir() {
+ mainMods, err := getWorkspaceMainModules(ctx, inv, r)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, mainMods, nil
+ }
+ return false, nil, nil
+}
+
+// getWorkspaceMainModules gets the main modules' information.
+// This is the information needed to figure out if vendoring should be enabled.
+func getWorkspaceMainModules(ctx context.Context, inv Invocation, r *Runner) ([]*ModuleJSON, error) {
+ const format = `{{.Path}}
+{{.Dir}}
+{{.GoMod}}
+{{.GoVersion}}
+`
+ inv.Verb = "list"
+ inv.Args = []string{"-m", "-f", format}
+ stdout, err := r.Run(ctx, inv)
+ if err != nil {
+ return nil, err
+ }
+
+ lines := strings.Split(strings.TrimSuffix(stdout.String(), "\n"), "\n")
+ if len(lines) < 4 {
+ return nil, fmt.Errorf("unexpected stdout: %q", stdout.String())
+ }
+ mods := make([]*ModuleJSON, 0, len(lines)/4)
+ for i := 0; i < len(lines); i += 4 {
+ mods = append(mods, &ModuleJSON{
+ Path: lines[i],
+ Dir: lines[i+1],
+ GoMod: lines[i+2],
+ GoVersion: lines[i+3],
+ Main: true,
+ })
+ }
+ return mods, nil
+}
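
A brief, hypothetical illustration (not part of the vendored file) of how a caller might consult VendorEnabled; the working directory is made up:

// Hypothetical usage sketch.
package gocommand

import (
	"context"
	"fmt"
)

func demoVendorCheck() {
	runner := &Runner{}
	inv := Invocation{WorkingDir: "/work/mod"} // made-up module directory
	enabled, mainMod, err := VendorEnabled(context.Background(), inv, runner)
	if err != nil || !enabled {
		return
	}
	// When vendoring applies, callers typically pass -mod=vendor themselves.
	fmt.Println("vendor mode for", mainMod.Path)
}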
diff --git a/operator/vendor/golang.org/x/tools/internal/gocommand/version.go b/operator/vendor/golang.org/x/tools/internal/gocommand/version.go
new file mode 100644
index 00000000..446c5846
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/gocommand/version.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gocommand
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// GoVersion reports the minor version number of the highest release
+// tag built into the go command on the PATH.
+//
+// Note that this may be higher than the version of the go tool used
+// to build this application, and thus the versions of the standard
+// go/{scanner,parser,ast,types} packages that are linked into it.
+// In that case, callers should either downgrade to the version of
+// go used to build the application, or report an error that the
+// application is too old to use the go command on the PATH.
+func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) {
+ inv.Verb = "list"
+ inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`}
+ inv.BuildFlags = nil // This is not a build command.
+ inv.ModFlag = ""
+ inv.ModFile = ""
+ inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off")
+
+ stdoutBytes, err := r.Run(ctx, inv)
+ if err != nil {
+ return 0, err
+ }
+ stdout := stdoutBytes.String()
+ if len(stdout) < 3 {
+ return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout)
+ }
+ // Split up "[go1.1 go1.15]" and return highest go1.X value.
+ tags := strings.Fields(stdout[1 : len(stdout)-2])
+ for i := len(tags) - 1; i >= 0; i-- {
+ var version int
+ if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil {
+ continue
+ }
+ return version, nil
+ }
+ return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags)
+}
+
+// GoVersionOutput returns the complete output of the go version command.
+func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) {
+ inv.Verb = "version"
+ goVersion, err := r.Run(ctx, inv)
+ if err != nil {
+ return "", err
+ }
+ return goVersion.String(), nil
+}
+
+// ParseGoVersionOutput extracts the Go version string
+// from the output of the "go version" command.
+// Given an unrecognized form, it returns an empty string.
+func ParseGoVersionOutput(data string) string {
+ re := regexp.MustCompile(`^go version (go\S+|devel \S+)`)
+ m := re.FindStringSubmatch(data)
+ if len(m) != 2 {
+ return "" // unrecognized version
+ }
+ return m[1]
+}
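
For reference (not part of the vendored file), ParseGoVersionOutput only recognizes the two forms matched by the regexp above; a tiny sketch:

// Hypothetical snippet showing the accepted forms.
package gocommand

import "fmt"

func demoParseGoVersion() {
	fmt.Println(ParseGoVersionOutput("go version go1.23.4 linux/amd64"))      // go1.23.4
	fmt.Println(ParseGoVersionOutput("go version devel +a1b2c3 linux/amd64")) // devel +a1b2c3
	fmt.Println(ParseGoVersionOutput("something else") == "")                 // true
}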
diff --git a/operator/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/operator/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
new file mode 100644
index 00000000..929b470b
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packagesinternal exposes internal-only fields from go/packages.
+package packagesinternal
+
+import "fmt"
+
+var GetDepsErrors = func(p any) []*PackageError { return nil }
+
+type PackageError struct {
+ ImportStack []string // shortest path from package named on command line to this one
+ Pos string // position of error (if present, file:line:col)
+ Err string // the error itself
+}
+
+func (err PackageError) String() string {
+ return fmt.Sprintf("%s: %s (import stack: %s)", err.Pos, err.Err, err.ImportStack)
+}
+
+var TypecheckCgo int
+var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/codes.go
new file mode 100644
index 00000000..f0cabde9
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/codes.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A Code is an enum value that can be encoded into bitstreams.
+//
+// Code types are preferable for enum types, because they allow
+// Decoder to detect desyncs.
+type Code interface {
+ // Marker returns the SyncMarker for the Code's dynamic type.
+ Marker() SyncMarker
+
+ // Value returns the Code's ordinal value.
+ Value() int
+}
+
+// A CodeVal distinguishes among go/constant.Value encodings.
+type CodeVal int
+
+func (c CodeVal) Marker() SyncMarker { return SyncVal }
+func (c CodeVal) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ValBool CodeVal = iota
+ ValString
+ ValInt64
+ ValBigInt
+ ValBigRat
+ ValBigFloat
+)
+
+// A CodeType distinguishes among go/types.Type encodings.
+type CodeType int
+
+func (c CodeType) Marker() SyncMarker { return SyncType }
+func (c CodeType) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ TypeBasic CodeType = iota
+ TypeNamed
+ TypePointer
+ TypeSlice
+ TypeArray
+ TypeChan
+ TypeMap
+ TypeSignature
+ TypeStruct
+ TypeInterface
+ TypeUnion
+ TypeTypeParam
+)
+
+// A CodeObj distinguishes among go/types.Object encodings.
+type CodeObj int
+
+func (c CodeObj) Marker() SyncMarker { return SyncCodeObj }
+func (c CodeObj) Value() int { return int(c) }
+
+// Note: These values are public and cannot be changed without
+// updating the go/types importers.
+
+const (
+ ObjAlias CodeObj = iota
+ ObjConst
+ ObjType
+ ObjFunc
+ ObjVar
+ ObjStub
+)
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
new file mode 100644
index 00000000..c0aba26c
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -0,0 +1,519 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+)
+
+// A PkgDecoder provides methods for decoding a package's Unified IR
+// export data.
+type PkgDecoder struct {
+ // version is the file format version.
+ version Version
+
+ // sync indicates whether the file uses sync markers.
+ sync bool
+
+ // pkgPath is the package path for the package to be decoded.
+ //
+ // TODO(mdempsky): Remove; unneeded since CL 391014.
+ pkgPath string
+
+ // elemData is the full data payload of the encoded package.
+ // Elements are densely and contiguously packed together.
+ //
+ // The last 8 bytes of elemData are the package fingerprint.
+ elemData string
+
+ // elemEnds stores the byte-offset end positions of element
+ // bitstreams within elemData.
+ //
+ // For example, element I's bitstream data starts at elemEnds[I-1]
+ // (or 0, if I==0) and ends at elemEnds[I].
+ //
+ // Note: elemEnds is indexed by absolute indices, not
+ // section-relative indices.
+ elemEnds []uint32
+
+ // elemEndsEnds stores the index-offset end positions of relocation
+ // sections within elemEnds.
+ //
+ // For example, section K's end positions start at elemEndsEnds[K-1]
+ // (or 0, if K==0) and end at elemEndsEnds[K].
+ elemEndsEnds [numRelocs]uint32
+
+ scratchRelocEnt []RelocEnt
+}
+
+// PkgPath returns the package path for the package
+//
+// TODO(mdempsky): Remove; unneeded since CL 391014.
+func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath }
+
+// SyncMarkers reports whether pr uses sync markers.
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
+
+// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
+// IR export data from input. pkgPath is the package path for the
+// compilation unit that produced the export data.
+func NewPkgDecoder(pkgPath, input string) PkgDecoder {
+ pr := PkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ var ver uint32
+ assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+ pr.version = Version(ver)
+
+ if pr.version >= numVersions {
+ panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+ }
+
+ if pr.version.Has(Flags) {
+ var flags uint32
+ assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
+ pr.sync = flags&flagSyncMarkers != 0
+ }
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, io.SeekCurrent)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+
+ const fingerprintSize = 8
+ assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+// NumElems returns the number of elements in section k.
+func (pr *PkgDecoder) NumElems(k RelocKind) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+// TotalElems returns the total number of elements across all sections.
+func (pr *PkgDecoder) TotalElems() int {
+ return len(pr.elemEnds)
+}
+
+// Fingerprint returns the package fingerprint.
+func (pr *PkgDecoder) Fingerprint() [8]byte {
+ var fp [8]byte
+ copy(fp[:], pr.elemData[len(pr.elemData)-8:])
+ return fp
+}
+
+// AbsIdx returns the absolute index for the given (section, index)
+// pair.
+func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
+ absIdx := int(idx)
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+// DataIdx returns the raw element bitstream for the given (section,
+// index) pair.
+func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string {
+ absIdx := pr.AbsIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+// StringIdx returns the string value for the given string index.
+func (pr *PkgDecoder) StringIdx(idx Index) string {
+ return pr.DataIdx(RelocString, idx)
+}
+
+// NewDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.NewDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+// TempDecoder returns a Decoder for the given (section, index) pair,
+// and decodes the given SyncMarker from the element bitstream.
+// If possible, the Decoder should be RetireDecoder'd when it is no longer
+// needed; this avoids heap allocations.
+func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder {
+ r := pr.TempDecoderRaw(k, idx)
+ r.Sync(marker)
+ return r
+}
+
+func (pr *PkgDecoder) RetireDecoder(d *Decoder) {
+ pr.scratchRelocEnt = d.Relocs
+ d.Relocs = nil
+}
+
+// NewDecoderRaw returns a Decoder for the given (section, index) pair.
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ r.Relocs = make([]RelocEnt, r.Len())
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+ r := Decoder{
+ common: pr,
+ k: k,
+ Idx: idx,
+ }
+
+ r.Data.Reset(pr.DataIdx(k, idx))
+ r.Sync(SyncRelocs)
+ l := r.Len()
+ if cap(pr.scratchRelocEnt) >= l {
+ r.Relocs = pr.scratchRelocEnt[:l]
+ pr.scratchRelocEnt = nil
+ } else {
+ r.Relocs = make([]RelocEnt, l)
+ }
+ for i := range r.Relocs {
+ r.Sync(SyncReloc)
+ r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+ }
+
+ return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+ common *PkgDecoder
+
+ Relocs []RelocEnt
+ Data strings.Reader
+
+ k RelocKind
+ Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+ if err != nil {
+ panicf("unexpected decoding error: %w", err)
+ }
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+ x, err := readUvarint(&r.Data)
+ r.checkErr(err)
+ return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+ var x uint64
+ var s uint
+ for i := range binary.MaxVarintLen64 {
+ b, err := r.ReadByte()
+ if err != nil {
+ if i > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return x, err
+ }
+ if b < 0x80 {
+ if i == binary.MaxVarintLen64-1 && b > 1 {
+ return x, overflow
+ }
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+ e := r.Relocs[idx]
+ assert(e.Kind == k)
+ return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+ if !r.common.sync {
+ return
+ }
+
+ pos, _ := r.Data.Seek(0, io.SeekCurrent)
+ mHave := SyncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+// Bool decodes and returns a bool value from the element bitstream.
+func (r *Decoder) Bool() bool {
+ r.Sync(SyncBool)
+ x, err := r.Data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+// Int64 decodes and returns an int64 value from the element bitstream.
+func (r *Decoder) Int64() int64 {
+ r.Sync(SyncInt64)
+ return r.rawVarint()
+}
+
+// Uint64 decodes and returns a uint64 value from the element bitstream.
+func (r *Decoder) Uint64() uint64 {
+ r.Sync(SyncUint64)
+ return r.rawUvarint()
+}
+
+// Len decodes and returns a non-negative int value from the element bitstream.
+func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v }
+
+// Int decodes and returns an int value from the element bitstream.
+func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }
+
+// Uint decodes and returns a uint value from the element bitstream.
+func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+// Code decodes a Code value from the element bitstream and returns
+// its ordinal value. It's the caller's responsibility to convert the
+// result to an appropriate Code type.
+//
+// TODO(mdempsky): Ideally this method would have signature "Code[T
+// Code] T" instead, but we don't allow generic methods and the
+// compiler can't depend on generics yet anyway.
+func (r *Decoder) Code(mark SyncMarker) int {
+ r.Sync(mark)
+ return r.Len()
+}
+
+// Reloc decodes a relocation of expected section k from the element
+// bitstream and returns an index to the referenced element.
+func (r *Decoder) Reloc(k RelocKind) Index {
+ r.Sync(SyncUseReloc)
+ return r.rawReloc(k, r.Len())
+}
+
+// String decodes and returns a string value from the element
+// bitstream.
+func (r *Decoder) String() string {
+ r.Sync(SyncString)
+ return r.common.StringIdx(r.Reloc(RelocString))
+}
+
+// Strings decodes and returns a variable-length slice of strings from
+// the element bitstream.
+func (r *Decoder) Strings() []string {
+ res := make([]string, r.Len())
+ for i := range res {
+ res[i] = r.String()
+ }
+ return res
+}
+
+// Value decodes and returns a constant.Value from the element
+// bitstream.
+func (r *Decoder) Value() constant.Value {
+ r.Sync(SyncValue)
+ isComplex := r.Bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *Decoder) scalar() constant.Value {
+ switch tag := CodeVal(r.Code(SyncVal)); tag {
+ default:
+ panic(fmt.Errorf("unexpected scalar tag: %v", tag))
+
+ case ValBool:
+ return constant.MakeBool(r.Bool())
+ case ValString:
+ return constant.MakeString(r.String())
+ case ValInt64:
+ return constant.MakeInt64(r.Int64())
+ case ValBigInt:
+ return constant.Make(r.bigInt())
+ case ValBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case ValBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *Decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.String()))
+ if r.Bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *Decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.String())) == nil)
+ return v
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+// PeekPkgPath returns the package path for the specified package
+// index.
+func (pr *PkgDecoder) PeekPkgPath(idx Index) string {
+ var path string
+ {
+ r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef)
+ path = r.String()
+ pr.RetireDecoder(&r)
+ }
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+// PeekObj returns the package path, object name, and CodeObj for the
+// specified object index.
+func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
+ var ridx Index
+ var name string
+ var rcode int
+ {
+ r := pr.TempDecoder(RelocName, idx, SyncObject1)
+ r.Sync(SyncSym)
+ r.Sync(SyncPkg)
+ ridx = r.Reloc(RelocPkg)
+ name = r.String()
+ rcode = r.Code(SyncCodeObj)
+ pr.RetireDecoder(&r)
+ }
+
+ path := pr.PeekPkgPath(ridx)
+ assert(name != "")
+
+ tag := CodeObj(rcode)
+
+ return path, name, tag
+}
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/doc.go
new file mode 100644
index 00000000..c8a2796b
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkgbits implements low-level coding abstractions for
+// Unified IR's export data format.
+//
+// At a low-level, a package is a collection of bitstream elements.
+// Each element has a "kind" and a dense, non-negative index.
+// Elements can be randomly accessed given their kind and index.
+//
+// Individual elements are sequences of variable-length values (e.g.,
+// integers, booleans, strings, go/constant values, cross-references
+// to other elements). Package pkgbits provides APIs for encoding and
+// decoding these low-level values, but the details of mapping
+// higher-level Go constructs into elements is left to higher-level
+// abstractions.
+//
+// Elements may cross-reference each other with "relocations." For
+// example, an element representing a pointer type has a relocation
+// referring to the element type.
+//
+// Go constructs may be composed as a constellation of multiple
+// elements. For example, a declared function may have one element to
+// describe the object (e.g., its name, type, position), and a
+// separate element to describe its function body. This allows readers
+// some flexibility in efficiently seeking or re-reading data (e.g.,
+// inlining requires re-reading the function body for each inlined
+// call, without needing to re-read the object-level details).
+//
+// This is a copy of internal/pkgbits in the Go implementation.
+package pkgbits
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
new file mode 100644
index 00000000..c17a1239
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -0,0 +1,392 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+ "strings"
+)
+
+// A PkgEncoder provides methods for encoding a package's Unified IR
+// export data.
+type PkgEncoder struct {
+ // version of the bitstream.
+ version Version
+
+ // elems holds the bitstream for previously encoded elements.
+ elems [numRelocs][]string
+
+ // stringsIdx maps previously encoded strings to their index within
+ // the RelocString section, to allow deduplication. That is,
+ // elems[RelocString][stringsIdx[s]] == s (if present).
+ stringsIdx map[string]Index
+
+ // syncFrames is the number of frames to write at each sync
+ // marker. A negative value means sync markers are omitted.
+ syncFrames int
+}
+
+// SyncMarkers reports whether pw uses sync markers.
+func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
+
+// NewPkgEncoder returns an initialized PkgEncoder.
+//
+// syncFrames is the number of caller frames that should be serialized
+// at Sync points. Serializing additional frames results in larger
+// export data files, but can help diagnosing desync errors in
+// higher-level Unified IR reader/writer code. If syncFrames is
+// negative, then sync markers are omitted entirely.
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
+ return PkgEncoder{
+ version: version,
+ stringsIdx: make(map[string]Index),
+ syncFrames: syncFrames,
+ }
+}
+
+// DumpTo writes the package's encoded data to out0 and returns the
+// package fingerprint.
+func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
+ h := md5.New()
+ out := io.MultiWriter(out0, h)
+
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ writeUint32(uint32(pw.version))
+
+ if pw.version.Has(Flags) {
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
+ }
+
+ // Write elemEndsEnds.
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ // Write elemEnds.
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ // Write elemData.
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+
+ // Write fingerprint.
+ copy(fingerprint[:], h.Sum(nil))
+ _, err := out0.Write(fingerprint[:])
+ assert(err == nil)
+
+ return
+}
+
+// StringIdx adds a string value to the strings section, if not
+// already present, and returns its index.
+func (pw *PkgEncoder) StringIdx(s string) Index {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[RelocString][idx] == s)
+ return idx
+ }
+
+ idx := Index(len(pw.elems[RelocString]))
+ pw.elems[RelocString] = append(pw.elems[RelocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+// NewEncoder returns an Encoder for a new element within the given
+// section, and encodes the given SyncMarker as the start of the
+// element bitstream.
+func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder {
+ e := pw.NewEncoderRaw(k)
+ e.Sync(marker)
+ return e
+}
+
+// NewEncoderRaw returns an Encoder for a new element within the given
+// section.
+//
+// Most callers should use NewEncoder instead.
+func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder {
+ idx := Index(len(pw.elems[k]))
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return Encoder{
+ p: pw,
+ k: k,
+ Idx: idx,
+ }
+}
+
+// An Encoder provides methods for encoding an individual element's
+// bitstream data.
+type Encoder struct {
+ p *PkgEncoder
+
+ Relocs []RelocEnt
+ RelocMap map[RelocEnt]uint32
+ Data bytes.Buffer // accumulated element bitstream data
+
+ encodingRelocHeader bool
+
+ k RelocKind
+ Idx Index // index within relocation section
+}
+
+// Flush finalizes the element's bitstream and returns its Index.
+func (w *Encoder) Flush() Index {
+ var sb strings.Builder
+
+ // Backup the data so we write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.Data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ panic("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.Sync(SyncRelocs)
+ w.Len(len(w.Relocs))
+ for _, rEnt := range w.Relocs {
+ w.Sync(SyncReloc)
+ w.Len(int(rEnt.Kind))
+ w.Len(int(rEnt.Idx))
+ }
+
+ io.Copy(&sb, &w.Data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.Idx] = sb.String()
+
+ return w.Idx
+}
+
+func (w *Encoder) checkErr(err error) {
+ if err != nil {
+ panicf("unexpected encoding error: %v", err)
+ }
+}
+
+func (w *Encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.Data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *Encoder) rawVarint(x int64) {
+ // Zig-zag encode.
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *Encoder) rawReloc(r RelocKind, idx Index) int {
+ e := RelocEnt{r, idx}
+ if w.RelocMap != nil {
+ if i, ok := w.RelocMap[e]; ok {
+ return int(i)
+ }
+ } else {
+ w.RelocMap = make(map[RelocEnt]uint32)
+ }
+
+ i := len(w.Relocs)
+ w.RelocMap[e] = uint32(i)
+ w.Relocs = append(w.Relocs, e)
+ return i
+}
+
+func (w *Encoder) Sync(m SyncMarker) {
+ if !w.p.SyncMarkers() {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && w.p.syncFrames > 0 {
+ pcs := make([]uintptr, w.p.syncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame))))
+ }
+}
+
+// Bool encodes and writes a bool value into the element bitstream,
+// and then returns the bool value.
+//
+// For simple, 2-alternative encodings, the idiomatic way to call Bool
+// is something like:
+//
+// if w.Bool(x != 0) {
+// // alternative #1
+// } else {
+// // alternative #2
+// }
+//
+// For multi-alternative encodings, use Code instead.
+func (w *Encoder) Bool(b bool) bool {
+ w.Sync(SyncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.Data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+// Int64 encodes and writes an int64 value into the element bitstream.
+func (w *Encoder) Int64(x int64) {
+ w.Sync(SyncInt64)
+ w.rawVarint(x)
+}
+
+// Uint64 encodes and writes a uint64 value into the element bitstream.
+func (w *Encoder) Uint64(x uint64) {
+ w.Sync(SyncUint64)
+ w.rawUvarint(x)
+}
+
+// Len encodes and writes a non-negative int value into the element bitstream.
+func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
+
+// Int encodes and writes an int value into the element bitstream.
+func (w *Encoder) Int(x int) { w.Int64(int64(x)) }
+
+// Uint encodes and writes a uint value into the element bitstream.
+func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }
+
+// Reloc encodes and writes a relocation for the given (section,
+// index) pair into the element bitstream.
+//
+// Note: Only the index is formally written into the element
+// bitstream, so bitstream decoders must know from context which
+// section an encoded relocation refers to.
+func (w *Encoder) Reloc(r RelocKind, idx Index) {
+ w.Sync(SyncUseReloc)
+ w.Len(w.rawReloc(r, idx))
+}
+
+// Code encodes and writes a Code value into the element bitstream.
+func (w *Encoder) Code(c Code) {
+ w.Sync(c.Marker())
+ w.Len(c.Value())
+}
+
+// String encodes and writes a string value into the element
+// bitstream.
+//
+// Internally, strings are deduplicated by adding them to the strings
+// section (if not already present), and then writing a relocation
+// into the element bitstream.
+func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
+ w.Sync(SyncString)
+ w.Reloc(RelocString, idx)
+}
+
+// Strings encodes and writes a variable-length slice of strings into
+// the element bitstream.
+func (w *Encoder) Strings(ss []string) {
+ w.Len(len(ss))
+ for _, s := range ss {
+ w.String(s)
+ }
+}
+
+// Value encodes and writes a constant.Value into the element
+// bitstream.
+func (w *Encoder) Value(val constant.Value) {
+ w.Sync(SyncValue)
+ if w.Bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *Encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ panicf("unhandled %v (%v)", val, val.Kind())
+ case bool:
+ w.Code(ValBool)
+ w.Bool(v)
+ case string:
+ w.Code(ValString)
+ w.String(v)
+ case int64:
+ w.Code(ValInt64)
+ w.Int64(v)
+ case *big.Int:
+ w.Code(ValBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.Code(ValBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.Code(ValBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *Encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.String(string(b)) // TODO: More efficient encoding.
+ w.Bool(v.Sign() < 0)
+}
+
+func (w *Encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.String(string(b)) // TODO: More efficient encoding.
+}
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
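
Editor's sketch tying the encoder and decoder together (pkgbits is internal, so a snippet like this could only live inside the package itself, e.g. as an example test): encode one string element into the meta section with sync markers disabled, dump the package data, and read the element back.

    package pkgbits

    import (
    	"bytes"
    	"fmt"
    )

    // ExampleRoundTrip is a hypothetical example test, not part of the vendored code.
    func ExampleRoundTrip() {
    	pw := NewPkgEncoder(V2, -1) // negative syncFrames: omit sync markers

    	w := pw.NewEncoder(RelocMeta, SyncPublic)
    	w.String("hello")
    	w.Flush() // first element in RelocMeta, i.e. index PublicRootIdx

    	var buf bytes.Buffer
    	pw.DumpTo(&buf)

    	pr := NewPkgDecoder("example", buf.String())
    	r := pr.NewDecoder(RelocMeta, PublicRootIdx, SyncPublic)
    	fmt.Println(r.String())
    	// Output: hello
    }
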
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/flags.go
new file mode 100644
index 00000000..65422274
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+const (
+ flagSyncMarkers = 1 << iota // file format contains sync markers
+)
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
new file mode 100644
index 00000000..fcdfb97c
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/reloc.go
@@ -0,0 +1,42 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// A RelocKind indicates a particular section within a unified IR export.
+type RelocKind int32
+
+// An Index represents a bitstream element index within a particular
+// section.
+type Index int32
+
+// A relocEnt (relocation entry) is an entry in an element's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type RelocEnt struct {
+ Kind RelocKind
+ Idx Index
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ PublicRootIdx Index = 0
+ PrivateRootIdx Index = 1
+)
+
+const (
+ RelocString RelocKind = iota
+ RelocMeta
+ RelocPosBase
+ RelocPkg
+ RelocName
+ RelocType
+ RelocObj
+ RelocObjExt
+ RelocObjDict
+ RelocBody
+
+ numRelocs = iota
+)
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/support.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/support.go
new file mode 100644
index 00000000..50534a29
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import "fmt"
+
+func assert(b bool) {
+ if !b {
+ panic("assertion failed")
+ }
+}
+
+func panicf(format string, args ...any) {
+ panic(fmt.Errorf(format, args...))
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/sync.go
new file mode 100644
index 00000000..1520b73a
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -0,0 +1,136 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
+// SyncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type SyncMarker int
+
+//go:generate stringer -type=SyncMarker -trimprefix=Sync
+
+const (
+ _ SyncMarker = iota
+
+ // Public markers (known to go/types importers).
+
+ // Low-level coding markers.
+ SyncEOF
+ SyncBool
+ SyncInt64
+ SyncUint64
+ SyncString
+ SyncValue
+ SyncVal
+ SyncRelocs
+ SyncReloc
+ SyncUseReloc
+
+ // Higher-level object and type markers.
+ SyncPublic
+ SyncPos
+ SyncPosBase
+ SyncObject
+ SyncObject1
+ SyncPkg
+ SyncPkgDef
+ SyncMethod
+ SyncType
+ SyncTypeIdx
+ SyncTypeParamNames
+ SyncSignature
+ SyncParams
+ SyncParam
+ SyncCodeObj
+ SyncSym
+ SyncLocalIdent
+ SyncSelector
+
+ // Private markers (only known to cmd/compile).
+ SyncPrivate
+
+ SyncFuncExt
+ SyncVarExt
+ SyncTypeExt
+ SyncPragma
+
+ SyncExprList
+ SyncExprs
+ SyncExpr
+ SyncExprType
+ SyncAssign
+ SyncOp
+ SyncFuncLit
+ SyncCompLit
+
+ SyncDecl
+ SyncFuncBody
+ SyncOpenScope
+ SyncCloseScope
+ SyncCloseAnotherScope
+ SyncDeclNames
+ SyncDeclName
+
+ SyncStmts
+ SyncBlockStmt
+ SyncIfStmt
+ SyncForStmt
+ SyncSwitchStmt
+ SyncRangeStmt
+ SyncCaseClause
+ SyncCommClause
+ SyncSelectStmt
+ SyncDecls
+ SyncLabeledStmt
+ SyncUseObjLocal
+ SyncAddLocal
+ SyncLinkname
+ SyncStmt1
+ SyncStmtsEnd
+ SyncLabel
+ SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
+)
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
new file mode 100644
index 00000000..582ad56d
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT.
+
+package pkgbits
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SyncEOF-1]
+ _ = x[SyncBool-2]
+ _ = x[SyncInt64-3]
+ _ = x[SyncUint64-4]
+ _ = x[SyncString-5]
+ _ = x[SyncValue-6]
+ _ = x[SyncVal-7]
+ _ = x[SyncRelocs-8]
+ _ = x[SyncReloc-9]
+ _ = x[SyncUseReloc-10]
+ _ = x[SyncPublic-11]
+ _ = x[SyncPos-12]
+ _ = x[SyncPosBase-13]
+ _ = x[SyncObject-14]
+ _ = x[SyncObject1-15]
+ _ = x[SyncPkg-16]
+ _ = x[SyncPkgDef-17]
+ _ = x[SyncMethod-18]
+ _ = x[SyncType-19]
+ _ = x[SyncTypeIdx-20]
+ _ = x[SyncTypeParamNames-21]
+ _ = x[SyncSignature-22]
+ _ = x[SyncParams-23]
+ _ = x[SyncParam-24]
+ _ = x[SyncCodeObj-25]
+ _ = x[SyncSym-26]
+ _ = x[SyncLocalIdent-27]
+ _ = x[SyncSelector-28]
+ _ = x[SyncPrivate-29]
+ _ = x[SyncFuncExt-30]
+ _ = x[SyncVarExt-31]
+ _ = x[SyncTypeExt-32]
+ _ = x[SyncPragma-33]
+ _ = x[SyncExprList-34]
+ _ = x[SyncExprs-35]
+ _ = x[SyncExpr-36]
+ _ = x[SyncExprType-37]
+ _ = x[SyncAssign-38]
+ _ = x[SyncOp-39]
+ _ = x[SyncFuncLit-40]
+ _ = x[SyncCompLit-41]
+ _ = x[SyncDecl-42]
+ _ = x[SyncFuncBody-43]
+ _ = x[SyncOpenScope-44]
+ _ = x[SyncCloseScope-45]
+ _ = x[SyncCloseAnotherScope-46]
+ _ = x[SyncDeclNames-47]
+ _ = x[SyncDeclName-48]
+ _ = x[SyncStmts-49]
+ _ = x[SyncBlockStmt-50]
+ _ = x[SyncIfStmt-51]
+ _ = x[SyncForStmt-52]
+ _ = x[SyncSwitchStmt-53]
+ _ = x[SyncRangeStmt-54]
+ _ = x[SyncCaseClause-55]
+ _ = x[SyncCommClause-56]
+ _ = x[SyncSelectStmt-57]
+ _ = x[SyncDecls-58]
+ _ = x[SyncLabeledStmt-59]
+ _ = x[SyncUseObjLocal-60]
+ _ = x[SyncAddLocal-61]
+ _ = x[SyncLinkname-62]
+ _ = x[SyncStmt1-63]
+ _ = x[SyncStmtsEnd-64]
+ _ = x[SyncLabel-65]
+ _ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
+}
+
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
+
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
+
+func (i SyncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) {
+ return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]]
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/pkgbits/version.go b/operator/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 00000000..53af9df2
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// new data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+ // V0: initial prototype.
+ //
+ // All data that is not assigned a Field is in version V0
+ // and has not been deprecated.
+ V0 Version = iota
+
+ // V1: adds the Flags uint32 word
+ V1
+
+ // V2: removes unused legacy fields and supports type parameters for aliases.
+ // - remove the legacy "has init" bool from the public root
+ // - remove obj's "derived func instance" bool
+ // - add a TypeParamNames field to ObjAlias
+ // - remove derived info "needed" bool
+ V2
+
+ numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+ // Flags in a uint32 in the header of a bitstream
+ // that is used to indicate whether optional features are enabled.
+ Flags Field = iota
+
+ // Deprecated: HasInit was a bool indicating whether a package
+ // has any init functions.
+ HasInit
+
+ // Deprecated: DerivedFuncInstance was a bool indicating
+ // whether an object was a function instance.
+ DerivedFuncInstance
+
+ // ObjAlias has a list of TypeParamNames.
+ AliasTypeParamNames
+
+ // Deprecated: DerivedInfoNeeded was a bool indicating
+ // whether a type was a derived type.
+ DerivedInfoNeeded
+
+ numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+ Flags: V1,
+ AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+ HasInit: V2,
+ DerivedFuncInstance: V2,
+ DerivedInfoNeeded: V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+ return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
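
Editor's note: a few concrete spot checks of Has (valid only inside the package, shown as a hypothetical helper) make the introduced/removed bookkeeping explicit.

    package pkgbits

    import "fmt"

    // versionHasDemo is a hypothetical helper answering: is field f present
    // in a bitstream written at version v?
    func versionHasDemo() {
    	fmt.Println(V1.Has(Flags))               // true:  added in V1, never removed
    	fmt.Println(V1.Has(AliasTypeParamNames)) // false: not added until V2
    	fmt.Println(V1.Has(HasInit))             // true:  present from V0 until its removal in V2
    	fmt.Println(V2.Has(HasInit))             // false: removed in V2
    }
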
diff --git a/operator/vendor/golang.org/x/tools/internal/stdlib/deps.go b/operator/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 00000000..96ad6c58
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,365 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+ name string
+ deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+ {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"},
+ {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"},
+ {"bufio", "\x03k\x83\x01D\x14"},
+ {"bytes", "n*Y\x03\fG\x02\x02"},
+ {"cmp", ""},
+ {"compress/bzip2", "\x02\x02\xed\x01A"},
+ {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"},
+ {"compress/gzip", "\x02\x04a\a\x03\x14lT"},
+ {"compress/lzw", "\x02l\x03\x80\x01"},
+ {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"},
+ {"container/heap", "\xb3\x02"},
+ {"container/list", ""},
+ {"container/ring", ""},
+ {"context", "n\\m\x01\r"},
+ {"crypto", "\x83\x01nC"},
+ {"crypto/aes", "\x10\n\a\x93\x02"},
+ {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"},
+ {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"},
+ {"crypto/dsa", "A\x04)\x83\x01\r"},
+ {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"},
+ {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"},
+ {"crypto/elliptic", "0>\x83\x01\r9"},
+ {"crypto/fips140", " \x05"},
+ {"crypto/hkdf", "-\x13\x01-\x15"},
+ {"crypto/hmac", "\x1a\x14\x12\x01\x111"},
+ {"crypto/internal/boring", "\x0e\x02\rf"},
+ {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"},
+ {"crypto/internal/boring/bcache", "\xb8\x02\x13"},
+ {"crypto/internal/boring/sig", ""},
+ {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"},
+ {"crypto/internal/entropy", "F"},
+ {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"},
+ {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"},
+ {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"},
+ {"crypto/internal/fips140/alias", "\xcb\x02"},
+ {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"},
+ {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "%\x85\x02!"},
+ {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"},
+ {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"},
+ {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"},
+ {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"},
+ {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"},
+ {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"},
+ {"crypto/internal/fips140/ssh", "%^"},
+ {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"},
+ {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"},
+ {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"},
+ {"crypto/internal/fips140cache", "\xaa\x02\r&"},
+ {"crypto/internal/fips140deps", ""},
+ {"crypto/internal/fips140deps/byteorder", "\x99\x01"},
+ {"crypto/internal/fips140deps/cpu", "\xae\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xb6\x01"},
+ {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"},
+ {"crypto/internal/fips140only", "'\r\x01\x01M3;"},
+ {"crypto/internal/fips140test", ""},
+ {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"},
+ {"crypto/internal/impl", "\xb5\x02"},
+ {"crypto/internal/randutil", "\xf1\x01\x12"},
+ {"crypto/internal/sysrand", "nn! \r\r\x01\x01\f\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "n"},
+ {"crypto/md5", "\x0e3-\x15\x16g"},
+ {"crypto/mlkem", "/"},
+ {"crypto/pbkdf2", "2\x0e\x01-\x15"},
+ {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"},
+ {"crypto/rc4", "#\x1e-\xc6\x01"},
+ {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"},
+ {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"},
+ {"crypto/sha256", "\x0e\f\x1aO"},
+ {"crypto/sha3", "\x0e'N\xc8\x01"},
+ {"crypto/sha512", "\x0e\f\x1cM"},
+ {"crypto/subtle", "8\x9b\x01W"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"},
+ {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/pkix", "d\x06\a\x8d\x01G"},
+ {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"},
+ {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"},
+ {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"},
+ {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"},
+ {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"},
+ {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"},
+ {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"},
+ {"debug/plan9obj", "g\a\x03e\x1b,"},
+ {"embed", "n*@\x19\x01S"},
+ {"embed/internal/embedtest", ""},
+ {"encoding", ""},
+ {"encoding/ascii85", "\xf1\x01C"},
+ {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"},
+ {"encoding/base32", "\xf1\x01A\x02"},
+ {"encoding/base64", "\x99\x01XA\x02"},
+ {"encoding/binary", "n\x83\x01\f(\r\x05"},
+ {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"},
+ {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"},
+ {"encoding/hex", "n\x03\x80\x01A\x03"},
+ {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"},
+ {"encoding/pem", "\x03c\b\x83\x01A\x03"},
+ {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"},
+ {"errors", "\xca\x01\x81\x01"},
+ {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"},
+ {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"},
+ {"fmt", "nE>\f \b\r\x02\x03\x12"},
+ {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"},
+ {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"},
+ {"go/build/constraint", "n\xc6\x01\x01\x12\x02"},
+ {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"},
+ {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"},
+ {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"},
+ {"go/format", "\x03n\x01\v\x01\x02qD"},
+ {"go/importer", "s\a\x01\x01\x04\x01p9"},
+ {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"},
+ {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"},
+ {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"},
+ {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"},
+ {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"},
+ {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"},
+ {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"},
+ {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"},
+ {"go/version", "\xbb\x01z"},
+ {"hash", "\xf1\x01"},
+ {"hash/adler32", "n\x15\x16"},
+ {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"},
+ {"hash/crc64", "n\x15\x16\x9e\x01"},
+ {"hash/fnv", "n\x15\x16g"},
+ {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"},
+ {"html", "\xb5\x02\x02\x12"},
+ {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"},
+ {"image", "\x02l\x1ee\x0f4\x03\x01"},
+ {"image/color", ""},
+ {"image/color/palette", "\x8c\x01"},
+ {"image/draw", "\x8b\x01\x01\x04"},
+ {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"},
+ {"image/internal/imageutil", "\x8b\x01"},
+ {"image/jpeg", "\x02l\x1d\x01\x04a"},
+ {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"},
+ {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"},
+ {"internal/abi", "\xb5\x01\x96\x01"},
+ {"internal/asan", "\xcb\x02"},
+ {"internal/bisect", "\xaa\x02\r\x01"},
+ {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"},
+ {"internal/bytealg", "\xae\x01\x9d\x01"},
+ {"internal/byteorder", ""},
+ {"internal/cfg", ""},
+ {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"},
+ {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"},
+ {"internal/copyright", ""},
+ {"internal/coverage", ""},
+ {"internal/coverage/calloc", ""},
+ {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"},
+ {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"},
+ {"internal/coverage/cmerge", "q-_"},
+ {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"},
+ {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"},
+ {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"},
+ {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."},
+ {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"},
+ {"internal/coverage/rtcov", "\xcb\x02"},
+ {"internal/coverage/slicereader", "g\n\x80\x01Z"},
+ {"internal/coverage/slicewriter", "q\x80\x01"},
+ {"internal/coverage/stringtab", "q8\x04D"},
+ {"internal/coverage/test", ""},
+ {"internal/coverage/uleb128", ""},
+ {"internal/cpu", "\xcb\x02"},
+ {"internal/dag", "\x04m\xc1\x01\x03"},
+ {"internal/diff", "\x03n\xc2\x01\x02"},
+ {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"},
+ {"internal/filepathlite", "n*@\x1a@"},
+ {"internal/fmtsort", "\x04\xa1\x02\r"},
+ {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"},
+ {"internal/goarch", ""},
+ {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"},
+ {"internal/godebugs", ""},
+ {"internal/goexperiment", ""},
+ {"internal/goos", ""},
+ {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"},
+ {"internal/gover", "\x04"},
+ {"internal/goversion", ""},
+ {"internal/itoa", ""},
+ {"internal/lazyregexp", "\x9d\x02\v\r\x02"},
+ {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"},
+ {"internal/msan", "\xcb\x02"},
+ {"internal/nettrace", ""},
+ {"internal/obscuretestdata", "f\x8b\x01,"},
+ {"internal/oserror", "n"},
+ {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"},
+ {"internal/platform", ""},
+ {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"},
+ {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"},
+ {"internal/profilerecord", ""},
+ {"internal/race", "\x94\x01\xb7\x01"},
+ {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"},
+ {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"},
+ {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"},
+ {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"},
+ {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"},
+ {"net/http/httptrace", "\rFnF\x14\n "},
+ {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"},
+ {"net/http/internal", "\x02\x01k\x03\x80\x01"},
+ {"net/http/internal/ascii", "\xb5\x02\x12"},
+ {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"},
+ {"net/http/internal/testcert", "\xb5\x02"},
+ {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"},
+ {"net/internal/cgotest", ""},
+ {"net/internal/socktest", "q\xc6\x01\x02"},
+ {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"},
+ {"net/netip", "\x04j*\x01$@\x034\x16"},
+ {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"},
+ {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"},
+ {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"},
+ {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"},
+ {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"},
+ {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"},
+ {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"},
+ {"os/exec/internal/fdtest", "\xb9\x02"},
+ {"os/signal", "\r\x90\x02\x15\x05\x02"},
+ {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"},
+ {"path", "n*\xb1\x01"},
+ {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"},
+ {"plugin", "n"},
+ {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"},
+ {"reflect/internal/example1", ""},
+ {"reflect/internal/example2", ""},
+ {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"},
+ {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"},
+ {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"},
+ {"runtime/coverage", "\xa0\x01Q"},
+ {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"},
+ {"runtime/metrics", "\xb7\x01F-!"},
+ {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"},
+ {"runtime/race", "\xb0\x02"},
+ {"runtime/race/internal/amd64v1", ""},
+ {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"},
+ {"slices", "\x04\xf0\x01\fK"},
+ {"sort", "\xca\x0162"},
+ {"strconv", "n*@%\x03I"},
+ {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"},
+ {"structs", ""},
+ {"sync", "\xc9\x01\x10\x01P\x0e\x13"},
+ {"sync/atomic", "\xcb\x02"},
+ {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"},
+ {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"},
+ {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"},
+ {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"},
+ {"testing/iotest", "\x03k\x03\x80\x01\x04"},
+ {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"},
+ {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"},
+ {"testing/synctest", "\xda\x01`\x11"},
+ {"text/scanner", "\x03n\x80\x01,*\x02"},
+ {"text/tabwriter", "q\x80\x01X"},
+ {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"},
+ {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"},
+ {"time", "n*\x1e\"(*\r\x02\x12"},
+ {"time/tzdata", "n\xcb\x01\x12"},
+ {"unicode", ""},
+ {"unicode/utf16", ""},
+ {"unicode/utf8", ""},
+ {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"},
+ {"unsafe", ""},
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"},
+ {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"},
+ {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"},
+ {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "n"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"},
+ {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"},
+ {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"},
+ {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"},
+ {"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"},
+ {"weak", "\x94\x01\x96\x01!"},
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/stdlib/import.go b/operator/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 00000000..f6909878
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+ "encoding/binary"
+ "iter"
+ "slices"
+ "strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !yield(deps[depIndex].name) {
+ return
+ }
+ data = data[n:]
+ }
+ }
+ }
+ }
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var seen [1 + len(deps)/8]byte // bit set of seen packages
+ var visit func(i int) bool
+ visit = func(i int) bool {
+ bit := byte(1) << (i % 8)
+ if seen[i/8]&bit == 0 {
+ seen[i/8] |= bit
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !visit(int(depIndex)) {
+ return false
+ }
+ data = data[n:]
+ }
+ if !yield(deps[i].name) {
+ return false
+ }
+ }
+ return true
+ }
+ if !visit(i) {
+ return
+ }
+ }
+ }
+ }
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+ return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+ return strings.Compare(p.name, n)
+ })
+}
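
Editor's note: both `Imports` and `Dependencies` return `iter.Seq[string]`, so callers consume them with range-over-func. Since the package is `internal`, this only applies to code inside `golang.org/x/tools` itself; the hypothetical program below is a minimal sketch of such a caller, assuming it lives within that module.

    package main

    import (
    	"fmt"

    	"golang.org/x/tools/internal/stdlib"
    )

    func main() {
    	// Direct imports of net/http, yielded in name order.
    	for dep := range stdlib.Imports("net/http") {
    		fmt.Println("direct:", dep)
    	}

    	// Transitive dependency closure of net/http (including net/http
    	// itself), in a deterministic topological order.
    	for dep := range stdlib.Dependencies("net/http") {
    		fmt.Println("transitive:", dep)
    	}
    }
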
diff --git a/operator/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/operator/vendor/golang.org/x/tools/internal/stdlib/manifest.go
new file mode 100644
index 00000000..c1faa50d
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -0,0 +1,17726 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+var PackageSymbols = map[string][]Symbol{
+ "archive/tar": {
+ {"(*Header).FileInfo", Method, 1, ""},
+ {"(*Reader).Next", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Writer).AddFS", Method, 22, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteHeader", Method, 0, ""},
+ {"(Format).String", Method, 10, ""},
+ {"ErrFieldTooLong", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"ErrInsecurePath", Var, 20, ""},
+ {"ErrWriteAfterClose", Var, 0, ""},
+ {"ErrWriteTooLong", Var, 0, ""},
+ {"FileInfoHeader", Func, 1, "func(fi fs.FileInfo, link string) (*Header, error)"},
+ {"FileInfoNames", Type, 23, ""},
+ {"Format", Type, 10, ""},
+ {"FormatGNU", Const, 10, ""},
+ {"FormatPAX", Const, 10, ""},
+ {"FormatUSTAR", Const, 10, ""},
+ {"FormatUnknown", Const, 10, ""},
+ {"Header", Type, 0, ""},
+ {"Header.AccessTime", Field, 0, ""},
+ {"Header.ChangeTime", Field, 0, ""},
+ {"Header.Devmajor", Field, 0, ""},
+ {"Header.Devminor", Field, 0, ""},
+ {"Header.Format", Field, 10, ""},
+ {"Header.Gid", Field, 0, ""},
+ {"Header.Gname", Field, 0, ""},
+ {"Header.Linkname", Field, 0, ""},
+ {"Header.ModTime", Field, 0, ""},
+ {"Header.Mode", Field, 0, ""},
+ {"Header.Name", Field, 0, ""},
+ {"Header.PAXRecords", Field, 10, ""},
+ {"Header.Size", Field, 0, ""},
+ {"Header.Typeflag", Field, 0, ""},
+ {"Header.Uid", Field, 0, ""},
+ {"Header.Uname", Field, 0, ""},
+ {"Header.Xattrs", Field, 3, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"Reader", Type, 0, ""},
+ {"TypeBlock", Const, 0, ""},
+ {"TypeChar", Const, 0, ""},
+ {"TypeCont", Const, 0, ""},
+ {"TypeDir", Const, 0, ""},
+ {"TypeFifo", Const, 0, ""},
+ {"TypeGNULongLink", Const, 1, ""},
+ {"TypeGNULongName", Const, 1, ""},
+ {"TypeGNUSparse", Const, 3, ""},
+ {"TypeLink", Const, 0, ""},
+ {"TypeReg", Const, 0, ""},
+ {"TypeRegA", Const, 0, ""},
+ {"TypeSymlink", Const, 0, ""},
+ {"TypeXGlobalHeader", Const, 0, ""},
+ {"TypeXHeader", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "archive/zip": {
+ {"(*File).DataOffset", Method, 2, ""},
+ {"(*File).FileInfo", Method, 0, ""},
+ {"(*File).ModTime", Method, 0, ""},
+ {"(*File).Mode", Method, 0, ""},
+ {"(*File).Open", Method, 0, ""},
+ {"(*File).OpenRaw", Method, 17, ""},
+ {"(*File).SetModTime", Method, 0, ""},
+ {"(*File).SetMode", Method, 0, ""},
+ {"(*FileHeader).FileInfo", Method, 0, ""},
+ {"(*FileHeader).ModTime", Method, 0, ""},
+ {"(*FileHeader).Mode", Method, 0, ""},
+ {"(*FileHeader).SetModTime", Method, 0, ""},
+ {"(*FileHeader).SetMode", Method, 0, ""},
+ {"(*ReadCloser).Close", Method, 0, ""},
+ {"(*ReadCloser).Open", Method, 16, ""},
+ {"(*ReadCloser).RegisterDecompressor", Method, 6, ""},
+ {"(*Reader).Open", Method, 16, ""},
+ {"(*Reader).RegisterDecompressor", Method, 6, ""},
+ {"(*Writer).AddFS", Method, 22, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Copy", Method, 17, ""},
+ {"(*Writer).Create", Method, 0, ""},
+ {"(*Writer).CreateHeader", Method, 0, ""},
+ {"(*Writer).CreateRaw", Method, 17, ""},
+ {"(*Writer).Flush", Method, 4, ""},
+ {"(*Writer).RegisterCompressor", Method, 6, ""},
+ {"(*Writer).SetComment", Method, 10, ""},
+ {"(*Writer).SetOffset", Method, 5, ""},
+ {"Compressor", Type, 2, ""},
+ {"Decompressor", Type, 2, ""},
+ {"Deflate", Const, 0, ""},
+ {"ErrAlgorithm", Var, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrFormat", Var, 0, ""},
+ {"ErrInsecurePath", Var, 20, ""},
+ {"File", Type, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.CRC32", Field, 0, ""},
+ {"FileHeader.Comment", Field, 0, ""},
+ {"FileHeader.CompressedSize", Field, 0, ""},
+ {"FileHeader.CompressedSize64", Field, 1, ""},
+ {"FileHeader.CreatorVersion", Field, 0, ""},
+ {"FileHeader.ExternalAttrs", Field, 0, ""},
+ {"FileHeader.Extra", Field, 0, ""},
+ {"FileHeader.Flags", Field, 0, ""},
+ {"FileHeader.Method", Field, 0, ""},
+ {"FileHeader.Modified", Field, 10, ""},
+ {"FileHeader.ModifiedDate", Field, 0, ""},
+ {"FileHeader.ModifiedTime", Field, 0, ""},
+ {"FileHeader.Name", Field, 0, ""},
+ {"FileHeader.NonUTF8", Field, 10, ""},
+ {"FileHeader.ReaderVersion", Field, 0, ""},
+ {"FileHeader.UncompressedSize", Field, 0, ""},
+ {"FileHeader.UncompressedSize64", Field, 1, ""},
+ {"FileInfoHeader", Func, 0, "func(fi fs.FileInfo) (*FileHeader, error)"},
+ {"NewReader", Func, 0, "func(r io.ReaderAt, size int64) (*Reader, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"OpenReader", Func, 0, "func(name string) (*ReadCloser, error)"},
+ {"ReadCloser", Type, 0, ""},
+ {"ReadCloser.Reader", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Comment", Field, 0, ""},
+ {"Reader.File", Field, 0, ""},
+ {"RegisterCompressor", Func, 2, "func(method uint16, comp Compressor)"},
+ {"RegisterDecompressor", Func, 2, "func(method uint16, dcomp Decompressor)"},
+ {"Store", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "bufio": {
+ {"(*Reader).Buffered", Method, 0, ""},
+ {"(*Reader).Discard", Method, 5, ""},
+ {"(*Reader).Peek", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadBytes", Method, 0, ""},
+ {"(*Reader).ReadLine", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).ReadSlice", Method, 0, ""},
+ {"(*Reader).ReadString", Method, 0, ""},
+ {"(*Reader).Reset", Method, 2, ""},
+ {"(*Reader).Size", Method, 10, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"(*Scanner).Buffer", Method, 6, ""},
+ {"(*Scanner).Bytes", Method, 1, ""},
+ {"(*Scanner).Err", Method, 1, ""},
+ {"(*Scanner).Scan", Method, 1, ""},
+ {"(*Scanner).Split", Method, 1, ""},
+ {"(*Scanner).Text", Method, 1, ""},
+ {"(*Writer).Available", Method, 0, ""},
+ {"(*Writer).AvailableBuffer", Method, 18, ""},
+ {"(*Writer).Buffered", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).ReadFrom", Method, 1, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Size", Method, 10, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteByte", Method, 0, ""},
+ {"(*Writer).WriteRune", Method, 0, ""},
+ {"(*Writer).WriteString", Method, 0, ""},
+ {"(ReadWriter).Available", Method, 0, ""},
+ {"(ReadWriter).AvailableBuffer", Method, 18, ""},
+ {"(ReadWriter).Discard", Method, 5, ""},
+ {"(ReadWriter).Flush", Method, 0, ""},
+ {"(ReadWriter).Peek", Method, 0, ""},
+ {"(ReadWriter).Read", Method, 0, ""},
+ {"(ReadWriter).ReadByte", Method, 0, ""},
+ {"(ReadWriter).ReadBytes", Method, 0, ""},
+ {"(ReadWriter).ReadFrom", Method, 1, ""},
+ {"(ReadWriter).ReadLine", Method, 0, ""},
+ {"(ReadWriter).ReadRune", Method, 0, ""},
+ {"(ReadWriter).ReadSlice", Method, 0, ""},
+ {"(ReadWriter).ReadString", Method, 0, ""},
+ {"(ReadWriter).UnreadByte", Method, 0, ""},
+ {"(ReadWriter).UnreadRune", Method, 0, ""},
+ {"(ReadWriter).Write", Method, 0, ""},
+ {"(ReadWriter).WriteByte", Method, 0, ""},
+ {"(ReadWriter).WriteRune", Method, 0, ""},
+ {"(ReadWriter).WriteString", Method, 0, ""},
+ {"(ReadWriter).WriteTo", Method, 1, ""},
+ {"ErrAdvanceTooFar", Var, 1, ""},
+ {"ErrBadReadCount", Var, 15, ""},
+ {"ErrBufferFull", Var, 0, ""},
+ {"ErrFinalToken", Var, 6, ""},
+ {"ErrInvalidUnreadByte", Var, 0, ""},
+ {"ErrInvalidUnreadRune", Var, 0, ""},
+ {"ErrNegativeAdvance", Var, 1, ""},
+ {"ErrNegativeCount", Var, 0, ""},
+ {"ErrTooLong", Var, 1, ""},
+ {"MaxScanTokenSize", Const, 1, ""},
+ {"NewReadWriter", Func, 0, "func(r *Reader, w *Writer) *ReadWriter"},
+ {"NewReader", Func, 0, "func(rd io.Reader) *Reader"},
+ {"NewReaderSize", Func, 0, "func(rd io.Reader, size int) *Reader"},
+ {"NewScanner", Func, 1, "func(r io.Reader) *Scanner"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterSize", Func, 0, "func(w io.Writer, size int) *Writer"},
+ {"ReadWriter", Type, 0, ""},
+ {"ReadWriter.Reader", Field, 0, ""},
+ {"ReadWriter.Writer", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"ScanBytes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanLines", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanRunes", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"ScanWords", Func, 1, "func(data []byte, atEOF bool) (advance int, token []byte, err error)"},
+ {"Scanner", Type, 1, ""},
+ {"SplitFunc", Type, 1, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "bytes": {
+ {"(*Buffer).Available", Method, 21, ""},
+ {"(*Buffer).AvailableBuffer", Method, 21, ""},
+ {"(*Buffer).Bytes", Method, 0, ""},
+ {"(*Buffer).Cap", Method, 5, ""},
+ {"(*Buffer).Grow", Method, 1, ""},
+ {"(*Buffer).Len", Method, 0, ""},
+ {"(*Buffer).Next", Method, 0, ""},
+ {"(*Buffer).Read", Method, 0, ""},
+ {"(*Buffer).ReadByte", Method, 0, ""},
+ {"(*Buffer).ReadBytes", Method, 0, ""},
+ {"(*Buffer).ReadFrom", Method, 0, ""},
+ {"(*Buffer).ReadRune", Method, 0, ""},
+ {"(*Buffer).ReadString", Method, 0, ""},
+ {"(*Buffer).Reset", Method, 0, ""},
+ {"(*Buffer).String", Method, 0, ""},
+ {"(*Buffer).Truncate", Method, 0, ""},
+ {"(*Buffer).UnreadByte", Method, 0, ""},
+ {"(*Buffer).UnreadRune", Method, 0, ""},
+ {"(*Buffer).Write", Method, 0, ""},
+ {"(*Buffer).WriteByte", Method, 0, ""},
+ {"(*Buffer).WriteRune", Method, 0, ""},
+ {"(*Buffer).WriteString", Method, 0, ""},
+ {"(*Buffer).WriteTo", Method, 0, ""},
+ {"(*Reader).Len", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAt", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).Reset", Method, 7, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).Size", Method, 5, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"Buffer", Type, 0, ""},
+ {"Clone", Func, 20, "func(b []byte) []byte"},
+ {"Compare", Func, 0, "func(a []byte, b []byte) int"},
+ {"Contains", Func, 0, "func(b []byte, subslice []byte) bool"},
+ {"ContainsAny", Func, 7, "func(b []byte, chars string) bool"},
+ {"ContainsFunc", Func, 21, "func(b []byte, f func(rune) bool) bool"},
+ {"ContainsRune", Func, 7, "func(b []byte, r rune) bool"},
+ {"Count", Func, 0, "func(s []byte, sep []byte) int"},
+ {"Cut", Func, 18, "func(s []byte, sep []byte) (before []byte, after []byte, found bool)"},
+ {"CutPrefix", Func, 20, "func(s []byte, prefix []byte) (after []byte, found bool)"},
+ {"CutSuffix", Func, 20, "func(s []byte, suffix []byte) (before []byte, found bool)"},
+ {"Equal", Func, 0, "func(a []byte, b []byte) bool"},
+ {"EqualFold", Func, 0, "func(s []byte, t []byte) bool"},
+ {"ErrTooLarge", Var, 0, ""},
+ {"Fields", Func, 0, "func(s []byte) [][]byte"},
+ {"FieldsFunc", Func, 0, "func(s []byte, f func(rune) bool) [][]byte"},
+ {"FieldsFuncSeq", Func, 24, "func(s []byte, f func(rune) bool) iter.Seq[[]byte]"},
+ {"FieldsSeq", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+ {"HasPrefix", Func, 0, "func(s []byte, prefix []byte) bool"},
+ {"HasSuffix", Func, 0, "func(s []byte, suffix []byte) bool"},
+ {"Index", Func, 0, "func(s []byte, sep []byte) int"},
+ {"IndexAny", Func, 0, "func(s []byte, chars string) int"},
+ {"IndexByte", Func, 0, "func(b []byte, c byte) int"},
+ {"IndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+ {"IndexRune", Func, 0, "func(s []byte, r rune) int"},
+ {"Join", Func, 0, "func(s [][]byte, sep []byte) []byte"},
+ {"LastIndex", Func, 0, "func(s []byte, sep []byte) int"},
+ {"LastIndexAny", Func, 0, "func(s []byte, chars string) int"},
+ {"LastIndexByte", Func, 5, "func(s []byte, c byte) int"},
+ {"LastIndexFunc", Func, 0, "func(s []byte, f func(r rune) bool) int"},
+ {"Lines", Func, 24, "func(s []byte) iter.Seq[[]byte]"},
+ {"Map", Func, 0, "func(mapping func(r rune) rune, s []byte) []byte"},
+ {"MinRead", Const, 0, ""},
+ {"NewBuffer", Func, 0, "func(buf []byte) *Buffer"},
+ {"NewBufferString", Func, 0, "func(s string) *Buffer"},
+ {"NewReader", Func, 0, "func(b []byte) *Reader"},
+ {"Reader", Type, 0, ""},
+ {"Repeat", Func, 0, "func(b []byte, count int) []byte"},
+ {"Replace", Func, 0, "func(s []byte, old []byte, new []byte, n int) []byte"},
+ {"ReplaceAll", Func, 12, "func(s []byte, old []byte, new []byte) []byte"},
+ {"Runes", Func, 0, "func(s []byte) []rune"},
+ {"Split", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+ {"SplitAfter", Func, 0, "func(s []byte, sep []byte) [][]byte"},
+ {"SplitAfterN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+ {"SplitAfterSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+ {"SplitN", Func, 0, "func(s []byte, sep []byte, n int) [][]byte"},
+ {"SplitSeq", Func, 24, "func(s []byte, sep []byte) iter.Seq[[]byte]"},
+ {"Title", Func, 0, "func(s []byte) []byte"},
+ {"ToLower", Func, 0, "func(s []byte) []byte"},
+ {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToTitle", Func, 0, "func(s []byte) []byte"},
+ {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToUpper", Func, 0, "func(s []byte) []byte"},
+ {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s []byte) []byte"},
+ {"ToValidUTF8", Func, 13, "func(s []byte, replacement []byte) []byte"},
+ {"Trim", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimLeft", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimLeftFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimPrefix", Func, 1, "func(s []byte, prefix []byte) []byte"},
+ {"TrimRight", Func, 0, "func(s []byte, cutset string) []byte"},
+ {"TrimRightFunc", Func, 0, "func(s []byte, f func(r rune) bool) []byte"},
+ {"TrimSpace", Func, 0, "func(s []byte) []byte"},
+ {"TrimSuffix", Func, 1, "func(s []byte, suffix []byte) []byte"},
+ },
+ "cmp": {
+ {"Compare", Func, 21, "func[T Ordered](x T, y T) int"},
+ {"Less", Func, 21, "func[T Ordered](x T, y T) bool"},
+ {"Or", Func, 22, "func[T comparable](vals ...T) T"},
+ {"Ordered", Type, 21, ""},
+ },
+ "compress/bzip2": {
+ {"(StructuralError).Error", Method, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"StructuralError", Type, 0, ""},
+ },
+ "compress/flate": {
+ {"(*ReadError).Error", Method, 0, ""},
+ {"(*WriteError).Error", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(InternalError).Error", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"HuffmanOnly", Const, 7, ""},
+ {"InternalError", Type, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) io.ReadCloser"},
+ {"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) io.ReadCloser"},
+ {"NewWriter", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NewWriterDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"ReadError", Type, 0, ""},
+ {"ReadError.Err", Field, 0, ""},
+ {"ReadError.Offset", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Resetter", Type, 4, ""},
+ {"WriteError", Type, 0, ""},
+ {"WriteError.Err", Field, 0, ""},
+ {"WriteError.Offset", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "compress/gzip": {
+ {"(*Reader).Close", Method, 0, ""},
+ {"(*Reader).Multistream", Method, 4, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).Reset", Method, 3, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 1, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"Header", Type, 0, ""},
+ {"Header.Comment", Field, 0, ""},
+ {"Header.Extra", Field, 0, ""},
+ {"Header.ModTime", Field, 0, ""},
+ {"Header.Name", Field, 0, ""},
+ {"Header.OS", Field, 0, ""},
+ {"HuffmanOnly", Const, 8, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) (*Reader, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Header", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ {"Writer.Header", Field, 0, ""},
+ },
+ "compress/lzw": {
+ {"(*Reader).Close", Method, 17, ""},
+ {"(*Reader).Read", Method, 17, ""},
+ {"(*Reader).Reset", Method, 17, ""},
+ {"(*Writer).Close", Method, 17, ""},
+ {"(*Writer).Reset", Method, 17, ""},
+ {"(*Writer).Write", Method, 17, ""},
+ {"LSB", Const, 0, ""},
+ {"MSB", Const, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader, order Order, litWidth int) io.ReadCloser"},
+ {"NewWriter", Func, 0, "func(w io.Writer, order Order, litWidth int) io.WriteCloser"},
+ {"Order", Type, 0, ""},
+ {"Reader", Type, 17, ""},
+ {"Writer", Type, 17, ""},
+ },
+ "compress/zlib": {
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Reset", Method, 2, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"BestCompression", Const, 0, ""},
+ {"BestSpeed", Const, 0, ""},
+ {"DefaultCompression", Const, 0, ""},
+ {"ErrChecksum", Var, 0, ""},
+ {"ErrDictionary", Var, 0, ""},
+ {"ErrHeader", Var, 0, ""},
+ {"HuffmanOnly", Const, 8, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) (io.ReadCloser, error)"},
+ {"NewReaderDict", Func, 0, "func(r io.Reader, dict []byte) (io.ReadCloser, error)"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"NewWriterLevel", Func, 0, "func(w io.Writer, level int) (*Writer, error)"},
+ {"NewWriterLevelDict", Func, 0, "func(w io.Writer, level int, dict []byte) (*Writer, error)"},
+ {"NoCompression", Const, 0, ""},
+ {"Resetter", Type, 4, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "container/heap": {
+ {"Fix", Func, 2, "func(h Interface, i int)"},
+ {"Init", Func, 0, "func(h Interface)"},
+ {"Interface", Type, 0, ""},
+ {"Pop", Func, 0, "func(h Interface) any"},
+ {"Push", Func, 0, "func(h Interface, x any)"},
+ {"Remove", Func, 0, "func(h Interface, i int) any"},
+ },
+ "container/list": {
+ {"(*Element).Next", Method, 0, ""},
+ {"(*Element).Prev", Method, 0, ""},
+ {"(*List).Back", Method, 0, ""},
+ {"(*List).Front", Method, 0, ""},
+ {"(*List).Init", Method, 0, ""},
+ {"(*List).InsertAfter", Method, 0, ""},
+ {"(*List).InsertBefore", Method, 0, ""},
+ {"(*List).Len", Method, 0, ""},
+ {"(*List).MoveAfter", Method, 2, ""},
+ {"(*List).MoveBefore", Method, 2, ""},
+ {"(*List).MoveToBack", Method, 0, ""},
+ {"(*List).MoveToFront", Method, 0, ""},
+ {"(*List).PushBack", Method, 0, ""},
+ {"(*List).PushBackList", Method, 0, ""},
+ {"(*List).PushFront", Method, 0, ""},
+ {"(*List).PushFrontList", Method, 0, ""},
+ {"(*List).Remove", Method, 0, ""},
+ {"Element", Type, 0, ""},
+ {"Element.Value", Field, 0, ""},
+ {"List", Type, 0, ""},
+ {"New", Func, 0, "func() *List"},
+ },
+ "container/ring": {
+ {"(*Ring).Do", Method, 0, ""},
+ {"(*Ring).Len", Method, 0, ""},
+ {"(*Ring).Link", Method, 0, ""},
+ {"(*Ring).Move", Method, 0, ""},
+ {"(*Ring).Next", Method, 0, ""},
+ {"(*Ring).Prev", Method, 0, ""},
+ {"(*Ring).Unlink", Method, 0, ""},
+ {"New", Func, 0, "func(n int) *Ring"},
+ {"Ring", Type, 0, ""},
+ {"Ring.Value", Field, 0, ""},
+ },
+ "context": {
+ {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"},
+ {"Background", Func, 7, "func() Context"},
+ {"CancelCauseFunc", Type, 20, ""},
+ {"CancelFunc", Type, 7, ""},
+ {"Canceled", Var, 7, ""},
+ {"Cause", Func, 20, "func(c Context) error"},
+ {"Context", Type, 7, ""},
+ {"DeadlineExceeded", Var, 7, ""},
+ {"TODO", Func, 7, "func() Context"},
+ {"WithCancel", Func, 7, "func(parent Context) (ctx Context, cancel CancelFunc)"},
+ {"WithCancelCause", Func, 20, "func(parent Context) (ctx Context, cancel CancelCauseFunc)"},
+ {"WithDeadline", Func, 7, "func(parent Context, d time.Time) (Context, CancelFunc)"},
+ {"WithDeadlineCause", Func, 21, "func(parent Context, d time.Time, cause error) (Context, CancelFunc)"},
+ {"WithTimeout", Func, 7, "func(parent Context, timeout time.Duration) (Context, CancelFunc)"},
+ {"WithTimeoutCause", Func, 21, "func(parent Context, timeout time.Duration, cause error) (Context, CancelFunc)"},
+ {"WithValue", Func, 7, "func(parent Context, key any, val any) Context"},
+ {"WithoutCancel", Func, 21, "func(parent Context) Context"},
+ },
+ "crypto": {
+ {"(Hash).Available", Method, 0, ""},
+ {"(Hash).HashFunc", Method, 4, ""},
+ {"(Hash).New", Method, 0, ""},
+ {"(Hash).Size", Method, 0, ""},
+ {"(Hash).String", Method, 15, ""},
+ {"BLAKE2b_256", Const, 9, ""},
+ {"BLAKE2b_384", Const, 9, ""},
+ {"BLAKE2b_512", Const, 9, ""},
+ {"BLAKE2s_256", Const, 9, ""},
+ {"Decrypter", Type, 5, ""},
+ {"DecrypterOpts", Type, 5, ""},
+ {"Hash", Type, 0, ""},
+ {"MD4", Const, 0, ""},
+ {"MD5", Const, 0, ""},
+ {"MD5SHA1", Const, 0, ""},
+ {"MessageSigner", Type, 25, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PublicKey", Type, 2, ""},
+ {"RIPEMD160", Const, 0, ""},
+ {"RegisterHash", Func, 0, "func(h Hash, f func() hash.Hash)"},
+ {"SHA1", Const, 0, ""},
+ {"SHA224", Const, 0, ""},
+ {"SHA256", Const, 0, ""},
+ {"SHA384", Const, 0, ""},
+ {"SHA3_224", Const, 4, ""},
+ {"SHA3_256", Const, 4, ""},
+ {"SHA3_384", Const, 4, ""},
+ {"SHA3_512", Const, 4, ""},
+ {"SHA512", Const, 0, ""},
+ {"SHA512_224", Const, 5, ""},
+ {"SHA512_256", Const, 5, ""},
+ {"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"},
+ {"Signer", Type, 4, ""},
+ {"SignerOpts", Type, 4, ""},
+ },
+ "crypto/aes": {
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"BlockSize", Const, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ },
+ "crypto/cipher": {
+ {"(StreamReader).Read", Method, 0, ""},
+ {"(StreamWriter).Close", Method, 0, ""},
+ {"(StreamWriter).Write", Method, 0, ""},
+ {"AEAD", Type, 2, ""},
+ {"Block", Type, 0, ""},
+ {"BlockMode", Type, 0, ""},
+ {"NewCBCDecrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+ {"NewCBCEncrypter", Func, 0, "func(b Block, iv []byte) BlockMode"},
+ {"NewCFBDecrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewCFBEncrypter", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewCTR", Func, 0, "func(block Block, iv []byte) Stream"},
+ {"NewGCM", Func, 2, "func(cipher Block) (AEAD, error)"},
+ {"NewGCMWithNonceSize", Func, 5, "func(cipher Block, size int) (AEAD, error)"},
+ {"NewGCMWithRandomNonce", Func, 24, "func(cipher Block) (AEAD, error)"},
+ {"NewGCMWithTagSize", Func, 11, "func(cipher Block, tagSize int) (AEAD, error)"},
+ {"NewOFB", Func, 0, "func(b Block, iv []byte) Stream"},
+ {"Stream", Type, 0, ""},
+ {"StreamReader", Type, 0, ""},
+ {"StreamReader.R", Field, 0, ""},
+ {"StreamReader.S", Field, 0, ""},
+ {"StreamWriter", Type, 0, ""},
+ {"StreamWriter.Err", Field, 0, ""},
+ {"StreamWriter.S", Field, 0, ""},
+ {"StreamWriter.W", Field, 0, ""},
+ },
+ "crypto/des": {
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"BlockSize", Const, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ {"NewTripleDESCipher", Func, 0, "func(key []byte) (cipher.Block, error)"},
+ },
+ "crypto/dsa": {
+ {"ErrInvalidPublicKey", Var, 0, ""},
+ {"GenerateKey", Func, 0, "func(priv *PrivateKey, rand io.Reader) error"},
+ {"GenerateParameters", Func, 0, "func(params *Parameters, rand io.Reader, sizes ParameterSizes) error"},
+ {"L1024N160", Const, 0, ""},
+ {"L2048N224", Const, 0, ""},
+ {"L2048N256", Const, 0, ""},
+ {"L3072N256", Const, 0, ""},
+ {"ParameterSizes", Type, 0, ""},
+ {"Parameters", Type, 0, ""},
+ {"Parameters.G", Field, 0, ""},
+ {"Parameters.P", Field, 0, ""},
+ {"Parameters.Q", Field, 0, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PrivateKey.X", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.Parameters", Field, 0, ""},
+ {"PublicKey.Y", Field, 0, ""},
+ {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+ {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+ },
+ "crypto/ecdh": {
+ {"(*PrivateKey).Bytes", Method, 20, ""},
+ {"(*PrivateKey).Curve", Method, 20, ""},
+ {"(*PrivateKey).ECDH", Method, 20, ""},
+ {"(*PrivateKey).Equal", Method, 20, ""},
+ {"(*PrivateKey).Public", Method, 20, ""},
+ {"(*PrivateKey).PublicKey", Method, 20, ""},
+ {"(*PublicKey).Bytes", Method, 20, ""},
+ {"(*PublicKey).Curve", Method, 20, ""},
+ {"(*PublicKey).Equal", Method, 20, ""},
+ {"Curve", Type, 20, ""},
+ {"P256", Func, 20, "func() Curve"},
+ {"P384", Func, 20, "func() Curve"},
+ {"P521", Func, 20, "func() Curve"},
+ {"PrivateKey", Type, 20, ""},
+ {"PublicKey", Type, 20, ""},
+ {"X25519", Func, 20, "func() Curve"},
+ },
+ "crypto/ecdsa": {
+ {"(*PrivateKey).Bytes", Method, 25, ""},
+ {"(*PrivateKey).ECDH", Method, 20, ""},
+ {"(*PrivateKey).Equal", Method, 15, ""},
+ {"(*PrivateKey).Public", Method, 4, ""},
+ {"(*PrivateKey).Sign", Method, 4, ""},
+ {"(*PublicKey).Bytes", Method, 25, ""},
+ {"(*PublicKey).ECDH", Method, 20, ""},
+ {"(*PublicKey).Equal", Method, 15, ""},
+ {"(PrivateKey).Add", Method, 0, ""},
+ {"(PrivateKey).Double", Method, 0, ""},
+ {"(PrivateKey).IsOnCurve", Method, 0, ""},
+ {"(PrivateKey).Params", Method, 0, ""},
+ {"(PrivateKey).ScalarBaseMult", Method, 0, ""},
+ {"(PrivateKey).ScalarMult", Method, 0, ""},
+ {"(PublicKey).Add", Method, 0, ""},
+ {"(PublicKey).Double", Method, 0, ""},
+ {"(PublicKey).IsOnCurve", Method, 0, ""},
+ {"(PublicKey).Params", Method, 0, ""},
+ {"(PublicKey).ScalarBaseMult", Method, 0, ""},
+ {"(PublicKey).ScalarMult", Method, 0, ""},
+ {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"},
+ {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"},
+ {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.D", Field, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.Curve", Field, 0, ""},
+ {"PublicKey.X", Field, 0, ""},
+ {"PublicKey.Y", Field, 0, ""},
+ {"Sign", Func, 0, "func(rand io.Reader, priv *PrivateKey, hash []byte) (r *big.Int, s *big.Int, err error)"},
+ {"SignASN1", Func, 15, "func(rand io.Reader, priv *PrivateKey, hash []byte) ([]byte, error)"},
+ {"Verify", Func, 0, "func(pub *PublicKey, hash []byte, r *big.Int, s *big.Int) bool"},
+ {"VerifyASN1", Func, 15, "func(pub *PublicKey, hash []byte, sig []byte) bool"},
+ },
+ "crypto/ed25519": {
+ {"(*Options).HashFunc", Method, 20, ""},
+ {"(PrivateKey).Equal", Method, 15, ""},
+ {"(PrivateKey).Public", Method, 13, ""},
+ {"(PrivateKey).Seed", Method, 13, ""},
+ {"(PrivateKey).Sign", Method, 13, ""},
+ {"(PublicKey).Equal", Method, 15, ""},
+ {"GenerateKey", Func, 13, "func(rand io.Reader) (PublicKey, PrivateKey, error)"},
+ {"NewKeyFromSeed", Func, 13, "func(seed []byte) PrivateKey"},
+ {"Options", Type, 20, ""},
+ {"Options.Context", Field, 20, ""},
+ {"Options.Hash", Field, 20, ""},
+ {"PrivateKey", Type, 13, ""},
+ {"PrivateKeySize", Const, 13, ""},
+ {"PublicKey", Type, 13, ""},
+ {"PublicKeySize", Const, 13, ""},
+ {"SeedSize", Const, 13, ""},
+ {"Sign", Func, 13, "func(privateKey PrivateKey, message []byte) []byte"},
+ {"SignatureSize", Const, 13, ""},
+ {"Verify", Func, 13, "func(publicKey PublicKey, message []byte, sig []byte) bool"},
+ {"VerifyWithOptions", Func, 20, "func(publicKey PublicKey, message []byte, sig []byte, opts *Options) error"},
+ },
+ "crypto/elliptic": {
+ {"(*CurveParams).Add", Method, 0, ""},
+ {"(*CurveParams).Double", Method, 0, ""},
+ {"(*CurveParams).IsOnCurve", Method, 0, ""},
+ {"(*CurveParams).Params", Method, 0, ""},
+ {"(*CurveParams).ScalarBaseMult", Method, 0, ""},
+ {"(*CurveParams).ScalarMult", Method, 0, ""},
+ {"Curve", Type, 0, ""},
+ {"CurveParams", Type, 0, ""},
+ {"CurveParams.B", Field, 0, ""},
+ {"CurveParams.BitSize", Field, 0, ""},
+ {"CurveParams.Gx", Field, 0, ""},
+ {"CurveParams.Gy", Field, 0, ""},
+ {"CurveParams.N", Field, 0, ""},
+ {"CurveParams.Name", Field, 5, ""},
+ {"CurveParams.P", Field, 0, ""},
+ {"GenerateKey", Func, 0, "func(curve Curve, rand io.Reader) (priv []byte, x *big.Int, y *big.Int, err error)"},
+ {"Marshal", Func, 0, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+ {"MarshalCompressed", Func, 15, "func(curve Curve, x *big.Int, y *big.Int) []byte"},
+ {"P224", Func, 0, "func() Curve"},
+ {"P256", Func, 0, "func() Curve"},
+ {"P384", Func, 0, "func() Curve"},
+ {"P521", Func, 0, "func() Curve"},
+ {"Unmarshal", Func, 0, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+ {"UnmarshalCompressed", Func, 15, "func(curve Curve, data []byte) (x *big.Int, y *big.Int)"},
+ },
+ "crypto/fips140": {
+ {"Enabled", Func, 24, "func() bool"},
+ },
+ "crypto/hkdf": {
+ {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"},
+ {"Extract", Func, 24, "func[H hash.Hash](h func() H, secret []byte, salt []byte) ([]byte, error)"},
+ {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, secret []byte, salt []byte, info string, keyLength int) ([]byte, error)"},
+ },
+ "crypto/hmac": {
+ {"Equal", Func, 1, "func(mac1 []byte, mac2 []byte) bool"},
+ {"New", Func, 0, "func(h func() hash.Hash, key []byte) hash.Hash"},
+ },
+ "crypto/md5": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Sum", Func, 2, "func(data []byte) [16]byte"},
+ },
+ "crypto/mlkem": {
+ {"(*DecapsulationKey1024).Bytes", Method, 24, ""},
+ {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""},
+ {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""},
+ {"(*DecapsulationKey768).Bytes", Method, 24, ""},
+ {"(*DecapsulationKey768).Decapsulate", Method, 24, ""},
+ {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""},
+ {"(*EncapsulationKey1024).Bytes", Method, 24, ""},
+ {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""},
+ {"(*EncapsulationKey768).Bytes", Method, 24, ""},
+ {"(*EncapsulationKey768).Encapsulate", Method, 24, ""},
+ {"CiphertextSize1024", Const, 24, ""},
+ {"CiphertextSize768", Const, 24, ""},
+ {"DecapsulationKey1024", Type, 24, ""},
+ {"DecapsulationKey768", Type, 24, ""},
+ {"EncapsulationKey1024", Type, 24, ""},
+ {"EncapsulationKey768", Type, 24, ""},
+ {"EncapsulationKeySize1024", Const, 24, ""},
+ {"EncapsulationKeySize768", Const, 24, ""},
+ {"GenerateKey1024", Func, 24, "func() (*DecapsulationKey1024, error)"},
+ {"GenerateKey768", Func, 24, "func() (*DecapsulationKey768, error)"},
+ {"NewDecapsulationKey1024", Func, 24, "func(seed []byte) (*DecapsulationKey1024, error)"},
+ {"NewDecapsulationKey768", Func, 24, "func(seed []byte) (*DecapsulationKey768, error)"},
+ {"NewEncapsulationKey1024", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey1024, error)"},
+ {"NewEncapsulationKey768", Func, 24, "func(encapsulationKey []byte) (*EncapsulationKey768, error)"},
+ {"SeedSize", Const, 24, ""},
+ {"SharedKeySize", Const, 24, ""},
+ },
+ "crypto/pbkdf2": {
+ {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"},
+ },
+ "crypto/rand": {
+ {"Int", Func, 0, "func(rand io.Reader, max *big.Int) (n *big.Int, err error)"},
+ {"Prime", Func, 0, "func(rand io.Reader, bits int) (*big.Int, error)"},
+ {"Read", Func, 0, "func(b []byte) (n int, err error)"},
+ {"Reader", Var, 0, ""},
+ {"Text", Func, 24, "func() string"},
+ },
+ "crypto/rc4": {
+ {"(*Cipher).Reset", Method, 0, ""},
+ {"(*Cipher).XORKeyStream", Method, 0, ""},
+ {"(KeySizeError).Error", Method, 0, ""},
+ {"Cipher", Type, 0, ""},
+ {"KeySizeError", Type, 0, ""},
+ {"NewCipher", Func, 0, "func(key []byte) (*Cipher, error)"},
+ },
+ "crypto/rsa": {
+ {"(*PSSOptions).HashFunc", Method, 4, ""},
+ {"(*PrivateKey).Decrypt", Method, 5, ""},
+ {"(*PrivateKey).Equal", Method, 15, ""},
+ {"(*PrivateKey).Precompute", Method, 0, ""},
+ {"(*PrivateKey).Public", Method, 4, ""},
+ {"(*PrivateKey).Sign", Method, 4, ""},
+ {"(*PrivateKey).Size", Method, 11, ""},
+ {"(*PrivateKey).Validate", Method, 0, ""},
+ {"(*PublicKey).Equal", Method, 15, ""},
+ {"(*PublicKey).Size", Method, 11, ""},
+ {"CRTValue", Type, 0, ""},
+ {"CRTValue.Coeff", Field, 0, ""},
+ {"CRTValue.Exp", Field, 0, ""},
+ {"CRTValue.R", Field, 0, ""},
+ {"DecryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) ([]byte, error)"},
+ {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"},
+ {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"},
+ {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"},
+ {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"},
+ {"ErrDecryption", Var, 0, ""},
+ {"ErrMessageTooLong", Var, 0, ""},
+ {"ErrVerification", Var, 0, ""},
+ {"GenerateKey", Func, 0, "func(random io.Reader, bits int) (*PrivateKey, error)"},
+ {"GenerateMultiPrimeKey", Func, 0, "func(random io.Reader, nprimes int, bits int) (*PrivateKey, error)"},
+ {"OAEPOptions", Type, 5, ""},
+ {"OAEPOptions.Hash", Field, 5, ""},
+ {"OAEPOptions.Label", Field, 5, ""},
+ {"OAEPOptions.MGFHash", Field, 20, ""},
+ {"PKCS1v15DecryptOptions", Type, 5, ""},
+ {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5, ""},
+ {"PSSOptions", Type, 2, ""},
+ {"PSSOptions.Hash", Field, 4, ""},
+ {"PSSOptions.SaltLength", Field, 2, ""},
+ {"PSSSaltLengthAuto", Const, 2, ""},
+ {"PSSSaltLengthEqualsHash", Const, 2, ""},
+ {"PrecomputedValues", Type, 0, ""},
+ {"PrecomputedValues.CRTValues", Field, 0, ""},
+ {"PrecomputedValues.Dp", Field, 0, ""},
+ {"PrecomputedValues.Dq", Field, 0, ""},
+ {"PrecomputedValues.Qinv", Field, 0, ""},
+ {"PrivateKey", Type, 0, ""},
+ {"PrivateKey.D", Field, 0, ""},
+ {"PrivateKey.Precomputed", Field, 0, ""},
+ {"PrivateKey.Primes", Field, 0, ""},
+ {"PrivateKey.PublicKey", Field, 0, ""},
+ {"PublicKey", Type, 0, ""},
+ {"PublicKey.E", Field, 0, ""},
+ {"PublicKey.N", Field, 0, ""},
+ {"SignPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)"},
+ {"SignPSS", Func, 2, "func(rand io.Reader, priv *PrivateKey, hash crypto.Hash, digest []byte, opts *PSSOptions) ([]byte, error)"},
+ {"VerifyPKCS1v15", Func, 0, "func(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) error"},
+ {"VerifyPSS", Func, 2, "func(pub *PublicKey, hash crypto.Hash, digest []byte, sig []byte, opts *PSSOptions) error"},
+ },
+ "crypto/sha1": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Sum", Func, 2, "func(data []byte) [20]byte"},
+ },
+ "crypto/sha256": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"New224", Func, 0, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Size224", Const, 0, ""},
+ {"Sum224", Func, 2, "func(data []byte) [28]byte"},
+ {"Sum256", Func, 2, "func(data []byte) [32]byte"},
+ },
+ "crypto/sha3": {
+ {"(*SHA3).AppendBinary", Method, 24, ""},
+ {"(*SHA3).BlockSize", Method, 24, ""},
+ {"(*SHA3).Clone", Method, 25, ""},
+ {"(*SHA3).MarshalBinary", Method, 24, ""},
+ {"(*SHA3).Reset", Method, 24, ""},
+ {"(*SHA3).Size", Method, 24, ""},
+ {"(*SHA3).Sum", Method, 24, ""},
+ {"(*SHA3).UnmarshalBinary", Method, 24, ""},
+ {"(*SHA3).Write", Method, 24, ""},
+ {"(*SHAKE).AppendBinary", Method, 24, ""},
+ {"(*SHAKE).BlockSize", Method, 24, ""},
+ {"(*SHAKE).MarshalBinary", Method, 24, ""},
+ {"(*SHAKE).Read", Method, 24, ""},
+ {"(*SHAKE).Reset", Method, 24, ""},
+ {"(*SHAKE).UnmarshalBinary", Method, 24, ""},
+ {"(*SHAKE).Write", Method, 24, ""},
+ {"New224", Func, 24, "func() *SHA3"},
+ {"New256", Func, 24, "func() *SHA3"},
+ {"New384", Func, 24, "func() *SHA3"},
+ {"New512", Func, 24, "func() *SHA3"},
+ {"NewCSHAKE128", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+ {"NewCSHAKE256", Func, 24, "func(N []byte, S []byte) *SHAKE"},
+ {"NewSHAKE128", Func, 24, "func() *SHAKE"},
+ {"NewSHAKE256", Func, 24, "func() *SHAKE"},
+ {"SHA3", Type, 24, ""},
+ {"SHAKE", Type, 24, ""},
+ {"Sum224", Func, 24, "func(data []byte) [28]byte"},
+ {"Sum256", Func, 24, "func(data []byte) [32]byte"},
+ {"Sum384", Func, 24, "func(data []byte) [48]byte"},
+ {"Sum512", Func, 24, "func(data []byte) [64]byte"},
+ {"SumSHAKE128", Func, 24, "func(data []byte, length int) []byte"},
+ {"SumSHAKE256", Func, 24, "func(data []byte, length int) []byte"},
+ },
+ "crypto/sha512": {
+ {"BlockSize", Const, 0, ""},
+ {"New", Func, 0, "func() hash.Hash"},
+ {"New384", Func, 0, "func() hash.Hash"},
+ {"New512_224", Func, 5, "func() hash.Hash"},
+ {"New512_256", Func, 5, "func() hash.Hash"},
+ {"Size", Const, 0, ""},
+ {"Size224", Const, 5, ""},
+ {"Size256", Const, 5, ""},
+ {"Size384", Const, 0, ""},
+ {"Sum384", Func, 2, "func(data []byte) [48]byte"},
+ {"Sum512", Func, 2, "func(data []byte) [64]byte"},
+ {"Sum512_224", Func, 5, "func(data []byte) [28]byte"},
+ {"Sum512_256", Func, 5, "func(data []byte) [32]byte"},
+ },
+ "crypto/subtle": {
+ {"ConstantTimeByteEq", Func, 0, "func(x uint8, y uint8) int"},
+ {"ConstantTimeCompare", Func, 0, "func(x []byte, y []byte) int"},
+ {"ConstantTimeCopy", Func, 0, "func(v int, x []byte, y []byte)"},
+ {"ConstantTimeEq", Func, 0, "func(x int32, y int32) int"},
+ {"ConstantTimeLessOrEq", Func, 2, "func(x int, y int) int"},
+ {"ConstantTimeSelect", Func, 0, "func(v int, x int, y int) int"},
+ {"WithDataIndependentTiming", Func, 24, "func(f func())"},
+ {"XORBytes", Func, 20, "func(dst []byte, x []byte, y []byte) int"},
+ },
+ "crypto/tls": {
+ {"(*CertificateRequestInfo).Context", Method, 17, ""},
+ {"(*CertificateRequestInfo).SupportsCertificate", Method, 14, ""},
+ {"(*CertificateVerificationError).Error", Method, 20, ""},
+ {"(*CertificateVerificationError).Unwrap", Method, 20, ""},
+ {"(*ClientHelloInfo).Context", Method, 17, ""},
+ {"(*ClientHelloInfo).SupportsCertificate", Method, 14, ""},
+ {"(*ClientSessionState).ResumptionState", Method, 21, ""},
+ {"(*Config).BuildNameToCertificate", Method, 0, ""},
+ {"(*Config).Clone", Method, 8, ""},
+ {"(*Config).DecryptTicket", Method, 21, ""},
+ {"(*Config).EncryptTicket", Method, 21, ""},
+ {"(*Config).SetSessionTicketKeys", Method, 5, ""},
+ {"(*Conn).Close", Method, 0, ""},
+ {"(*Conn).CloseWrite", Method, 8, ""},
+ {"(*Conn).ConnectionState", Method, 0, ""},
+ {"(*Conn).Handshake", Method, 0, ""},
+ {"(*Conn).HandshakeContext", Method, 17, ""},
+ {"(*Conn).LocalAddr", Method, 0, ""},
+ {"(*Conn).NetConn", Method, 18, ""},
+ {"(*Conn).OCSPResponse", Method, 0, ""},
+ {"(*Conn).Read", Method, 0, ""},
+ {"(*Conn).RemoteAddr", Method, 0, ""},
+ {"(*Conn).SetDeadline", Method, 0, ""},
+ {"(*Conn).SetReadDeadline", Method, 0, ""},
+ {"(*Conn).SetWriteDeadline", Method, 0, ""},
+ {"(*Conn).VerifyHostname", Method, 0, ""},
+ {"(*Conn).Write", Method, 0, ""},
+ {"(*ConnectionState).ExportKeyingMaterial", Method, 11, ""},
+ {"(*Dialer).Dial", Method, 15, ""},
+ {"(*Dialer).DialContext", Method, 15, ""},
+ {"(*ECHRejectionError).Error", Method, 23, ""},
+ {"(*QUICConn).Close", Method, 21, ""},
+ {"(*QUICConn).ConnectionState", Method, 21, ""},
+ {"(*QUICConn).HandleData", Method, 21, ""},
+ {"(*QUICConn).NextEvent", Method, 21, ""},
+ {"(*QUICConn).SendSessionTicket", Method, 21, ""},
+ {"(*QUICConn).SetTransportParameters", Method, 21, ""},
+ {"(*QUICConn).Start", Method, 21, ""},
+ {"(*QUICConn).StoreSession", Method, 23, ""},
+ {"(*SessionState).Bytes", Method, 21, ""},
+ {"(AlertError).Error", Method, 21, ""},
+ {"(ClientAuthType).String", Method, 15, ""},
+ {"(CurveID).String", Method, 15, ""},
+ {"(QUICEncryptionLevel).String", Method, 21, ""},
+ {"(RecordHeaderError).Error", Method, 6, ""},
+ {"(SignatureScheme).String", Method, 15, ""},
+ {"AlertError", Type, 21, ""},
+ {"Certificate", Type, 0, ""},
+ {"Certificate.Certificate", Field, 0, ""},
+ {"Certificate.Leaf", Field, 0, ""},
+ {"Certificate.OCSPStaple", Field, 0, ""},
+ {"Certificate.PrivateKey", Field, 0, ""},
+ {"Certificate.SignedCertificateTimestamps", Field, 5, ""},
+ {"Certificate.SupportedSignatureAlgorithms", Field, 14, ""},
+ {"CertificateRequestInfo", Type, 8, ""},
+ {"CertificateRequestInfo.AcceptableCAs", Field, 8, ""},
+ {"CertificateRequestInfo.SignatureSchemes", Field, 8, ""},
+ {"CertificateRequestInfo.Version", Field, 14, ""},
+ {"CertificateVerificationError", Type, 20, ""},
+ {"CertificateVerificationError.Err", Field, 20, ""},
+ {"CertificateVerificationError.UnverifiedCertificates", Field, 20, ""},
+ {"CipherSuite", Type, 14, ""},
+ {"CipherSuite.ID", Field, 14, ""},
+ {"CipherSuite.Insecure", Field, 14, ""},
+ {"CipherSuite.Name", Field, 14, ""},
+ {"CipherSuite.SupportedVersions", Field, 14, ""},
+ {"CipherSuiteName", Func, 14, "func(id uint16) string"},
+ {"CipherSuites", Func, 14, "func() []*CipherSuite"},
+ {"Client", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+ {"ClientAuthType", Type, 0, ""},
+ {"ClientHelloInfo", Type, 4, ""},
+ {"ClientHelloInfo.CipherSuites", Field, 4, ""},
+ {"ClientHelloInfo.Conn", Field, 8, ""},
+ {"ClientHelloInfo.Extensions", Field, 24, ""},
+ {"ClientHelloInfo.ServerName", Field, 4, ""},
+ {"ClientHelloInfo.SignatureSchemes", Field, 8, ""},
+ {"ClientHelloInfo.SupportedCurves", Field, 4, ""},
+ {"ClientHelloInfo.SupportedPoints", Field, 4, ""},
+ {"ClientHelloInfo.SupportedProtos", Field, 8, ""},
+ {"ClientHelloInfo.SupportedVersions", Field, 8, ""},
+ {"ClientSessionCache", Type, 3, ""},
+ {"ClientSessionState", Type, 3, ""},
+ {"Config", Type, 0, ""},
+ {"Config.Certificates", Field, 0, ""},
+ {"Config.CipherSuites", Field, 0, ""},
+ {"Config.ClientAuth", Field, 0, ""},
+ {"Config.ClientCAs", Field, 0, ""},
+ {"Config.ClientSessionCache", Field, 3, ""},
+ {"Config.CurvePreferences", Field, 3, ""},
+ {"Config.DynamicRecordSizingDisabled", Field, 7, ""},
+ {"Config.EncryptedClientHelloConfigList", Field, 23, ""},
+ {"Config.EncryptedClientHelloKeys", Field, 24, ""},
+ {"Config.EncryptedClientHelloRejectionVerify", Field, 23, ""},
+ {"Config.GetCertificate", Field, 4, ""},
+ {"Config.GetClientCertificate", Field, 8, ""},
+ {"Config.GetConfigForClient", Field, 8, ""},
+ {"Config.GetEncryptedClientHelloKeys", Field, 25, ""},
+ {"Config.InsecureSkipVerify", Field, 0, ""},
+ {"Config.KeyLogWriter", Field, 8, ""},
+ {"Config.MaxVersion", Field, 2, ""},
+ {"Config.MinVersion", Field, 2, ""},
+ {"Config.NameToCertificate", Field, 0, ""},
+ {"Config.NextProtos", Field, 0, ""},
+ {"Config.PreferServerCipherSuites", Field, 1, ""},
+ {"Config.Rand", Field, 0, ""},
+ {"Config.Renegotiation", Field, 7, ""},
+ {"Config.RootCAs", Field, 0, ""},
+ {"Config.ServerName", Field, 0, ""},
+ {"Config.SessionTicketKey", Field, 1, ""},
+ {"Config.SessionTicketsDisabled", Field, 1, ""},
+ {"Config.Time", Field, 0, ""},
+ {"Config.UnwrapSession", Field, 21, ""},
+ {"Config.VerifyConnection", Field, 15, ""},
+ {"Config.VerifyPeerCertificate", Field, 8, ""},
+ {"Config.WrapSession", Field, 21, ""},
+ {"Conn", Type, 0, ""},
+ {"ConnectionState", Type, 0, ""},
+ {"ConnectionState.CipherSuite", Field, 0, ""},
+ {"ConnectionState.CurveID", Field, 25, ""},
+ {"ConnectionState.DidResume", Field, 1, ""},
+ {"ConnectionState.ECHAccepted", Field, 23, ""},
+ {"ConnectionState.HandshakeComplete", Field, 0, ""},
+ {"ConnectionState.NegotiatedProtocol", Field, 0, ""},
+ {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""},
+ {"ConnectionState.OCSPResponse", Field, 5, ""},
+ {"ConnectionState.PeerCertificates", Field, 0, ""},
+ {"ConnectionState.ServerName", Field, 0, ""},
+ {"ConnectionState.SignedCertificateTimestamps", Field, 5, ""},
+ {"ConnectionState.TLSUnique", Field, 4, ""},
+ {"ConnectionState.VerifiedChains", Field, 0, ""},
+ {"ConnectionState.Version", Field, 3, ""},
+ {"CurveID", Type, 3, ""},
+ {"CurveP256", Const, 3, ""},
+ {"CurveP384", Const, 3, ""},
+ {"CurveP521", Const, 3, ""},
+ {"Dial", Func, 0, "func(network string, addr string, config *Config) (*Conn, error)"},
+ {"DialWithDialer", Func, 3, "func(dialer *net.Dialer, network string, addr string, config *Config) (*Conn, error)"},
+ {"Dialer", Type, 15, ""},
+ {"Dialer.Config", Field, 15, ""},
+ {"Dialer.NetDialer", Field, 15, ""},
+ {"ECDSAWithP256AndSHA256", Const, 8, ""},
+ {"ECDSAWithP384AndSHA384", Const, 8, ""},
+ {"ECDSAWithP521AndSHA512", Const, 8, ""},
+ {"ECDSAWithSHA1", Const, 10, ""},
+ {"ECHRejectionError", Type, 23, ""},
+ {"ECHRejectionError.RetryConfigList", Field, 23, ""},
+ {"Ed25519", Const, 13, ""},
+ {"EncryptedClientHelloKey", Type, 24, ""},
+ {"EncryptedClientHelloKey.Config", Field, 24, ""},
+ {"EncryptedClientHelloKey.PrivateKey", Field, 24, ""},
+ {"EncryptedClientHelloKey.SendAsRetry", Field, 24, ""},
+ {"InsecureCipherSuites", Func, 14, "func() []*CipherSuite"},
+ {"Listen", Func, 0, "func(network string, laddr string, config *Config) (net.Listener, error)"},
+ {"LoadX509KeyPair", Func, 0, "func(certFile string, keyFile string) (Certificate, error)"},
+ {"NewLRUClientSessionCache", Func, 3, "func(capacity int) ClientSessionCache"},
+ {"NewListener", Func, 0, "func(inner net.Listener, config *Config) net.Listener"},
+ {"NewResumptionState", Func, 21, "func(ticket []byte, state *SessionState) (*ClientSessionState, error)"},
+ {"NoClientCert", Const, 0, ""},
+ {"PKCS1WithSHA1", Const, 8, ""},
+ {"PKCS1WithSHA256", Const, 8, ""},
+ {"PKCS1WithSHA384", Const, 8, ""},
+ {"PKCS1WithSHA512", Const, 8, ""},
+ {"PSSWithSHA256", Const, 8, ""},
+ {"PSSWithSHA384", Const, 8, ""},
+ {"PSSWithSHA512", Const, 8, ""},
+ {"ParseSessionState", Func, 21, "func(data []byte) (*SessionState, error)"},
+ {"QUICClient", Func, 21, "func(config *QUICConfig) *QUICConn"},
+ {"QUICConfig", Type, 21, ""},
+ {"QUICConfig.EnableSessionEvents", Field, 23, ""},
+ {"QUICConfig.TLSConfig", Field, 21, ""},
+ {"QUICConn", Type, 21, ""},
+ {"QUICEncryptionLevel", Type, 21, ""},
+ {"QUICEncryptionLevelApplication", Const, 21, ""},
+ {"QUICEncryptionLevelEarly", Const, 21, ""},
+ {"QUICEncryptionLevelHandshake", Const, 21, ""},
+ {"QUICEncryptionLevelInitial", Const, 21, ""},
+ {"QUICEvent", Type, 21, ""},
+ {"QUICEvent.Data", Field, 21, ""},
+ {"QUICEvent.Kind", Field, 21, ""},
+ {"QUICEvent.Level", Field, 21, ""},
+ {"QUICEvent.SessionState", Field, 23, ""},
+ {"QUICEvent.Suite", Field, 21, ""},
+ {"QUICEventKind", Type, 21, ""},
+ {"QUICHandshakeDone", Const, 21, ""},
+ {"QUICNoEvent", Const, 21, ""},
+ {"QUICRejectedEarlyData", Const, 21, ""},
+ {"QUICResumeSession", Const, 23, ""},
+ {"QUICServer", Func, 21, "func(config *QUICConfig) *QUICConn"},
+ {"QUICSessionTicketOptions", Type, 21, ""},
+ {"QUICSessionTicketOptions.EarlyData", Field, 21, ""},
+ {"QUICSessionTicketOptions.Extra", Field, 23, ""},
+ {"QUICSetReadSecret", Const, 21, ""},
+ {"QUICSetWriteSecret", Const, 21, ""},
+ {"QUICStoreSession", Const, 23, ""},
+ {"QUICTransportParameters", Const, 21, ""},
+ {"QUICTransportParametersRequired", Const, 21, ""},
+ {"QUICWriteData", Const, 21, ""},
+ {"RecordHeaderError", Type, 6, ""},
+ {"RecordHeaderError.Conn", Field, 12, ""},
+ {"RecordHeaderError.Msg", Field, 6, ""},
+ {"RecordHeaderError.RecordHeader", Field, 6, ""},
+ {"RenegotiateFreelyAsClient", Const, 7, ""},
+ {"RenegotiateNever", Const, 7, ""},
+ {"RenegotiateOnceAsClient", Const, 7, ""},
+ {"RenegotiationSupport", Type, 7, ""},
+ {"RequestClientCert", Const, 0, ""},
+ {"RequireAndVerifyClientCert", Const, 0, ""},
+ {"RequireAnyClientCert", Const, 0, ""},
+ {"Server", Func, 0, "func(conn net.Conn, config *Config) *Conn"},
+ {"SessionState", Type, 21, ""},
+ {"SessionState.EarlyData", Field, 21, ""},
+ {"SessionState.Extra", Field, 21, ""},
+ {"SignatureScheme", Type, 8, ""},
+ {"TLS_AES_128_GCM_SHA256", Const, 12, ""},
+ {"TLS_AES_256_GCM_SHA384", Const, 12, ""},
+ {"TLS_CHACHA20_POLY1305_SHA256", Const, 12, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2, ""},
+ {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+ {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+ {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+ {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2, ""},
+ {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+ {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5, ""},
+ {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8, ""},
+ {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14, ""},
+ {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+ {"TLS_FALLBACK_SCSV", Const, 4, ""},
+ {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0, ""},
+ {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0, ""},
+ {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8, ""},
+ {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6, ""},
+ {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1, ""},
+ {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6, ""},
+ {"TLS_RSA_WITH_RC4_128_SHA", Const, 0, ""},
+ {"VerifyClientCertIfGiven", Const, 0, ""},
+ {"VersionName", Func, 21, "func(version uint16) string"},
+ {"VersionSSL30", Const, 2, ""},
+ {"VersionTLS10", Const, 2, ""},
+ {"VersionTLS11", Const, 2, ""},
+ {"VersionTLS12", Const, 2, ""},
+ {"VersionTLS13", Const, 12, ""},
+ {"X25519", Const, 8, ""},
+ {"X25519MLKEM768", Const, 24, ""},
+ {"X509KeyPair", Func, 0, "func(certPEMBlock []byte, keyPEMBlock []byte) (Certificate, error)"},
+ },
+ "crypto/x509": {
+ {"(*CertPool).AddCert", Method, 0, ""},
+ {"(*CertPool).AddCertWithConstraint", Method, 22, ""},
+ {"(*CertPool).AppendCertsFromPEM", Method, 0, ""},
+ {"(*CertPool).Clone", Method, 19, ""},
+ {"(*CertPool).Equal", Method, 19, ""},
+ {"(*CertPool).Subjects", Method, 0, ""},
+ {"(*Certificate).CheckCRLSignature", Method, 0, ""},
+ {"(*Certificate).CheckSignature", Method, 0, ""},
+ {"(*Certificate).CheckSignatureFrom", Method, 0, ""},
+ {"(*Certificate).CreateCRL", Method, 0, ""},
+ {"(*Certificate).Equal", Method, 0, ""},
+ {"(*Certificate).Verify", Method, 0, ""},
+ {"(*Certificate).VerifyHostname", Method, 0, ""},
+ {"(*CertificateRequest).CheckSignature", Method, 5, ""},
+ {"(*OID).UnmarshalBinary", Method, 23, ""},
+ {"(*OID).UnmarshalText", Method, 23, ""},
+ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""},
+ {"(CertificateInvalidError).Error", Method, 0, ""},
+ {"(ConstraintViolationError).Error", Method, 0, ""},
+ {"(HostnameError).Error", Method, 0, ""},
+ {"(InsecureAlgorithmError).Error", Method, 6, ""},
+ {"(OID).AppendBinary", Method, 24, ""},
+ {"(OID).AppendText", Method, 24, ""},
+ {"(OID).Equal", Method, 22, ""},
+ {"(OID).EqualASN1OID", Method, 22, ""},
+ {"(OID).MarshalBinary", Method, 23, ""},
+ {"(OID).MarshalText", Method, 23, ""},
+ {"(OID).String", Method, 22, ""},
+ {"(PublicKeyAlgorithm).String", Method, 10, ""},
+ {"(SignatureAlgorithm).String", Method, 6, ""},
+ {"(SystemRootsError).Error", Method, 1, ""},
+ {"(SystemRootsError).Unwrap", Method, 16, ""},
+ {"(UnhandledCriticalExtension).Error", Method, 0, ""},
+ {"(UnknownAuthorityError).Error", Method, 0, ""},
+ {"CANotAuthorizedForExtKeyUsage", Const, 10, ""},
+ {"CANotAuthorizedForThisName", Const, 0, ""},
+ {"CertPool", Type, 0, ""},
+ {"Certificate", Type, 0, ""},
+ {"Certificate.AuthorityKeyId", Field, 0, ""},
+ {"Certificate.BasicConstraintsValid", Field, 0, ""},
+ {"Certificate.CRLDistributionPoints", Field, 2, ""},
+ {"Certificate.DNSNames", Field, 0, ""},
+ {"Certificate.EmailAddresses", Field, 0, ""},
+ {"Certificate.ExcludedDNSDomains", Field, 9, ""},
+ {"Certificate.ExcludedEmailAddresses", Field, 10, ""},
+ {"Certificate.ExcludedIPRanges", Field, 10, ""},
+ {"Certificate.ExcludedURIDomains", Field, 10, ""},
+ {"Certificate.ExtKeyUsage", Field, 0, ""},
+ {"Certificate.Extensions", Field, 2, ""},
+ {"Certificate.ExtraExtensions", Field, 2, ""},
+ {"Certificate.IPAddresses", Field, 1, ""},
+ {"Certificate.InhibitAnyPolicy", Field, 24, ""},
+ {"Certificate.InhibitAnyPolicyZero", Field, 24, ""},
+ {"Certificate.InhibitPolicyMapping", Field, 24, ""},
+ {"Certificate.InhibitPolicyMappingZero", Field, 24, ""},
+ {"Certificate.IsCA", Field, 0, ""},
+ {"Certificate.Issuer", Field, 0, ""},
+ {"Certificate.IssuingCertificateURL", Field, 2, ""},
+ {"Certificate.KeyUsage", Field, 0, ""},
+ {"Certificate.MaxPathLen", Field, 0, ""},
+ {"Certificate.MaxPathLenZero", Field, 4, ""},
+ {"Certificate.NotAfter", Field, 0, ""},
+ {"Certificate.NotBefore", Field, 0, ""},
+ {"Certificate.OCSPServer", Field, 2, ""},
+ {"Certificate.PermittedDNSDomains", Field, 0, ""},
+ {"Certificate.PermittedDNSDomainsCritical", Field, 0, ""},
+ {"Certificate.PermittedEmailAddresses", Field, 10, ""},
+ {"Certificate.PermittedIPRanges", Field, 10, ""},
+ {"Certificate.PermittedURIDomains", Field, 10, ""},
+ {"Certificate.Policies", Field, 22, ""},
+ {"Certificate.PolicyIdentifiers", Field, 0, ""},
+ {"Certificate.PolicyMappings", Field, 24, ""},
+ {"Certificate.PublicKey", Field, 0, ""},
+ {"Certificate.PublicKeyAlgorithm", Field, 0, ""},
+ {"Certificate.Raw", Field, 0, ""},
+ {"Certificate.RawIssuer", Field, 0, ""},
+ {"Certificate.RawSubject", Field, 0, ""},
+ {"Certificate.RawSubjectPublicKeyInfo", Field, 0, ""},
+ {"Certificate.RawTBSCertificate", Field, 0, ""},
+ {"Certificate.RequireExplicitPolicy", Field, 24, ""},
+ {"Certificate.RequireExplicitPolicyZero", Field, 24, ""},
+ {"Certificate.SerialNumber", Field, 0, ""},
+ {"Certificate.Signature", Field, 0, ""},
+ {"Certificate.SignatureAlgorithm", Field, 0, ""},
+ {"Certificate.Subject", Field, 0, ""},
+ {"Certificate.SubjectKeyId", Field, 0, ""},
+ {"Certificate.URIs", Field, 10, ""},
+ {"Certificate.UnhandledCriticalExtensions", Field, 5, ""},
+ {"Certificate.UnknownExtKeyUsage", Field, 0, ""},
+ {"Certificate.Version", Field, 0, ""},
+ {"CertificateInvalidError", Type, 0, ""},
+ {"CertificateInvalidError.Cert", Field, 0, ""},
+ {"CertificateInvalidError.Detail", Field, 10, ""},
+ {"CertificateInvalidError.Reason", Field, 0, ""},
+ {"CertificateRequest", Type, 3, ""},
+ {"CertificateRequest.Attributes", Field, 3, ""},
+ {"CertificateRequest.DNSNames", Field, 3, ""},
+ {"CertificateRequest.EmailAddresses", Field, 3, ""},
+ {"CertificateRequest.Extensions", Field, 3, ""},
+ {"CertificateRequest.ExtraExtensions", Field, 3, ""},
+ {"CertificateRequest.IPAddresses", Field, 3, ""},
+ {"CertificateRequest.PublicKey", Field, 3, ""},
+ {"CertificateRequest.PublicKeyAlgorithm", Field, 3, ""},
+ {"CertificateRequest.Raw", Field, 3, ""},
+ {"CertificateRequest.RawSubject", Field, 3, ""},
+ {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3, ""},
+ {"CertificateRequest.RawTBSCertificateRequest", Field, 3, ""},
+ {"CertificateRequest.Signature", Field, 3, ""},
+ {"CertificateRequest.SignatureAlgorithm", Field, 3, ""},
+ {"CertificateRequest.Subject", Field, 3, ""},
+ {"CertificateRequest.URIs", Field, 10, ""},
+ {"CertificateRequest.Version", Field, 3, ""},
+ {"ConstraintViolationError", Type, 0, ""},
+ {"CreateCertificate", Func, 0, "func(rand io.Reader, template *Certificate, parent *Certificate, pub any, priv any) ([]byte, error)"},
+ {"CreateCertificateRequest", Func, 3, "func(rand io.Reader, template *CertificateRequest, priv any) (csr []byte, err error)"},
+ {"CreateRevocationList", Func, 15, "func(rand io.Reader, template *RevocationList, issuer *Certificate, priv crypto.Signer) ([]byte, error)"},
+ {"DSA", Const, 0, ""},
+ {"DSAWithSHA1", Const, 0, ""},
+ {"DSAWithSHA256", Const, 0, ""},
+ {"DecryptPEMBlock", Func, 1, "func(b *pem.Block, password []byte) ([]byte, error)"},
+ {"ECDSA", Const, 1, ""},
+ {"ECDSAWithSHA1", Const, 1, ""},
+ {"ECDSAWithSHA256", Const, 1, ""},
+ {"ECDSAWithSHA384", Const, 1, ""},
+ {"ECDSAWithSHA512", Const, 1, ""},
+ {"Ed25519", Const, 13, ""},
+ {"EncryptPEMBlock", Func, 1, "func(rand io.Reader, blockType string, data []byte, password []byte, alg PEMCipher) (*pem.Block, error)"},
+ {"ErrUnsupportedAlgorithm", Var, 0, ""},
+ {"Expired", Const, 0, ""},
+ {"ExtKeyUsage", Type, 0, ""},
+ {"ExtKeyUsageAny", Const, 0, ""},
+ {"ExtKeyUsageClientAuth", Const, 0, ""},
+ {"ExtKeyUsageCodeSigning", Const, 0, ""},
+ {"ExtKeyUsageEmailProtection", Const, 0, ""},
+ {"ExtKeyUsageIPSECEndSystem", Const, 1, ""},
+ {"ExtKeyUsageIPSECTunnel", Const, 1, ""},
+ {"ExtKeyUsageIPSECUser", Const, 1, ""},
+ {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10, ""},
+ {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10, ""},
+ {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1, ""},
+ {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1, ""},
+ {"ExtKeyUsageOCSPSigning", Const, 0, ""},
+ {"ExtKeyUsageServerAuth", Const, 0, ""},
+ {"ExtKeyUsageTimeStamping", Const, 0, ""},
+ {"HostnameError", Type, 0, ""},
+ {"HostnameError.Certificate", Field, 0, ""},
+ {"HostnameError.Host", Field, 0, ""},
+ {"IncompatibleUsage", Const, 1, ""},
+ {"IncorrectPasswordError", Var, 1, ""},
+ {"InsecureAlgorithmError", Type, 6, ""},
+ {"InvalidReason", Type, 0, ""},
+ {"IsEncryptedPEMBlock", Func, 1, "func(b *pem.Block) bool"},
+ {"KeyUsage", Type, 0, ""},
+ {"KeyUsageCRLSign", Const, 0, ""},
+ {"KeyUsageCertSign", Const, 0, ""},
+ {"KeyUsageContentCommitment", Const, 0, ""},
+ {"KeyUsageDataEncipherment", Const, 0, ""},
+ {"KeyUsageDecipherOnly", Const, 0, ""},
+ {"KeyUsageDigitalSignature", Const, 0, ""},
+ {"KeyUsageEncipherOnly", Const, 0, ""},
+ {"KeyUsageKeyAgreement", Const, 0, ""},
+ {"KeyUsageKeyEncipherment", Const, 0, ""},
+ {"MD2WithRSA", Const, 0, ""},
+ {"MD5WithRSA", Const, 0, ""},
+ {"MarshalECPrivateKey", Func, 2, "func(key *ecdsa.PrivateKey) ([]byte, error)"},
+ {"MarshalPKCS1PrivateKey", Func, 0, "func(key *rsa.PrivateKey) []byte"},
+ {"MarshalPKCS1PublicKey", Func, 10, "func(key *rsa.PublicKey) []byte"},
+ {"MarshalPKCS8PrivateKey", Func, 10, "func(key any) ([]byte, error)"},
+ {"MarshalPKIXPublicKey", Func, 0, "func(pub any) ([]byte, error)"},
+ {"NameConstraintsWithoutSANs", Const, 10, ""},
+ {"NameMismatch", Const, 8, ""},
+ {"NewCertPool", Func, 0, "func() *CertPool"},
+ {"NoValidChains", Const, 24, ""},
+ {"NotAuthorizedToSign", Const, 0, ""},
+ {"OID", Type, 22, ""},
+ {"OIDFromInts", Func, 22, "func(oid []uint64) (OID, error)"},
+ {"PEMCipher", Type, 1, ""},
+ {"PEMCipher3DES", Const, 1, ""},
+ {"PEMCipherAES128", Const, 1, ""},
+ {"PEMCipherAES192", Const, 1, ""},
+ {"PEMCipherAES256", Const, 1, ""},
+ {"PEMCipherDES", Const, 1, ""},
+ {"ParseCRL", Func, 0, "func(crlBytes []byte) (*pkix.CertificateList, error)"},
+ {"ParseCertificate", Func, 0, "func(der []byte) (*Certificate, error)"},
+ {"ParseCertificateRequest", Func, 3, "func(asn1Data []byte) (*CertificateRequest, error)"},
+ {"ParseCertificates", Func, 0, "func(der []byte) ([]*Certificate, error)"},
+ {"ParseDERCRL", Func, 0, "func(derBytes []byte) (*pkix.CertificateList, error)"},
+ {"ParseECPrivateKey", Func, 1, "func(der []byte) (*ecdsa.PrivateKey, error)"},
+ {"ParseOID", Func, 23, "func(oid string) (OID, error)"},
+ {"ParsePKCS1PrivateKey", Func, 0, "func(der []byte) (*rsa.PrivateKey, error)"},
+ {"ParsePKCS1PublicKey", Func, 10, "func(der []byte) (*rsa.PublicKey, error)"},
+ {"ParsePKCS8PrivateKey", Func, 0, "func(der []byte) (key any, err error)"},
+ {"ParsePKIXPublicKey", Func, 0, "func(derBytes []byte) (pub any, err error)"},
+ {"ParseRevocationList", Func, 19, "func(der []byte) (*RevocationList, error)"},
+ {"PolicyMapping", Type, 24, ""},
+ {"PolicyMapping.IssuerDomainPolicy", Field, 24, ""},
+ {"PolicyMapping.SubjectDomainPolicy", Field, 24, ""},
+ {"PublicKeyAlgorithm", Type, 0, ""},
+ {"PureEd25519", Const, 13, ""},
+ {"RSA", Const, 0, ""},
+ {"RevocationList", Type, 15, ""},
+ {"RevocationList.AuthorityKeyId", Field, 19, ""},
+ {"RevocationList.Extensions", Field, 19, ""},
+ {"RevocationList.ExtraExtensions", Field, 15, ""},
+ {"RevocationList.Issuer", Field, 19, ""},
+ {"RevocationList.NextUpdate", Field, 15, ""},
+ {"RevocationList.Number", Field, 15, ""},
+ {"RevocationList.Raw", Field, 19, ""},
+ {"RevocationList.RawIssuer", Field, 19, ""},
+ {"RevocationList.RawTBSRevocationList", Field, 19, ""},
+ {"RevocationList.RevokedCertificateEntries", Field, 21, ""},
+ {"RevocationList.RevokedCertificates", Field, 15, ""},
+ {"RevocationList.Signature", Field, 19, ""},
+ {"RevocationList.SignatureAlgorithm", Field, 15, ""},
+ {"RevocationList.ThisUpdate", Field, 15, ""},
+ {"RevocationListEntry", Type, 21, ""},
+ {"RevocationListEntry.Extensions", Field, 21, ""},
+ {"RevocationListEntry.ExtraExtensions", Field, 21, ""},
+ {"RevocationListEntry.Raw", Field, 21, ""},
+ {"RevocationListEntry.ReasonCode", Field, 21, ""},
+ {"RevocationListEntry.RevocationTime", Field, 21, ""},
+ {"RevocationListEntry.SerialNumber", Field, 21, ""},
+ {"SHA1WithRSA", Const, 0, ""},
+ {"SHA256WithRSA", Const, 0, ""},
+ {"SHA256WithRSAPSS", Const, 8, ""},
+ {"SHA384WithRSA", Const, 0, ""},
+ {"SHA384WithRSAPSS", Const, 8, ""},
+ {"SHA512WithRSA", Const, 0, ""},
+ {"SHA512WithRSAPSS", Const, 8, ""},
+ {"SetFallbackRoots", Func, 20, "func(roots *CertPool)"},
+ {"SignatureAlgorithm", Type, 0, ""},
+ {"SystemCertPool", Func, 7, "func() (*CertPool, error)"},
+ {"SystemRootsError", Type, 1, ""},
+ {"SystemRootsError.Err", Field, 7, ""},
+ {"TooManyConstraints", Const, 10, ""},
+ {"TooManyIntermediates", Const, 0, ""},
+ {"UnconstrainedName", Const, 10, ""},
+ {"UnhandledCriticalExtension", Type, 0, ""},
+ {"UnknownAuthorityError", Type, 0, ""},
+ {"UnknownAuthorityError.Cert", Field, 8, ""},
+ {"UnknownPublicKeyAlgorithm", Const, 0, ""},
+ {"UnknownSignatureAlgorithm", Const, 0, ""},
+ {"VerifyOptions", Type, 0, ""},
+ {"VerifyOptions.CertificatePolicies", Field, 24, ""},
+ {"VerifyOptions.CurrentTime", Field, 0, ""},
+ {"VerifyOptions.DNSName", Field, 0, ""},
+ {"VerifyOptions.Intermediates", Field, 0, ""},
+ {"VerifyOptions.KeyUsages", Field, 1, ""},
+ {"VerifyOptions.MaxConstraintComparisions", Field, 10, ""},
+ {"VerifyOptions.Roots", Field, 0, ""},
+ },
+ "crypto/x509/pkix": {
+ {"(*CertificateList).HasExpired", Method, 0, ""},
+ {"(*Name).FillFromRDNSequence", Method, 0, ""},
+ {"(Name).String", Method, 10, ""},
+ {"(Name).ToRDNSequence", Method, 0, ""},
+ {"(RDNSequence).String", Method, 10, ""},
+ {"AlgorithmIdentifier", Type, 0, ""},
+ {"AlgorithmIdentifier.Algorithm", Field, 0, ""},
+ {"AlgorithmIdentifier.Parameters", Field, 0, ""},
+ {"AttributeTypeAndValue", Type, 0, ""},
+ {"AttributeTypeAndValue.Type", Field, 0, ""},
+ {"AttributeTypeAndValue.Value", Field, 0, ""},
+ {"AttributeTypeAndValueSET", Type, 3, ""},
+ {"AttributeTypeAndValueSET.Type", Field, 3, ""},
+ {"AttributeTypeAndValueSET.Value", Field, 3, ""},
+ {"CertificateList", Type, 0, ""},
+ {"CertificateList.SignatureAlgorithm", Field, 0, ""},
+ {"CertificateList.SignatureValue", Field, 0, ""},
+ {"CertificateList.TBSCertList", Field, 0, ""},
+ {"Extension", Type, 0, ""},
+ {"Extension.Critical", Field, 0, ""},
+ {"Extension.Id", Field, 0, ""},
+ {"Extension.Value", Field, 0, ""},
+ {"Name", Type, 0, ""},
+ {"Name.CommonName", Field, 0, ""},
+ {"Name.Country", Field, 0, ""},
+ {"Name.ExtraNames", Field, 5, ""},
+ {"Name.Locality", Field, 0, ""},
+ {"Name.Names", Field, 0, ""},
+ {"Name.Organization", Field, 0, ""},
+ {"Name.OrganizationalUnit", Field, 0, ""},
+ {"Name.PostalCode", Field, 0, ""},
+ {"Name.Province", Field, 0, ""},
+ {"Name.SerialNumber", Field, 0, ""},
+ {"Name.StreetAddress", Field, 0, ""},
+ {"RDNSequence", Type, 0, ""},
+ {"RelativeDistinguishedNameSET", Type, 0, ""},
+ {"RevokedCertificate", Type, 0, ""},
+ {"RevokedCertificate.Extensions", Field, 0, ""},
+ {"RevokedCertificate.RevocationTime", Field, 0, ""},
+ {"RevokedCertificate.SerialNumber", Field, 0, ""},
+ {"TBSCertificateList", Type, 0, ""},
+ {"TBSCertificateList.Extensions", Field, 0, ""},
+ {"TBSCertificateList.Issuer", Field, 0, ""},
+ {"TBSCertificateList.NextUpdate", Field, 0, ""},
+ {"TBSCertificateList.Raw", Field, 0, ""},
+ {"TBSCertificateList.RevokedCertificates", Field, 0, ""},
+ {"TBSCertificateList.Signature", Field, 0, ""},
+ {"TBSCertificateList.ThisUpdate", Field, 0, ""},
+ {"TBSCertificateList.Version", Field, 0, ""},
+ },
+ "database/sql": {
+ {"(*ColumnType).DatabaseTypeName", Method, 8, ""},
+ {"(*ColumnType).DecimalSize", Method, 8, ""},
+ {"(*ColumnType).Length", Method, 8, ""},
+ {"(*ColumnType).Name", Method, 8, ""},
+ {"(*ColumnType).Nullable", Method, 8, ""},
+ {"(*ColumnType).ScanType", Method, 8, ""},
+ {"(*Conn).BeginTx", Method, 9, ""},
+ {"(*Conn).Close", Method, 9, ""},
+ {"(*Conn).ExecContext", Method, 9, ""},
+ {"(*Conn).PingContext", Method, 9, ""},
+ {"(*Conn).PrepareContext", Method, 9, ""},
+ {"(*Conn).QueryContext", Method, 9, ""},
+ {"(*Conn).QueryRowContext", Method, 9, ""},
+ {"(*Conn).Raw", Method, 13, ""},
+ {"(*DB).Begin", Method, 0, ""},
+ {"(*DB).BeginTx", Method, 8, ""},
+ {"(*DB).Close", Method, 0, ""},
+ {"(*DB).Conn", Method, 9, ""},
+ {"(*DB).Driver", Method, 0, ""},
+ {"(*DB).Exec", Method, 0, ""},
+ {"(*DB).ExecContext", Method, 8, ""},
+ {"(*DB).Ping", Method, 1, ""},
+ {"(*DB).PingContext", Method, 8, ""},
+ {"(*DB).Prepare", Method, 0, ""},
+ {"(*DB).PrepareContext", Method, 8, ""},
+ {"(*DB).Query", Method, 0, ""},
+ {"(*DB).QueryContext", Method, 8, ""},
+ {"(*DB).QueryRow", Method, 0, ""},
+ {"(*DB).QueryRowContext", Method, 8, ""},
+ {"(*DB).SetConnMaxIdleTime", Method, 15, ""},
+ {"(*DB).SetConnMaxLifetime", Method, 6, ""},
+ {"(*DB).SetMaxIdleConns", Method, 1, ""},
+ {"(*DB).SetMaxOpenConns", Method, 2, ""},
+ {"(*DB).Stats", Method, 5, ""},
+ {"(*Null).Scan", Method, 22, ""},
+ {"(*NullBool).Scan", Method, 0, ""},
+ {"(*NullByte).Scan", Method, 17, ""},
+ {"(*NullFloat64).Scan", Method, 0, ""},
+ {"(*NullInt16).Scan", Method, 17, ""},
+ {"(*NullInt32).Scan", Method, 13, ""},
+ {"(*NullInt64).Scan", Method, 0, ""},
+ {"(*NullString).Scan", Method, 0, ""},
+ {"(*NullTime).Scan", Method, 13, ""},
+ {"(*Row).Err", Method, 15, ""},
+ {"(*Row).Scan", Method, 0, ""},
+ {"(*Rows).Close", Method, 0, ""},
+ {"(*Rows).ColumnTypes", Method, 8, ""},
+ {"(*Rows).Columns", Method, 0, ""},
+ {"(*Rows).Err", Method, 0, ""},
+ {"(*Rows).Next", Method, 0, ""},
+ {"(*Rows).NextResultSet", Method, 8, ""},
+ {"(*Rows).Scan", Method, 0, ""},
+ {"(*Stmt).Close", Method, 0, ""},
+ {"(*Stmt).Exec", Method, 0, ""},
+ {"(*Stmt).ExecContext", Method, 8, ""},
+ {"(*Stmt).Query", Method, 0, ""},
+ {"(*Stmt).QueryContext", Method, 8, ""},
+ {"(*Stmt).QueryRow", Method, 0, ""},
+ {"(*Stmt).QueryRowContext", Method, 8, ""},
+ {"(*Tx).Commit", Method, 0, ""},
+ {"(*Tx).Exec", Method, 0, ""},
+ {"(*Tx).ExecContext", Method, 8, ""},
+ {"(*Tx).Prepare", Method, 0, ""},
+ {"(*Tx).PrepareContext", Method, 8, ""},
+ {"(*Tx).Query", Method, 0, ""},
+ {"(*Tx).QueryContext", Method, 8, ""},
+ {"(*Tx).QueryRow", Method, 0, ""},
+ {"(*Tx).QueryRowContext", Method, 8, ""},
+ {"(*Tx).Rollback", Method, 0, ""},
+ {"(*Tx).Stmt", Method, 0, ""},
+ {"(*Tx).StmtContext", Method, 8, ""},
+ {"(IsolationLevel).String", Method, 11, ""},
+ {"(Null).Value", Method, 22, ""},
+ {"(NullBool).Value", Method, 0, ""},
+ {"(NullByte).Value", Method, 17, ""},
+ {"(NullFloat64).Value", Method, 0, ""},
+ {"(NullInt16).Value", Method, 17, ""},
+ {"(NullInt32).Value", Method, 13, ""},
+ {"(NullInt64).Value", Method, 0, ""},
+ {"(NullString).Value", Method, 0, ""},
+ {"(NullTime).Value", Method, 13, ""},
+ {"ColumnType", Type, 8, ""},
+ {"Conn", Type, 9, ""},
+ {"DB", Type, 0, ""},
+ {"DBStats", Type, 5, ""},
+ {"DBStats.Idle", Field, 11, ""},
+ {"DBStats.InUse", Field, 11, ""},
+ {"DBStats.MaxIdleClosed", Field, 11, ""},
+ {"DBStats.MaxIdleTimeClosed", Field, 15, ""},
+ {"DBStats.MaxLifetimeClosed", Field, 11, ""},
+ {"DBStats.MaxOpenConnections", Field, 11, ""},
+ {"DBStats.OpenConnections", Field, 5, ""},
+ {"DBStats.WaitCount", Field, 11, ""},
+ {"DBStats.WaitDuration", Field, 11, ""},
+ {"Drivers", Func, 4, "func() []string"},
+ {"ErrConnDone", Var, 9, ""},
+ {"ErrNoRows", Var, 0, ""},
+ {"ErrTxDone", Var, 0, ""},
+ {"IsolationLevel", Type, 8, ""},
+ {"LevelDefault", Const, 8, ""},
+ {"LevelLinearizable", Const, 8, ""},
+ {"LevelReadCommitted", Const, 8, ""},
+ {"LevelReadUncommitted", Const, 8, ""},
+ {"LevelRepeatableRead", Const, 8, ""},
+ {"LevelSerializable", Const, 8, ""},
+ {"LevelSnapshot", Const, 8, ""},
+ {"LevelWriteCommitted", Const, 8, ""},
+ {"Named", Func, 8, "func(name string, value any) NamedArg"},
+ {"NamedArg", Type, 8, ""},
+ {"NamedArg.Name", Field, 8, ""},
+ {"NamedArg.Value", Field, 8, ""},
+ {"Null", Type, 22, ""},
+ {"Null.V", Field, 22, ""},
+ {"Null.Valid", Field, 22, ""},
+ {"NullBool", Type, 0, ""},
+ {"NullBool.Bool", Field, 0, ""},
+ {"NullBool.Valid", Field, 0, ""},
+ {"NullByte", Type, 17, ""},
+ {"NullByte.Byte", Field, 17, ""},
+ {"NullByte.Valid", Field, 17, ""},
+ {"NullFloat64", Type, 0, ""},
+ {"NullFloat64.Float64", Field, 0, ""},
+ {"NullFloat64.Valid", Field, 0, ""},
+ {"NullInt16", Type, 17, ""},
+ {"NullInt16.Int16", Field, 17, ""},
+ {"NullInt16.Valid", Field, 17, ""},
+ {"NullInt32", Type, 13, ""},
+ {"NullInt32.Int32", Field, 13, ""},
+ {"NullInt32.Valid", Field, 13, ""},
+ {"NullInt64", Type, 0, ""},
+ {"NullInt64.Int64", Field, 0, ""},
+ {"NullInt64.Valid", Field, 0, ""},
+ {"NullString", Type, 0, ""},
+ {"NullString.String", Field, 0, ""},
+ {"NullString.Valid", Field, 0, ""},
+ {"NullTime", Type, 13, ""},
+ {"NullTime.Time", Field, 13, ""},
+ {"NullTime.Valid", Field, 13, ""},
+ {"Open", Func, 0, "func(driverName string, dataSourceName string) (*DB, error)"},
+ {"OpenDB", Func, 10, "func(c driver.Connector) *DB"},
+ {"Out", Type, 9, ""},
+ {"Out.Dest", Field, 9, ""},
+ {"Out.In", Field, 9, ""},
+ {"RawBytes", Type, 0, ""},
+ {"Register", Func, 0, "func(name string, driver driver.Driver)"},
+ {"Result", Type, 0, ""},
+ {"Row", Type, 0, ""},
+ {"Rows", Type, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Stmt", Type, 0, ""},
+ {"Tx", Type, 0, ""},
+ {"TxOptions", Type, 8, ""},
+ {"TxOptions.Isolation", Field, 8, ""},
+ {"TxOptions.ReadOnly", Field, 8, ""},
+ },
+ "database/sql/driver": {
+ {"(NotNull).ConvertValue", Method, 0, ""},
+ {"(Null).ConvertValue", Method, 0, ""},
+ {"(RowsAffected).LastInsertId", Method, 0, ""},
+ {"(RowsAffected).RowsAffected", Method, 0, ""},
+ {"Bool", Var, 0, ""},
+ {"ColumnConverter", Type, 0, ""},
+ {"Conn", Type, 0, ""},
+ {"ConnBeginTx", Type, 8, ""},
+ {"ConnPrepareContext", Type, 8, ""},
+ {"Connector", Type, 10, ""},
+ {"DefaultParameterConverter", Var, 0, ""},
+ {"Driver", Type, 0, ""},
+ {"DriverContext", Type, 10, ""},
+ {"ErrBadConn", Var, 0, ""},
+ {"ErrRemoveArgument", Var, 9, ""},
+ {"ErrSkip", Var, 0, ""},
+ {"Execer", Type, 0, ""},
+ {"ExecerContext", Type, 8, ""},
+ {"Int32", Var, 0, ""},
+ {"IsScanValue", Func, 0, "func(v any) bool"},
+ {"IsValue", Func, 0, "func(v any) bool"},
+ {"IsolationLevel", Type, 8, ""},
+ {"NamedValue", Type, 8, ""},
+ {"NamedValue.Name", Field, 8, ""},
+ {"NamedValue.Ordinal", Field, 8, ""},
+ {"NamedValue.Value", Field, 8, ""},
+ {"NamedValueChecker", Type, 9, ""},
+ {"NotNull", Type, 0, ""},
+ {"NotNull.Converter", Field, 0, ""},
+ {"Null", Type, 0, ""},
+ {"Null.Converter", Field, 0, ""},
+ {"Pinger", Type, 8, ""},
+ {"Queryer", Type, 1, ""},
+ {"QueryerContext", Type, 8, ""},
+ {"Result", Type, 0, ""},
+ {"ResultNoRows", Var, 0, ""},
+ {"Rows", Type, 0, ""},
+ {"RowsAffected", Type, 0, ""},
+ {"RowsColumnTypeDatabaseTypeName", Type, 8, ""},
+ {"RowsColumnTypeLength", Type, 8, ""},
+ {"RowsColumnTypeNullable", Type, 8, ""},
+ {"RowsColumnTypePrecisionScale", Type, 8, ""},
+ {"RowsColumnTypeScanType", Type, 8, ""},
+ {"RowsNextResultSet", Type, 8, ""},
+ {"SessionResetter", Type, 10, ""},
+ {"Stmt", Type, 0, ""},
+ {"StmtExecContext", Type, 8, ""},
+ {"StmtQueryContext", Type, 8, ""},
+ {"String", Var, 0, ""},
+ {"Tx", Type, 0, ""},
+ {"TxOptions", Type, 8, ""},
+ {"TxOptions.Isolation", Field, 8, ""},
+ {"TxOptions.ReadOnly", Field, 8, ""},
+ {"Validator", Type, 15, ""},
+ {"Value", Type, 0, ""},
+ {"ValueConverter", Type, 0, ""},
+ {"Valuer", Type, 0, ""},
+ },
+ "debug/buildinfo": {
+ {"BuildInfo", Type, 18, ""},
+ {"Read", Func, 18, "func(r io.ReaderAt) (*BuildInfo, error)"},
+ {"ReadFile", Func, 18, "func(name string) (info *BuildInfo, err error)"},
+ },
+ "debug/dwarf": {
+ {"(*AddrType).Basic", Method, 0, ""},
+ {"(*AddrType).Common", Method, 0, ""},
+ {"(*AddrType).Size", Method, 0, ""},
+ {"(*AddrType).String", Method, 0, ""},
+ {"(*ArrayType).Common", Method, 0, ""},
+ {"(*ArrayType).Size", Method, 0, ""},
+ {"(*ArrayType).String", Method, 0, ""},
+ {"(*BasicType).Basic", Method, 0, ""},
+ {"(*BasicType).Common", Method, 0, ""},
+ {"(*BasicType).Size", Method, 0, ""},
+ {"(*BasicType).String", Method, 0, ""},
+ {"(*BoolType).Basic", Method, 0, ""},
+ {"(*BoolType).Common", Method, 0, ""},
+ {"(*BoolType).Size", Method, 0, ""},
+ {"(*BoolType).String", Method, 0, ""},
+ {"(*CharType).Basic", Method, 0, ""},
+ {"(*CharType).Common", Method, 0, ""},
+ {"(*CharType).Size", Method, 0, ""},
+ {"(*CharType).String", Method, 0, ""},
+ {"(*CommonType).Common", Method, 0, ""},
+ {"(*CommonType).Size", Method, 0, ""},
+ {"(*ComplexType).Basic", Method, 0, ""},
+ {"(*ComplexType).Common", Method, 0, ""},
+ {"(*ComplexType).Size", Method, 0, ""},
+ {"(*ComplexType).String", Method, 0, ""},
+ {"(*Data).AddSection", Method, 14, ""},
+ {"(*Data).AddTypes", Method, 3, ""},
+ {"(*Data).LineReader", Method, 5, ""},
+ {"(*Data).Ranges", Method, 7, ""},
+ {"(*Data).Reader", Method, 0, ""},
+ {"(*Data).Type", Method, 0, ""},
+ {"(*DotDotDotType).Common", Method, 0, ""},
+ {"(*DotDotDotType).Size", Method, 0, ""},
+ {"(*DotDotDotType).String", Method, 0, ""},
+ {"(*Entry).AttrField", Method, 5, ""},
+ {"(*Entry).Val", Method, 0, ""},
+ {"(*EnumType).Common", Method, 0, ""},
+ {"(*EnumType).Size", Method, 0, ""},
+ {"(*EnumType).String", Method, 0, ""},
+ {"(*FloatType).Basic", Method, 0, ""},
+ {"(*FloatType).Common", Method, 0, ""},
+ {"(*FloatType).Size", Method, 0, ""},
+ {"(*FloatType).String", Method, 0, ""},
+ {"(*FuncType).Common", Method, 0, ""},
+ {"(*FuncType).Size", Method, 0, ""},
+ {"(*FuncType).String", Method, 0, ""},
+ {"(*IntType).Basic", Method, 0, ""},
+ {"(*IntType).Common", Method, 0, ""},
+ {"(*IntType).Size", Method, 0, ""},
+ {"(*IntType).String", Method, 0, ""},
+ {"(*LineReader).Files", Method, 14, ""},
+ {"(*LineReader).Next", Method, 5, ""},
+ {"(*LineReader).Reset", Method, 5, ""},
+ {"(*LineReader).Seek", Method, 5, ""},
+ {"(*LineReader).SeekPC", Method, 5, ""},
+ {"(*LineReader).Tell", Method, 5, ""},
+ {"(*PtrType).Common", Method, 0, ""},
+ {"(*PtrType).Size", Method, 0, ""},
+ {"(*PtrType).String", Method, 0, ""},
+ {"(*QualType).Common", Method, 0, ""},
+ {"(*QualType).Size", Method, 0, ""},
+ {"(*QualType).String", Method, 0, ""},
+ {"(*Reader).AddressSize", Method, 5, ""},
+ {"(*Reader).ByteOrder", Method, 14, ""},
+ {"(*Reader).Next", Method, 0, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).SeekPC", Method, 7, ""},
+ {"(*Reader).SkipChildren", Method, 0, ""},
+ {"(*StructType).Common", Method, 0, ""},
+ {"(*StructType).Defn", Method, 0, ""},
+ {"(*StructType).Size", Method, 0, ""},
+ {"(*StructType).String", Method, 0, ""},
+ {"(*TypedefType).Common", Method, 0, ""},
+ {"(*TypedefType).Size", Method, 0, ""},
+ {"(*TypedefType).String", Method, 0, ""},
+ {"(*UcharType).Basic", Method, 0, ""},
+ {"(*UcharType).Common", Method, 0, ""},
+ {"(*UcharType).Size", Method, 0, ""},
+ {"(*UcharType).String", Method, 0, ""},
+ {"(*UintType).Basic", Method, 0, ""},
+ {"(*UintType).Common", Method, 0, ""},
+ {"(*UintType).Size", Method, 0, ""},
+ {"(*UintType).String", Method, 0, ""},
+ {"(*UnspecifiedType).Basic", Method, 4, ""},
+ {"(*UnspecifiedType).Common", Method, 4, ""},
+ {"(*UnspecifiedType).Size", Method, 4, ""},
+ {"(*UnspecifiedType).String", Method, 4, ""},
+ {"(*UnsupportedType).Common", Method, 13, ""},
+ {"(*UnsupportedType).Size", Method, 13, ""},
+ {"(*UnsupportedType).String", Method, 13, ""},
+ {"(*VoidType).Common", Method, 0, ""},
+ {"(*VoidType).Size", Method, 0, ""},
+ {"(*VoidType).String", Method, 0, ""},
+ {"(Attr).GoString", Method, 0, ""},
+ {"(Attr).String", Method, 0, ""},
+ {"(Class).GoString", Method, 5, ""},
+ {"(Class).String", Method, 5, ""},
+ {"(DecodeError).Error", Method, 0, ""},
+ {"(Tag).GoString", Method, 0, ""},
+ {"(Tag).String", Method, 0, ""},
+ {"AddrType", Type, 0, ""},
+ {"AddrType.BasicType", Field, 0, ""},
+ {"ArrayType", Type, 0, ""},
+ {"ArrayType.CommonType", Field, 0, ""},
+ {"ArrayType.Count", Field, 0, ""},
+ {"ArrayType.StrideBitSize", Field, 0, ""},
+ {"ArrayType.Type", Field, 0, ""},
+ {"Attr", Type, 0, ""},
+ {"AttrAbstractOrigin", Const, 0, ""},
+ {"AttrAccessibility", Const, 0, ""},
+ {"AttrAddrBase", Const, 14, ""},
+ {"AttrAddrClass", Const, 0, ""},
+ {"AttrAlignment", Const, 14, ""},
+ {"AttrAllocated", Const, 0, ""},
+ {"AttrArtificial", Const, 0, ""},
+ {"AttrAssociated", Const, 0, ""},
+ {"AttrBaseTypes", Const, 0, ""},
+ {"AttrBinaryScale", Const, 14, ""},
+ {"AttrBitOffset", Const, 0, ""},
+ {"AttrBitSize", Const, 0, ""},
+ {"AttrByteSize", Const, 0, ""},
+ {"AttrCallAllCalls", Const, 14, ""},
+ {"AttrCallAllSourceCalls", Const, 14, ""},
+ {"AttrCallAllTailCalls", Const, 14, ""},
+ {"AttrCallColumn", Const, 0, ""},
+ {"AttrCallDataLocation", Const, 14, ""},
+ {"AttrCallDataValue", Const, 14, ""},
+ {"AttrCallFile", Const, 0, ""},
+ {"AttrCallLine", Const, 0, ""},
+ {"AttrCallOrigin", Const, 14, ""},
+ {"AttrCallPC", Const, 14, ""},
+ {"AttrCallParameter", Const, 14, ""},
+ {"AttrCallReturnPC", Const, 14, ""},
+ {"AttrCallTailCall", Const, 14, ""},
+ {"AttrCallTarget", Const, 14, ""},
+ {"AttrCallTargetClobbered", Const, 14, ""},
+ {"AttrCallValue", Const, 14, ""},
+ {"AttrCalling", Const, 0, ""},
+ {"AttrCommonRef", Const, 0, ""},
+ {"AttrCompDir", Const, 0, ""},
+ {"AttrConstExpr", Const, 14, ""},
+ {"AttrConstValue", Const, 0, ""},
+ {"AttrContainingType", Const, 0, ""},
+ {"AttrCount", Const, 0, ""},
+ {"AttrDataBitOffset", Const, 14, ""},
+ {"AttrDataLocation", Const, 0, ""},
+ {"AttrDataMemberLoc", Const, 0, ""},
+ {"AttrDecimalScale", Const, 14, ""},
+ {"AttrDecimalSign", Const, 14, ""},
+ {"AttrDeclColumn", Const, 0, ""},
+ {"AttrDeclFile", Const, 0, ""},
+ {"AttrDeclLine", Const, 0, ""},
+ {"AttrDeclaration", Const, 0, ""},
+ {"AttrDefaultValue", Const, 0, ""},
+ {"AttrDefaulted", Const, 14, ""},
+ {"AttrDeleted", Const, 14, ""},
+ {"AttrDescription", Const, 0, ""},
+ {"AttrDigitCount", Const, 14, ""},
+ {"AttrDiscr", Const, 0, ""},
+ {"AttrDiscrList", Const, 0, ""},
+ {"AttrDiscrValue", Const, 0, ""},
+ {"AttrDwoName", Const, 14, ""},
+ {"AttrElemental", Const, 14, ""},
+ {"AttrEncoding", Const, 0, ""},
+ {"AttrEndianity", Const, 14, ""},
+ {"AttrEntrypc", Const, 0, ""},
+ {"AttrEnumClass", Const, 14, ""},
+ {"AttrExplicit", Const, 14, ""},
+ {"AttrExportSymbols", Const, 14, ""},
+ {"AttrExtension", Const, 0, ""},
+ {"AttrExternal", Const, 0, ""},
+ {"AttrFrameBase", Const, 0, ""},
+ {"AttrFriend", Const, 0, ""},
+ {"AttrHighpc", Const, 0, ""},
+ {"AttrIdentifierCase", Const, 0, ""},
+ {"AttrImport", Const, 0, ""},
+ {"AttrInline", Const, 0, ""},
+ {"AttrIsOptional", Const, 0, ""},
+ {"AttrLanguage", Const, 0, ""},
+ {"AttrLinkageName", Const, 14, ""},
+ {"AttrLocation", Const, 0, ""},
+ {"AttrLoclistsBase", Const, 14, ""},
+ {"AttrLowerBound", Const, 0, ""},
+ {"AttrLowpc", Const, 0, ""},
+ {"AttrMacroInfo", Const, 0, ""},
+ {"AttrMacros", Const, 14, ""},
+ {"AttrMainSubprogram", Const, 14, ""},
+ {"AttrMutable", Const, 14, ""},
+ {"AttrName", Const, 0, ""},
+ {"AttrNamelistItem", Const, 0, ""},
+ {"AttrNoreturn", Const, 14, ""},
+ {"AttrObjectPointer", Const, 14, ""},
+ {"AttrOrdering", Const, 0, ""},
+ {"AttrPictureString", Const, 14, ""},
+ {"AttrPriority", Const, 0, ""},
+ {"AttrProducer", Const, 0, ""},
+ {"AttrPrototyped", Const, 0, ""},
+ {"AttrPure", Const, 14, ""},
+ {"AttrRanges", Const, 0, ""},
+ {"AttrRank", Const, 14, ""},
+ {"AttrRecursive", Const, 14, ""},
+ {"AttrReference", Const, 14, ""},
+ {"AttrReturnAddr", Const, 0, ""},
+ {"AttrRnglistsBase", Const, 14, ""},
+ {"AttrRvalueReference", Const, 14, ""},
+ {"AttrSegment", Const, 0, ""},
+ {"AttrSibling", Const, 0, ""},
+ {"AttrSignature", Const, 14, ""},
+ {"AttrSmall", Const, 14, ""},
+ {"AttrSpecification", Const, 0, ""},
+ {"AttrStartScope", Const, 0, ""},
+ {"AttrStaticLink", Const, 0, ""},
+ {"AttrStmtList", Const, 0, ""},
+ {"AttrStrOffsetsBase", Const, 14, ""},
+ {"AttrStride", Const, 0, ""},
+ {"AttrStrideSize", Const, 0, ""},
+ {"AttrStringLength", Const, 0, ""},
+ {"AttrStringLengthBitSize", Const, 14, ""},
+ {"AttrStringLengthByteSize", Const, 14, ""},
+ {"AttrThreadsScaled", Const, 14, ""},
+ {"AttrTrampoline", Const, 0, ""},
+ {"AttrType", Const, 0, ""},
+ {"AttrUpperBound", Const, 0, ""},
+ {"AttrUseLocation", Const, 0, ""},
+ {"AttrUseUTF8", Const, 0, ""},
+ {"AttrVarParam", Const, 0, ""},
+ {"AttrVirtuality", Const, 0, ""},
+ {"AttrVisibility", Const, 0, ""},
+ {"AttrVtableElemLoc", Const, 0, ""},
+ {"BasicType", Type, 0, ""},
+ {"BasicType.BitOffset", Field, 0, ""},
+ {"BasicType.BitSize", Field, 0, ""},
+ {"BasicType.CommonType", Field, 0, ""},
+ {"BasicType.DataBitOffset", Field, 18, ""},
+ {"BoolType", Type, 0, ""},
+ {"BoolType.BasicType", Field, 0, ""},
+ {"CharType", Type, 0, ""},
+ {"CharType.BasicType", Field, 0, ""},
+ {"Class", Type, 5, ""},
+ {"ClassAddrPtr", Const, 14, ""},
+ {"ClassAddress", Const, 5, ""},
+ {"ClassBlock", Const, 5, ""},
+ {"ClassConstant", Const, 5, ""},
+ {"ClassExprLoc", Const, 5, ""},
+ {"ClassFlag", Const, 5, ""},
+ {"ClassLinePtr", Const, 5, ""},
+ {"ClassLocList", Const, 14, ""},
+ {"ClassLocListPtr", Const, 5, ""},
+ {"ClassMacPtr", Const, 5, ""},
+ {"ClassRangeListPtr", Const, 5, ""},
+ {"ClassReference", Const, 5, ""},
+ {"ClassReferenceAlt", Const, 5, ""},
+ {"ClassReferenceSig", Const, 5, ""},
+ {"ClassRngList", Const, 14, ""},
+ {"ClassRngListsPtr", Const, 14, ""},
+ {"ClassStrOffsetsPtr", Const, 14, ""},
+ {"ClassString", Const, 5, ""},
+ {"ClassStringAlt", Const, 5, ""},
+ {"ClassUnknown", Const, 6, ""},
+ {"CommonType", Type, 0, ""},
+ {"CommonType.ByteSize", Field, 0, ""},
+ {"CommonType.Name", Field, 0, ""},
+ {"ComplexType", Type, 0, ""},
+ {"ComplexType.BasicType", Field, 0, ""},
+ {"Data", Type, 0, ""},
+ {"DecodeError", Type, 0, ""},
+ {"DecodeError.Err", Field, 0, ""},
+ {"DecodeError.Name", Field, 0, ""},
+ {"DecodeError.Offset", Field, 0, ""},
+ {"DotDotDotType", Type, 0, ""},
+ {"DotDotDotType.CommonType", Field, 0, ""},
+ {"Entry", Type, 0, ""},
+ {"Entry.Children", Field, 0, ""},
+ {"Entry.Field", Field, 0, ""},
+ {"Entry.Offset", Field, 0, ""},
+ {"Entry.Tag", Field, 0, ""},
+ {"EnumType", Type, 0, ""},
+ {"EnumType.CommonType", Field, 0, ""},
+ {"EnumType.EnumName", Field, 0, ""},
+ {"EnumType.Val", Field, 0, ""},
+ {"EnumValue", Type, 0, ""},
+ {"EnumValue.Name", Field, 0, ""},
+ {"EnumValue.Val", Field, 0, ""},
+ {"ErrUnknownPC", Var, 5, ""},
+ {"Field", Type, 0, ""},
+ {"Field.Attr", Field, 0, ""},
+ {"Field.Class", Field, 5, ""},
+ {"Field.Val", Field, 0, ""},
+ {"FloatType", Type, 0, ""},
+ {"FloatType.BasicType", Field, 0, ""},
+ {"FuncType", Type, 0, ""},
+ {"FuncType.CommonType", Field, 0, ""},
+ {"FuncType.ParamType", Field, 0, ""},
+ {"FuncType.ReturnType", Field, 0, ""},
+ {"IntType", Type, 0, ""},
+ {"IntType.BasicType", Field, 0, ""},
+ {"LineEntry", Type, 5, ""},
+ {"LineEntry.Address", Field, 5, ""},
+ {"LineEntry.BasicBlock", Field, 5, ""},
+ {"LineEntry.Column", Field, 5, ""},
+ {"LineEntry.Discriminator", Field, 5, ""},
+ {"LineEntry.EndSequence", Field, 5, ""},
+ {"LineEntry.EpilogueBegin", Field, 5, ""},
+ {"LineEntry.File", Field, 5, ""},
+ {"LineEntry.ISA", Field, 5, ""},
+ {"LineEntry.IsStmt", Field, 5, ""},
+ {"LineEntry.Line", Field, 5, ""},
+ {"LineEntry.OpIndex", Field, 5, ""},
+ {"LineEntry.PrologueEnd", Field, 5, ""},
+ {"LineFile", Type, 5, ""},
+ {"LineFile.Length", Field, 5, ""},
+ {"LineFile.Mtime", Field, 5, ""},
+ {"LineFile.Name", Field, 5, ""},
+ {"LineReader", Type, 5, ""},
+ {"LineReaderPos", Type, 5, ""},
+ {"New", Func, 0, "func(abbrev []byte, aranges []byte, frame []byte, info []byte, line []byte, pubnames []byte, ranges []byte, str []byte) (*Data, error)"},
+ {"Offset", Type, 0, ""},
+ {"PtrType", Type, 0, ""},
+ {"PtrType.CommonType", Field, 0, ""},
+ {"PtrType.Type", Field, 0, ""},
+ {"QualType", Type, 0, ""},
+ {"QualType.CommonType", Field, 0, ""},
+ {"QualType.Qual", Field, 0, ""},
+ {"QualType.Type", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"StructField", Type, 0, ""},
+ {"StructField.BitOffset", Field, 0, ""},
+ {"StructField.BitSize", Field, 0, ""},
+ {"StructField.ByteOffset", Field, 0, ""},
+ {"StructField.ByteSize", Field, 0, ""},
+ {"StructField.DataBitOffset", Field, 18, ""},
+ {"StructField.Name", Field, 0, ""},
+ {"StructField.Type", Field, 0, ""},
+ {"StructType", Type, 0, ""},
+ {"StructType.CommonType", Field, 0, ""},
+ {"StructType.Field", Field, 0, ""},
+ {"StructType.Incomplete", Field, 0, ""},
+ {"StructType.Kind", Field, 0, ""},
+ {"StructType.StructName", Field, 0, ""},
+ {"Tag", Type, 0, ""},
+ {"TagAccessDeclaration", Const, 0, ""},
+ {"TagArrayType", Const, 0, ""},
+ {"TagAtomicType", Const, 14, ""},
+ {"TagBaseType", Const, 0, ""},
+ {"TagCallSite", Const, 14, ""},
+ {"TagCallSiteParameter", Const, 14, ""},
+ {"TagCatchDwarfBlock", Const, 0, ""},
+ {"TagClassType", Const, 0, ""},
+ {"TagCoarrayType", Const, 14, ""},
+ {"TagCommonDwarfBlock", Const, 0, ""},
+ {"TagCommonInclusion", Const, 0, ""},
+ {"TagCompileUnit", Const, 0, ""},
+ {"TagCondition", Const, 3, ""},
+ {"TagConstType", Const, 0, ""},
+ {"TagConstant", Const, 0, ""},
+ {"TagDwarfProcedure", Const, 0, ""},
+ {"TagDynamicType", Const, 14, ""},
+ {"TagEntryPoint", Const, 0, ""},
+ {"TagEnumerationType", Const, 0, ""},
+ {"TagEnumerator", Const, 0, ""},
+ {"TagFileType", Const, 0, ""},
+ {"TagFormalParameter", Const, 0, ""},
+ {"TagFriend", Const, 0, ""},
+ {"TagGenericSubrange", Const, 14, ""},
+ {"TagImmutableType", Const, 14, ""},
+ {"TagImportedDeclaration", Const, 0, ""},
+ {"TagImportedModule", Const, 0, ""},
+ {"TagImportedUnit", Const, 0, ""},
+ {"TagInheritance", Const, 0, ""},
+ {"TagInlinedSubroutine", Const, 0, ""},
+ {"TagInterfaceType", Const, 0, ""},
+ {"TagLabel", Const, 0, ""},
+ {"TagLexDwarfBlock", Const, 0, ""},
+ {"TagMember", Const, 0, ""},
+ {"TagModule", Const, 0, ""},
+ {"TagMutableType", Const, 0, ""},
+ {"TagNamelist", Const, 0, ""},
+ {"TagNamelistItem", Const, 0, ""},
+ {"TagNamespace", Const, 0, ""},
+ {"TagPackedType", Const, 0, ""},
+ {"TagPartialUnit", Const, 0, ""},
+ {"TagPointerType", Const, 0, ""},
+ {"TagPtrToMemberType", Const, 0, ""},
+ {"TagReferenceType", Const, 0, ""},
+ {"TagRestrictType", Const, 0, ""},
+ {"TagRvalueReferenceType", Const, 3, ""},
+ {"TagSetType", Const, 0, ""},
+ {"TagSharedType", Const, 3, ""},
+ {"TagSkeletonUnit", Const, 14, ""},
+ {"TagStringType", Const, 0, ""},
+ {"TagStructType", Const, 0, ""},
+ {"TagSubprogram", Const, 0, ""},
+ {"TagSubrangeType", Const, 0, ""},
+ {"TagSubroutineType", Const, 0, ""},
+ {"TagTemplateAlias", Const, 3, ""},
+ {"TagTemplateTypeParameter", Const, 0, ""},
+ {"TagTemplateValueParameter", Const, 0, ""},
+ {"TagThrownType", Const, 0, ""},
+ {"TagTryDwarfBlock", Const, 0, ""},
+ {"TagTypeUnit", Const, 3, ""},
+ {"TagTypedef", Const, 0, ""},
+ {"TagUnionType", Const, 0, ""},
+ {"TagUnspecifiedParameters", Const, 0, ""},
+ {"TagUnspecifiedType", Const, 0, ""},
+ {"TagVariable", Const, 0, ""},
+ {"TagVariant", Const, 0, ""},
+ {"TagVariantPart", Const, 0, ""},
+ {"TagVolatileType", Const, 0, ""},
+ {"TagWithStmt", Const, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypedefType", Type, 0, ""},
+ {"TypedefType.CommonType", Field, 0, ""},
+ {"TypedefType.Type", Field, 0, ""},
+ {"UcharType", Type, 0, ""},
+ {"UcharType.BasicType", Field, 0, ""},
+ {"UintType", Type, 0, ""},
+ {"UintType.BasicType", Field, 0, ""},
+ {"UnspecifiedType", Type, 4, ""},
+ {"UnspecifiedType.BasicType", Field, 4, ""},
+ {"UnsupportedType", Type, 13, ""},
+ {"UnsupportedType.CommonType", Field, 13, ""},
+ {"UnsupportedType.Tag", Field, 13, ""},
+ {"VoidType", Type, 0, ""},
+ {"VoidType.CommonType", Field, 0, ""},
+ },
+ "debug/elf": {
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).DynString", Method, 1, ""},
+ {"(*File).DynValue", Method, 21, ""},
+ {"(*File).DynamicSymbols", Method, 4, ""},
+ {"(*File).DynamicVersionNeeds", Method, 24, ""},
+ {"(*File).DynamicVersions", Method, 24, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*File).SectionByType", Method, 0, ""},
+ {"(*File).Symbols", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Prog).Open", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(Class).GoString", Method, 0, ""},
+ {"(Class).String", Method, 0, ""},
+ {"(CompressionType).GoString", Method, 6, ""},
+ {"(CompressionType).String", Method, 6, ""},
+ {"(Data).GoString", Method, 0, ""},
+ {"(Data).String", Method, 0, ""},
+ {"(DynFlag).GoString", Method, 0, ""},
+ {"(DynFlag).String", Method, 0, ""},
+ {"(DynFlag1).GoString", Method, 21, ""},
+ {"(DynFlag1).String", Method, 21, ""},
+ {"(DynTag).GoString", Method, 0, ""},
+ {"(DynTag).String", Method, 0, ""},
+ {"(Machine).GoString", Method, 0, ""},
+ {"(Machine).String", Method, 0, ""},
+ {"(NType).GoString", Method, 0, ""},
+ {"(NType).String", Method, 0, ""},
+ {"(OSABI).GoString", Method, 0, ""},
+ {"(OSABI).String", Method, 0, ""},
+ {"(Prog).ReadAt", Method, 0, ""},
+ {"(ProgFlag).GoString", Method, 0, ""},
+ {"(ProgFlag).String", Method, 0, ""},
+ {"(ProgType).GoString", Method, 0, ""},
+ {"(ProgType).String", Method, 0, ""},
+ {"(R_386).GoString", Method, 0, ""},
+ {"(R_386).String", Method, 0, ""},
+ {"(R_390).GoString", Method, 7, ""},
+ {"(R_390).String", Method, 7, ""},
+ {"(R_AARCH64).GoString", Method, 4, ""},
+ {"(R_AARCH64).String", Method, 4, ""},
+ {"(R_ALPHA).GoString", Method, 0, ""},
+ {"(R_ALPHA).String", Method, 0, ""},
+ {"(R_ARM).GoString", Method, 0, ""},
+ {"(R_ARM).String", Method, 0, ""},
+ {"(R_LARCH).GoString", Method, 19, ""},
+ {"(R_LARCH).String", Method, 19, ""},
+ {"(R_MIPS).GoString", Method, 6, ""},
+ {"(R_MIPS).String", Method, 6, ""},
+ {"(R_PPC).GoString", Method, 0, ""},
+ {"(R_PPC).String", Method, 0, ""},
+ {"(R_PPC64).GoString", Method, 5, ""},
+ {"(R_PPC64).String", Method, 5, ""},
+ {"(R_RISCV).GoString", Method, 11, ""},
+ {"(R_RISCV).String", Method, 11, ""},
+ {"(R_SPARC).GoString", Method, 0, ""},
+ {"(R_SPARC).String", Method, 0, ""},
+ {"(R_X86_64).GoString", Method, 0, ""},
+ {"(R_X86_64).String", Method, 0, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(SectionFlag).GoString", Method, 0, ""},
+ {"(SectionFlag).String", Method, 0, ""},
+ {"(SectionIndex).GoString", Method, 0, ""},
+ {"(SectionIndex).String", Method, 0, ""},
+ {"(SectionType).GoString", Method, 0, ""},
+ {"(SectionType).String", Method, 0, ""},
+ {"(SymBind).GoString", Method, 0, ""},
+ {"(SymBind).String", Method, 0, ""},
+ {"(SymType).GoString", Method, 0, ""},
+ {"(SymType).String", Method, 0, ""},
+ {"(SymVis).GoString", Method, 0, ""},
+ {"(SymVis).String", Method, 0, ""},
+ {"(Type).GoString", Method, 0, ""},
+ {"(Type).String", Method, 0, ""},
+ {"(Version).GoString", Method, 0, ""},
+ {"(Version).String", Method, 0, ""},
+ {"(VersionIndex).Index", Method, 24, ""},
+ {"(VersionIndex).IsHidden", Method, 24, ""},
+ {"ARM_MAGIC_TRAMP_NUMBER", Const, 0, ""},
+ {"COMPRESS_HIOS", Const, 6, ""},
+ {"COMPRESS_HIPROC", Const, 6, ""},
+ {"COMPRESS_LOOS", Const, 6, ""},
+ {"COMPRESS_LOPROC", Const, 6, ""},
+ {"COMPRESS_ZLIB", Const, 6, ""},
+ {"COMPRESS_ZSTD", Const, 21, ""},
+ {"Chdr32", Type, 6, ""},
+ {"Chdr32.Addralign", Field, 6, ""},
+ {"Chdr32.Size", Field, 6, ""},
+ {"Chdr32.Type", Field, 6, ""},
+ {"Chdr64", Type, 6, ""},
+ {"Chdr64.Addralign", Field, 6, ""},
+ {"Chdr64.Size", Field, 6, ""},
+ {"Chdr64.Type", Field, 6, ""},
+ {"Class", Type, 0, ""},
+ {"CompressionType", Type, 6, ""},
+ {"DF_1_CONFALT", Const, 21, ""},
+ {"DF_1_DIRECT", Const, 21, ""},
+ {"DF_1_DISPRELDNE", Const, 21, ""},
+ {"DF_1_DISPRELPND", Const, 21, ""},
+ {"DF_1_EDITED", Const, 21, ""},
+ {"DF_1_ENDFILTEE", Const, 21, ""},
+ {"DF_1_GLOBAL", Const, 21, ""},
+ {"DF_1_GLOBAUDIT", Const, 21, ""},
+ {"DF_1_GROUP", Const, 21, ""},
+ {"DF_1_IGNMULDEF", Const, 21, ""},
+ {"DF_1_INITFIRST", Const, 21, ""},
+ {"DF_1_INTERPOSE", Const, 21, ""},
+ {"DF_1_KMOD", Const, 21, ""},
+ {"DF_1_LOADFLTR", Const, 21, ""},
+ {"DF_1_NOCOMMON", Const, 21, ""},
+ {"DF_1_NODEFLIB", Const, 21, ""},
+ {"DF_1_NODELETE", Const, 21, ""},
+ {"DF_1_NODIRECT", Const, 21, ""},
+ {"DF_1_NODUMP", Const, 21, ""},
+ {"DF_1_NOHDR", Const, 21, ""},
+ {"DF_1_NOKSYMS", Const, 21, ""},
+ {"DF_1_NOOPEN", Const, 21, ""},
+ {"DF_1_NORELOC", Const, 21, ""},
+ {"DF_1_NOW", Const, 21, ""},
+ {"DF_1_ORIGIN", Const, 21, ""},
+ {"DF_1_PIE", Const, 21, ""},
+ {"DF_1_SINGLETON", Const, 21, ""},
+ {"DF_1_STUB", Const, 21, ""},
+ {"DF_1_SYMINTPOSE", Const, 21, ""},
+ {"DF_1_TRANS", Const, 21, ""},
+ {"DF_1_WEAKFILTER", Const, 21, ""},
+ {"DF_BIND_NOW", Const, 0, ""},
+ {"DF_ORIGIN", Const, 0, ""},
+ {"DF_STATIC_TLS", Const, 0, ""},
+ {"DF_SYMBOLIC", Const, 0, ""},
+ {"DF_TEXTREL", Const, 0, ""},
+ {"DT_ADDRRNGHI", Const, 16, ""},
+ {"DT_ADDRRNGLO", Const, 16, ""},
+ {"DT_AUDIT", Const, 16, ""},
+ {"DT_AUXILIARY", Const, 16, ""},
+ {"DT_BIND_NOW", Const, 0, ""},
+ {"DT_CHECKSUM", Const, 16, ""},
+ {"DT_CONFIG", Const, 16, ""},
+ {"DT_DEBUG", Const, 0, ""},
+ {"DT_DEPAUDIT", Const, 16, ""},
+ {"DT_ENCODING", Const, 0, ""},
+ {"DT_FEATURE", Const, 16, ""},
+ {"DT_FILTER", Const, 16, ""},
+ {"DT_FINI", Const, 0, ""},
+ {"DT_FINI_ARRAY", Const, 0, ""},
+ {"DT_FINI_ARRAYSZ", Const, 0, ""},
+ {"DT_FLAGS", Const, 0, ""},
+ {"DT_FLAGS_1", Const, 16, ""},
+ {"DT_GNU_CONFLICT", Const, 16, ""},
+ {"DT_GNU_CONFLICTSZ", Const, 16, ""},
+ {"DT_GNU_HASH", Const, 16, ""},
+ {"DT_GNU_LIBLIST", Const, 16, ""},
+ {"DT_GNU_LIBLISTSZ", Const, 16, ""},
+ {"DT_GNU_PRELINKED", Const, 16, ""},
+ {"DT_HASH", Const, 0, ""},
+ {"DT_HIOS", Const, 0, ""},
+ {"DT_HIPROC", Const, 0, ""},
+ {"DT_INIT", Const, 0, ""},
+ {"DT_INIT_ARRAY", Const, 0, ""},
+ {"DT_INIT_ARRAYSZ", Const, 0, ""},
+ {"DT_JMPREL", Const, 0, ""},
+ {"DT_LOOS", Const, 0, ""},
+ {"DT_LOPROC", Const, 0, ""},
+ {"DT_MIPS_AUX_DYNAMIC", Const, 16, ""},
+ {"DT_MIPS_BASE_ADDRESS", Const, 16, ""},
+ {"DT_MIPS_COMPACT_SIZE", Const, 16, ""},
+ {"DT_MIPS_CONFLICT", Const, 16, ""},
+ {"DT_MIPS_CONFLICTNO", Const, 16, ""},
+ {"DT_MIPS_CXX_FLAGS", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASS", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASSSYM", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_CLASS_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_INSTANCE", Const, 16, ""},
+ {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_RELOC", Const, 16, ""},
+ {"DT_MIPS_DELTA_RELOC_NO", Const, 16, ""},
+ {"DT_MIPS_DELTA_SYM", Const, 16, ""},
+ {"DT_MIPS_DELTA_SYM_NO", Const, 16, ""},
+ {"DT_MIPS_DYNSTR_ALIGN", Const, 16, ""},
+ {"DT_MIPS_FLAGS", Const, 16, ""},
+ {"DT_MIPS_GOTSYM", Const, 16, ""},
+ {"DT_MIPS_GP_VALUE", Const, 16, ""},
+ {"DT_MIPS_HIDDEN_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_HIPAGENO", Const, 16, ""},
+ {"DT_MIPS_ICHECKSUM", Const, 16, ""},
+ {"DT_MIPS_INTERFACE", Const, 16, ""},
+ {"DT_MIPS_INTERFACE_SIZE", Const, 16, ""},
+ {"DT_MIPS_IVERSION", Const, 16, ""},
+ {"DT_MIPS_LIBLIST", Const, 16, ""},
+ {"DT_MIPS_LIBLISTNO", Const, 16, ""},
+ {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_LOCAL_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_LOCAL_GOTNO", Const, 16, ""},
+ {"DT_MIPS_MSYM", Const, 16, ""},
+ {"DT_MIPS_OPTIONS", Const, 16, ""},
+ {"DT_MIPS_PERF_SUFFIX", Const, 16, ""},
+ {"DT_MIPS_PIXIE_INIT", Const, 16, ""},
+ {"DT_MIPS_PLTGOT", Const, 16, ""},
+ {"DT_MIPS_PROTECTED_GOTIDX", Const, 16, ""},
+ {"DT_MIPS_RLD_MAP", Const, 16, ""},
+ {"DT_MIPS_RLD_MAP_REL", Const, 16, ""},
+ {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16, ""},
+ {"DT_MIPS_RLD_VERSION", Const, 16, ""},
+ {"DT_MIPS_RWPLT", Const, 16, ""},
+ {"DT_MIPS_SYMBOL_LIB", Const, 16, ""},
+ {"DT_MIPS_SYMTABNO", Const, 16, ""},
+ {"DT_MIPS_TIME_STAMP", Const, 16, ""},
+ {"DT_MIPS_UNREFEXTNO", Const, 16, ""},
+ {"DT_MOVEENT", Const, 16, ""},
+ {"DT_MOVESZ", Const, 16, ""},
+ {"DT_MOVETAB", Const, 16, ""},
+ {"DT_NEEDED", Const, 0, ""},
+ {"DT_NULL", Const, 0, ""},
+ {"DT_PLTGOT", Const, 0, ""},
+ {"DT_PLTPAD", Const, 16, ""},
+ {"DT_PLTPADSZ", Const, 16, ""},
+ {"DT_PLTREL", Const, 0, ""},
+ {"DT_PLTRELSZ", Const, 0, ""},
+ {"DT_POSFLAG_1", Const, 16, ""},
+ {"DT_PPC64_GLINK", Const, 16, ""},
+ {"DT_PPC64_OPD", Const, 16, ""},
+ {"DT_PPC64_OPDSZ", Const, 16, ""},
+ {"DT_PPC64_OPT", Const, 16, ""},
+ {"DT_PPC_GOT", Const, 16, ""},
+ {"DT_PPC_OPT", Const, 16, ""},
+ {"DT_PREINIT_ARRAY", Const, 0, ""},
+ {"DT_PREINIT_ARRAYSZ", Const, 0, ""},
+ {"DT_REL", Const, 0, ""},
+ {"DT_RELA", Const, 0, ""},
+ {"DT_RELACOUNT", Const, 16, ""},
+ {"DT_RELAENT", Const, 0, ""},
+ {"DT_RELASZ", Const, 0, ""},
+ {"DT_RELCOUNT", Const, 16, ""},
+ {"DT_RELENT", Const, 0, ""},
+ {"DT_RELSZ", Const, 0, ""},
+ {"DT_RPATH", Const, 0, ""},
+ {"DT_RUNPATH", Const, 0, ""},
+ {"DT_SONAME", Const, 0, ""},
+ {"DT_SPARC_REGISTER", Const, 16, ""},
+ {"DT_STRSZ", Const, 0, ""},
+ {"DT_STRTAB", Const, 0, ""},
+ {"DT_SYMBOLIC", Const, 0, ""},
+ {"DT_SYMENT", Const, 0, ""},
+ {"DT_SYMINENT", Const, 16, ""},
+ {"DT_SYMINFO", Const, 16, ""},
+ {"DT_SYMINSZ", Const, 16, ""},
+ {"DT_SYMTAB", Const, 0, ""},
+ {"DT_SYMTAB_SHNDX", Const, 16, ""},
+ {"DT_TEXTREL", Const, 0, ""},
+ {"DT_TLSDESC_GOT", Const, 16, ""},
+ {"DT_TLSDESC_PLT", Const, 16, ""},
+ {"DT_USED", Const, 16, ""},
+ {"DT_VALRNGHI", Const, 16, ""},
+ {"DT_VALRNGLO", Const, 16, ""},
+ {"DT_VERDEF", Const, 16, ""},
+ {"DT_VERDEFNUM", Const, 16, ""},
+ {"DT_VERNEED", Const, 0, ""},
+ {"DT_VERNEEDNUM", Const, 0, ""},
+ {"DT_VERSYM", Const, 0, ""},
+ {"Data", Type, 0, ""},
+ {"Dyn32", Type, 0, ""},
+ {"Dyn32.Tag", Field, 0, ""},
+ {"Dyn32.Val", Field, 0, ""},
+ {"Dyn64", Type, 0, ""},
+ {"Dyn64.Tag", Field, 0, ""},
+ {"Dyn64.Val", Field, 0, ""},
+ {"DynFlag", Type, 0, ""},
+ {"DynFlag1", Type, 21, ""},
+ {"DynTag", Type, 0, ""},
+ {"DynamicVersion", Type, 24, ""},
+ {"DynamicVersion.Deps", Field, 24, ""},
+ {"DynamicVersion.Flags", Field, 24, ""},
+ {"DynamicVersion.Index", Field, 24, ""},
+ {"DynamicVersion.Name", Field, 24, ""},
+ {"DynamicVersionDep", Type, 24, ""},
+ {"DynamicVersionDep.Dep", Field, 24, ""},
+ {"DynamicVersionDep.Flags", Field, 24, ""},
+ {"DynamicVersionDep.Index", Field, 24, ""},
+ {"DynamicVersionFlag", Type, 24, ""},
+ {"DynamicVersionNeed", Type, 24, ""},
+ {"DynamicVersionNeed.Name", Field, 24, ""},
+ {"DynamicVersionNeed.Needs", Field, 24, ""},
+ {"EI_ABIVERSION", Const, 0, ""},
+ {"EI_CLASS", Const, 0, ""},
+ {"EI_DATA", Const, 0, ""},
+ {"EI_NIDENT", Const, 0, ""},
+ {"EI_OSABI", Const, 0, ""},
+ {"EI_PAD", Const, 0, ""},
+ {"EI_VERSION", Const, 0, ""},
+ {"ELFCLASS32", Const, 0, ""},
+ {"ELFCLASS64", Const, 0, ""},
+ {"ELFCLASSNONE", Const, 0, ""},
+ {"ELFDATA2LSB", Const, 0, ""},
+ {"ELFDATA2MSB", Const, 0, ""},
+ {"ELFDATANONE", Const, 0, ""},
+ {"ELFMAG", Const, 0, ""},
+ {"ELFOSABI_86OPEN", Const, 0, ""},
+ {"ELFOSABI_AIX", Const, 0, ""},
+ {"ELFOSABI_ARM", Const, 0, ""},
+ {"ELFOSABI_AROS", Const, 11, ""},
+ {"ELFOSABI_CLOUDABI", Const, 11, ""},
+ {"ELFOSABI_FENIXOS", Const, 11, ""},
+ {"ELFOSABI_FREEBSD", Const, 0, ""},
+ {"ELFOSABI_HPUX", Const, 0, ""},
+ {"ELFOSABI_HURD", Const, 0, ""},
+ {"ELFOSABI_IRIX", Const, 0, ""},
+ {"ELFOSABI_LINUX", Const, 0, ""},
+ {"ELFOSABI_MODESTO", Const, 0, ""},
+ {"ELFOSABI_NETBSD", Const, 0, ""},
+ {"ELFOSABI_NONE", Const, 0, ""},
+ {"ELFOSABI_NSK", Const, 0, ""},
+ {"ELFOSABI_OPENBSD", Const, 0, ""},
+ {"ELFOSABI_OPENVMS", Const, 0, ""},
+ {"ELFOSABI_SOLARIS", Const, 0, ""},
+ {"ELFOSABI_STANDALONE", Const, 0, ""},
+ {"ELFOSABI_TRU64", Const, 0, ""},
+ {"EM_386", Const, 0, ""},
+ {"EM_486", Const, 0, ""},
+ {"EM_56800EX", Const, 11, ""},
+ {"EM_68HC05", Const, 11, ""},
+ {"EM_68HC08", Const, 11, ""},
+ {"EM_68HC11", Const, 11, ""},
+ {"EM_68HC12", Const, 0, ""},
+ {"EM_68HC16", Const, 11, ""},
+ {"EM_68K", Const, 0, ""},
+ {"EM_78KOR", Const, 11, ""},
+ {"EM_8051", Const, 11, ""},
+ {"EM_860", Const, 0, ""},
+ {"EM_88K", Const, 0, ""},
+ {"EM_960", Const, 0, ""},
+ {"EM_AARCH64", Const, 4, ""},
+ {"EM_ALPHA", Const, 0, ""},
+ {"EM_ALPHA_STD", Const, 0, ""},
+ {"EM_ALTERA_NIOS2", Const, 11, ""},
+ {"EM_AMDGPU", Const, 11, ""},
+ {"EM_ARC", Const, 0, ""},
+ {"EM_ARCA", Const, 11, ""},
+ {"EM_ARC_COMPACT", Const, 11, ""},
+ {"EM_ARC_COMPACT2", Const, 11, ""},
+ {"EM_ARM", Const, 0, ""},
+ {"EM_AVR", Const, 11, ""},
+ {"EM_AVR32", Const, 11, ""},
+ {"EM_BA1", Const, 11, ""},
+ {"EM_BA2", Const, 11, ""},
+ {"EM_BLACKFIN", Const, 11, ""},
+ {"EM_BPF", Const, 11, ""},
+ {"EM_C166", Const, 11, ""},
+ {"EM_CDP", Const, 11, ""},
+ {"EM_CE", Const, 11, ""},
+ {"EM_CLOUDSHIELD", Const, 11, ""},
+ {"EM_COGE", Const, 11, ""},
+ {"EM_COLDFIRE", Const, 0, ""},
+ {"EM_COOL", Const, 11, ""},
+ {"EM_COREA_1ST", Const, 11, ""},
+ {"EM_COREA_2ND", Const, 11, ""},
+ {"EM_CR", Const, 11, ""},
+ {"EM_CR16", Const, 11, ""},
+ {"EM_CRAYNV2", Const, 11, ""},
+ {"EM_CRIS", Const, 11, ""},
+ {"EM_CRX", Const, 11, ""},
+ {"EM_CSR_KALIMBA", Const, 11, ""},
+ {"EM_CUDA", Const, 11, ""},
+ {"EM_CYPRESS_M8C", Const, 11, ""},
+ {"EM_D10V", Const, 11, ""},
+ {"EM_D30V", Const, 11, ""},
+ {"EM_DSP24", Const, 11, ""},
+ {"EM_DSPIC30F", Const, 11, ""},
+ {"EM_DXP", Const, 11, ""},
+ {"EM_ECOG1", Const, 11, ""},
+ {"EM_ECOG16", Const, 11, ""},
+ {"EM_ECOG1X", Const, 11, ""},
+ {"EM_ECOG2", Const, 11, ""},
+ {"EM_ETPU", Const, 11, ""},
+ {"EM_EXCESS", Const, 11, ""},
+ {"EM_F2MC16", Const, 11, ""},
+ {"EM_FIREPATH", Const, 11, ""},
+ {"EM_FR20", Const, 0, ""},
+ {"EM_FR30", Const, 11, ""},
+ {"EM_FT32", Const, 11, ""},
+ {"EM_FX66", Const, 11, ""},
+ {"EM_H8S", Const, 0, ""},
+ {"EM_H8_300", Const, 0, ""},
+ {"EM_H8_300H", Const, 0, ""},
+ {"EM_H8_500", Const, 0, ""},
+ {"EM_HUANY", Const, 11, ""},
+ {"EM_IA_64", Const, 0, ""},
+ {"EM_INTEL205", Const, 11, ""},
+ {"EM_INTEL206", Const, 11, ""},
+ {"EM_INTEL207", Const, 11, ""},
+ {"EM_INTEL208", Const, 11, ""},
+ {"EM_INTEL209", Const, 11, ""},
+ {"EM_IP2K", Const, 11, ""},
+ {"EM_JAVELIN", Const, 11, ""},
+ {"EM_K10M", Const, 11, ""},
+ {"EM_KM32", Const, 11, ""},
+ {"EM_KMX16", Const, 11, ""},
+ {"EM_KMX32", Const, 11, ""},
+ {"EM_KMX8", Const, 11, ""},
+ {"EM_KVARC", Const, 11, ""},
+ {"EM_L10M", Const, 11, ""},
+ {"EM_LANAI", Const, 11, ""},
+ {"EM_LATTICEMICO32", Const, 11, ""},
+ {"EM_LOONGARCH", Const, 19, ""},
+ {"EM_M16C", Const, 11, ""},
+ {"EM_M32", Const, 0, ""},
+ {"EM_M32C", Const, 11, ""},
+ {"EM_M32R", Const, 11, ""},
+ {"EM_MANIK", Const, 11, ""},
+ {"EM_MAX", Const, 11, ""},
+ {"EM_MAXQ30", Const, 11, ""},
+ {"EM_MCHP_PIC", Const, 11, ""},
+ {"EM_MCST_ELBRUS", Const, 11, ""},
+ {"EM_ME16", Const, 0, ""},
+ {"EM_METAG", Const, 11, ""},
+ {"EM_MICROBLAZE", Const, 11, ""},
+ {"EM_MIPS", Const, 0, ""},
+ {"EM_MIPS_RS3_LE", Const, 0, ""},
+ {"EM_MIPS_RS4_BE", Const, 0, ""},
+ {"EM_MIPS_X", Const, 0, ""},
+ {"EM_MMA", Const, 0, ""},
+ {"EM_MMDSP_PLUS", Const, 11, ""},
+ {"EM_MMIX", Const, 11, ""},
+ {"EM_MN10200", Const, 11, ""},
+ {"EM_MN10300", Const, 11, ""},
+ {"EM_MOXIE", Const, 11, ""},
+ {"EM_MSP430", Const, 11, ""},
+ {"EM_NCPU", Const, 0, ""},
+ {"EM_NDR1", Const, 0, ""},
+ {"EM_NDS32", Const, 11, ""},
+ {"EM_NONE", Const, 0, ""},
+ {"EM_NORC", Const, 11, ""},
+ {"EM_NS32K", Const, 11, ""},
+ {"EM_OPEN8", Const, 11, ""},
+ {"EM_OPENRISC", Const, 11, ""},
+ {"EM_PARISC", Const, 0, ""},
+ {"EM_PCP", Const, 0, ""},
+ {"EM_PDP10", Const, 11, ""},
+ {"EM_PDP11", Const, 11, ""},
+ {"EM_PDSP", Const, 11, ""},
+ {"EM_PJ", Const, 11, ""},
+ {"EM_PPC", Const, 0, ""},
+ {"EM_PPC64", Const, 0, ""},
+ {"EM_PRISM", Const, 11, ""},
+ {"EM_QDSP6", Const, 11, ""},
+ {"EM_R32C", Const, 11, ""},
+ {"EM_RCE", Const, 0, ""},
+ {"EM_RH32", Const, 0, ""},
+ {"EM_RISCV", Const, 11, ""},
+ {"EM_RL78", Const, 11, ""},
+ {"EM_RS08", Const, 11, ""},
+ {"EM_RX", Const, 11, ""},
+ {"EM_S370", Const, 0, ""},
+ {"EM_S390", Const, 0, ""},
+ {"EM_SCORE7", Const, 11, ""},
+ {"EM_SEP", Const, 11, ""},
+ {"EM_SE_C17", Const, 11, ""},
+ {"EM_SE_C33", Const, 11, ""},
+ {"EM_SH", Const, 0, ""},
+ {"EM_SHARC", Const, 11, ""},
+ {"EM_SLE9X", Const, 11, ""},
+ {"EM_SNP1K", Const, 11, ""},
+ {"EM_SPARC", Const, 0, ""},
+ {"EM_SPARC32PLUS", Const, 0, ""},
+ {"EM_SPARCV9", Const, 0, ""},
+ {"EM_ST100", Const, 0, ""},
+ {"EM_ST19", Const, 11, ""},
+ {"EM_ST200", Const, 11, ""},
+ {"EM_ST7", Const, 11, ""},
+ {"EM_ST9PLUS", Const, 11, ""},
+ {"EM_STARCORE", Const, 0, ""},
+ {"EM_STM8", Const, 11, ""},
+ {"EM_STXP7X", Const, 11, ""},
+ {"EM_SVX", Const, 11, ""},
+ {"EM_TILE64", Const, 11, ""},
+ {"EM_TILEGX", Const, 11, ""},
+ {"EM_TILEPRO", Const, 11, ""},
+ {"EM_TINYJ", Const, 0, ""},
+ {"EM_TI_ARP32", Const, 11, ""},
+ {"EM_TI_C2000", Const, 11, ""},
+ {"EM_TI_C5500", Const, 11, ""},
+ {"EM_TI_C6000", Const, 11, ""},
+ {"EM_TI_PRU", Const, 11, ""},
+ {"EM_TMM_GPP", Const, 11, ""},
+ {"EM_TPC", Const, 11, ""},
+ {"EM_TRICORE", Const, 0, ""},
+ {"EM_TRIMEDIA", Const, 11, ""},
+ {"EM_TSK3000", Const, 11, ""},
+ {"EM_UNICORE", Const, 11, ""},
+ {"EM_V800", Const, 0, ""},
+ {"EM_V850", Const, 11, ""},
+ {"EM_VAX", Const, 11, ""},
+ {"EM_VIDEOCORE", Const, 11, ""},
+ {"EM_VIDEOCORE3", Const, 11, ""},
+ {"EM_VIDEOCORE5", Const, 11, ""},
+ {"EM_VISIUM", Const, 11, ""},
+ {"EM_VPP500", Const, 0, ""},
+ {"EM_X86_64", Const, 0, ""},
+ {"EM_XCORE", Const, 11, ""},
+ {"EM_XGATE", Const, 11, ""},
+ {"EM_XIMO16", Const, 11, ""},
+ {"EM_XTENSA", Const, 11, ""},
+ {"EM_Z80", Const, 11, ""},
+ {"EM_ZSP", Const, 11, ""},
+ {"ET_CORE", Const, 0, ""},
+ {"ET_DYN", Const, 0, ""},
+ {"ET_EXEC", Const, 0, ""},
+ {"ET_HIOS", Const, 0, ""},
+ {"ET_HIPROC", Const, 0, ""},
+ {"ET_LOOS", Const, 0, ""},
+ {"ET_LOPROC", Const, 0, ""},
+ {"ET_NONE", Const, 0, ""},
+ {"ET_REL", Const, 0, ""},
+ {"EV_CURRENT", Const, 0, ""},
+ {"EV_NONE", Const, 0, ""},
+ {"ErrNoSymbols", Var, 4, ""},
+ {"File", Type, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.Progs", Field, 0, ""},
+ {"File.Sections", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.ABIVersion", Field, 0, ""},
+ {"FileHeader.ByteOrder", Field, 0, ""},
+ {"FileHeader.Class", Field, 0, ""},
+ {"FileHeader.Data", Field, 0, ""},
+ {"FileHeader.Entry", Field, 1, ""},
+ {"FileHeader.Machine", Field, 0, ""},
+ {"FileHeader.OSABI", Field, 0, ""},
+ {"FileHeader.Type", Field, 0, ""},
+ {"FileHeader.Version", Field, 0, ""},
+ {"FormatError", Type, 0, ""},
+ {"Header32", Type, 0, ""},
+ {"Header32.Ehsize", Field, 0, ""},
+ {"Header32.Entry", Field, 0, ""},
+ {"Header32.Flags", Field, 0, ""},
+ {"Header32.Ident", Field, 0, ""},
+ {"Header32.Machine", Field, 0, ""},
+ {"Header32.Phentsize", Field, 0, ""},
+ {"Header32.Phnum", Field, 0, ""},
+ {"Header32.Phoff", Field, 0, ""},
+ {"Header32.Shentsize", Field, 0, ""},
+ {"Header32.Shnum", Field, 0, ""},
+ {"Header32.Shoff", Field, 0, ""},
+ {"Header32.Shstrndx", Field, 0, ""},
+ {"Header32.Type", Field, 0, ""},
+ {"Header32.Version", Field, 0, ""},
+ {"Header64", Type, 0, ""},
+ {"Header64.Ehsize", Field, 0, ""},
+ {"Header64.Entry", Field, 0, ""},
+ {"Header64.Flags", Field, 0, ""},
+ {"Header64.Ident", Field, 0, ""},
+ {"Header64.Machine", Field, 0, ""},
+ {"Header64.Phentsize", Field, 0, ""},
+ {"Header64.Phnum", Field, 0, ""},
+ {"Header64.Phoff", Field, 0, ""},
+ {"Header64.Shentsize", Field, 0, ""},
+ {"Header64.Shnum", Field, 0, ""},
+ {"Header64.Shoff", Field, 0, ""},
+ {"Header64.Shstrndx", Field, 0, ""},
+ {"Header64.Type", Field, 0, ""},
+ {"Header64.Version", Field, 0, ""},
+ {"ImportedSymbol", Type, 0, ""},
+ {"ImportedSymbol.Library", Field, 0, ""},
+ {"ImportedSymbol.Name", Field, 0, ""},
+ {"ImportedSymbol.Version", Field, 0, ""},
+ {"Machine", Type, 0, ""},
+ {"NT_FPREGSET", Const, 0, ""},
+ {"NT_PRPSINFO", Const, 0, ""},
+ {"NT_PRSTATUS", Const, 0, ""},
+ {"NType", Type, 0, ""},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"OSABI", Type, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"PF_MASKOS", Const, 0, ""},
+ {"PF_MASKPROC", Const, 0, ""},
+ {"PF_R", Const, 0, ""},
+ {"PF_W", Const, 0, ""},
+ {"PF_X", Const, 0, ""},
+ {"PT_AARCH64_ARCHEXT", Const, 16, ""},
+ {"PT_AARCH64_UNWIND", Const, 16, ""},
+ {"PT_ARM_ARCHEXT", Const, 16, ""},
+ {"PT_ARM_EXIDX", Const, 16, ""},
+ {"PT_DYNAMIC", Const, 0, ""},
+ {"PT_GNU_EH_FRAME", Const, 16, ""},
+ {"PT_GNU_MBIND_HI", Const, 16, ""},
+ {"PT_GNU_MBIND_LO", Const, 16, ""},
+ {"PT_GNU_PROPERTY", Const, 16, ""},
+ {"PT_GNU_RELRO", Const, 16, ""},
+ {"PT_GNU_STACK", Const, 16, ""},
+ {"PT_HIOS", Const, 0, ""},
+ {"PT_HIPROC", Const, 0, ""},
+ {"PT_INTERP", Const, 0, ""},
+ {"PT_LOAD", Const, 0, ""},
+ {"PT_LOOS", Const, 0, ""},
+ {"PT_LOPROC", Const, 0, ""},
+ {"PT_MIPS_ABIFLAGS", Const, 16, ""},
+ {"PT_MIPS_OPTIONS", Const, 16, ""},
+ {"PT_MIPS_REGINFO", Const, 16, ""},
+ {"PT_MIPS_RTPROC", Const, 16, ""},
+ {"PT_NOTE", Const, 0, ""},
+ {"PT_NULL", Const, 0, ""},
+ {"PT_OPENBSD_BOOTDATA", Const, 16, ""},
+ {"PT_OPENBSD_NOBTCFI", Const, 23, ""},
+ {"PT_OPENBSD_RANDOMIZE", Const, 16, ""},
+ {"PT_OPENBSD_WXNEEDED", Const, 16, ""},
+ {"PT_PAX_FLAGS", Const, 16, ""},
+ {"PT_PHDR", Const, 0, ""},
+ {"PT_RISCV_ATTRIBUTES", Const, 25, ""},
+ {"PT_S390_PGSTE", Const, 16, ""},
+ {"PT_SHLIB", Const, 0, ""},
+ {"PT_SUNWSTACK", Const, 16, ""},
+ {"PT_SUNW_EH_FRAME", Const, 16, ""},
+ {"PT_TLS", Const, 0, ""},
+ {"Prog", Type, 0, ""},
+ {"Prog.ProgHeader", Field, 0, ""},
+ {"Prog.ReaderAt", Field, 0, ""},
+ {"Prog32", Type, 0, ""},
+ {"Prog32.Align", Field, 0, ""},
+ {"Prog32.Filesz", Field, 0, ""},
+ {"Prog32.Flags", Field, 0, ""},
+ {"Prog32.Memsz", Field, 0, ""},
+ {"Prog32.Off", Field, 0, ""},
+ {"Prog32.Paddr", Field, 0, ""},
+ {"Prog32.Type", Field, 0, ""},
+ {"Prog32.Vaddr", Field, 0, ""},
+ {"Prog64", Type, 0, ""},
+ {"Prog64.Align", Field, 0, ""},
+ {"Prog64.Filesz", Field, 0, ""},
+ {"Prog64.Flags", Field, 0, ""},
+ {"Prog64.Memsz", Field, 0, ""},
+ {"Prog64.Off", Field, 0, ""},
+ {"Prog64.Paddr", Field, 0, ""},
+ {"Prog64.Type", Field, 0, ""},
+ {"Prog64.Vaddr", Field, 0, ""},
+ {"ProgFlag", Type, 0, ""},
+ {"ProgHeader", Type, 0, ""},
+ {"ProgHeader.Align", Field, 0, ""},
+ {"ProgHeader.Filesz", Field, 0, ""},
+ {"ProgHeader.Flags", Field, 0, ""},
+ {"ProgHeader.Memsz", Field, 0, ""},
+ {"ProgHeader.Off", Field, 0, ""},
+ {"ProgHeader.Paddr", Field, 0, ""},
+ {"ProgHeader.Type", Field, 0, ""},
+ {"ProgHeader.Vaddr", Field, 0, ""},
+ {"ProgType", Type, 0, ""},
+ {"R_386", Type, 0, ""},
+ {"R_386_16", Const, 10, ""},
+ {"R_386_32", Const, 0, ""},
+ {"R_386_32PLT", Const, 10, ""},
+ {"R_386_8", Const, 10, ""},
+ {"R_386_COPY", Const, 0, ""},
+ {"R_386_GLOB_DAT", Const, 0, ""},
+ {"R_386_GOT32", Const, 0, ""},
+ {"R_386_GOT32X", Const, 10, ""},
+ {"R_386_GOTOFF", Const, 0, ""},
+ {"R_386_GOTPC", Const, 0, ""},
+ {"R_386_IRELATIVE", Const, 10, ""},
+ {"R_386_JMP_SLOT", Const, 0, ""},
+ {"R_386_NONE", Const, 0, ""},
+ {"R_386_PC16", Const, 10, ""},
+ {"R_386_PC32", Const, 0, ""},
+ {"R_386_PC8", Const, 10, ""},
+ {"R_386_PLT32", Const, 0, ""},
+ {"R_386_RELATIVE", Const, 0, ""},
+ {"R_386_SIZE32", Const, 10, ""},
+ {"R_386_TLS_DESC", Const, 10, ""},
+ {"R_386_TLS_DESC_CALL", Const, 10, ""},
+ {"R_386_TLS_DTPMOD32", Const, 0, ""},
+ {"R_386_TLS_DTPOFF32", Const, 0, ""},
+ {"R_386_TLS_GD", Const, 0, ""},
+ {"R_386_TLS_GD_32", Const, 0, ""},
+ {"R_386_TLS_GD_CALL", Const, 0, ""},
+ {"R_386_TLS_GD_POP", Const, 0, ""},
+ {"R_386_TLS_GD_PUSH", Const, 0, ""},
+ {"R_386_TLS_GOTDESC", Const, 10, ""},
+ {"R_386_TLS_GOTIE", Const, 0, ""},
+ {"R_386_TLS_IE", Const, 0, ""},
+ {"R_386_TLS_IE_32", Const, 0, ""},
+ {"R_386_TLS_LDM", Const, 0, ""},
+ {"R_386_TLS_LDM_32", Const, 0, ""},
+ {"R_386_TLS_LDM_CALL", Const, 0, ""},
+ {"R_386_TLS_LDM_POP", Const, 0, ""},
+ {"R_386_TLS_LDM_PUSH", Const, 0, ""},
+ {"R_386_TLS_LDO_32", Const, 0, ""},
+ {"R_386_TLS_LE", Const, 0, ""},
+ {"R_386_TLS_LE_32", Const, 0, ""},
+ {"R_386_TLS_TPOFF", Const, 0, ""},
+ {"R_386_TLS_TPOFF32", Const, 0, ""},
+ {"R_390", Type, 7, ""},
+ {"R_390_12", Const, 7, ""},
+ {"R_390_16", Const, 7, ""},
+ {"R_390_20", Const, 7, ""},
+ {"R_390_32", Const, 7, ""},
+ {"R_390_64", Const, 7, ""},
+ {"R_390_8", Const, 7, ""},
+ {"R_390_COPY", Const, 7, ""},
+ {"R_390_GLOB_DAT", Const, 7, ""},
+ {"R_390_GOT12", Const, 7, ""},
+ {"R_390_GOT16", Const, 7, ""},
+ {"R_390_GOT20", Const, 7, ""},
+ {"R_390_GOT32", Const, 7, ""},
+ {"R_390_GOT64", Const, 7, ""},
+ {"R_390_GOTENT", Const, 7, ""},
+ {"R_390_GOTOFF", Const, 7, ""},
+ {"R_390_GOTOFF16", Const, 7, ""},
+ {"R_390_GOTOFF64", Const, 7, ""},
+ {"R_390_GOTPC", Const, 7, ""},
+ {"R_390_GOTPCDBL", Const, 7, ""},
+ {"R_390_GOTPLT12", Const, 7, ""},
+ {"R_390_GOTPLT16", Const, 7, ""},
+ {"R_390_GOTPLT20", Const, 7, ""},
+ {"R_390_GOTPLT32", Const, 7, ""},
+ {"R_390_GOTPLT64", Const, 7, ""},
+ {"R_390_GOTPLTENT", Const, 7, ""},
+ {"R_390_GOTPLTOFF16", Const, 7, ""},
+ {"R_390_GOTPLTOFF32", Const, 7, ""},
+ {"R_390_GOTPLTOFF64", Const, 7, ""},
+ {"R_390_JMP_SLOT", Const, 7, ""},
+ {"R_390_NONE", Const, 7, ""},
+ {"R_390_PC16", Const, 7, ""},
+ {"R_390_PC16DBL", Const, 7, ""},
+ {"R_390_PC32", Const, 7, ""},
+ {"R_390_PC32DBL", Const, 7, ""},
+ {"R_390_PC64", Const, 7, ""},
+ {"R_390_PLT16DBL", Const, 7, ""},
+ {"R_390_PLT32", Const, 7, ""},
+ {"R_390_PLT32DBL", Const, 7, ""},
+ {"R_390_PLT64", Const, 7, ""},
+ {"R_390_RELATIVE", Const, 7, ""},
+ {"R_390_TLS_DTPMOD", Const, 7, ""},
+ {"R_390_TLS_DTPOFF", Const, 7, ""},
+ {"R_390_TLS_GD32", Const, 7, ""},
+ {"R_390_TLS_GD64", Const, 7, ""},
+ {"R_390_TLS_GDCALL", Const, 7, ""},
+ {"R_390_TLS_GOTIE12", Const, 7, ""},
+ {"R_390_TLS_GOTIE20", Const, 7, ""},
+ {"R_390_TLS_GOTIE32", Const, 7, ""},
+ {"R_390_TLS_GOTIE64", Const, 7, ""},
+ {"R_390_TLS_IE32", Const, 7, ""},
+ {"R_390_TLS_IE64", Const, 7, ""},
+ {"R_390_TLS_IEENT", Const, 7, ""},
+ {"R_390_TLS_LDCALL", Const, 7, ""},
+ {"R_390_TLS_LDM32", Const, 7, ""},
+ {"R_390_TLS_LDM64", Const, 7, ""},
+ {"R_390_TLS_LDO32", Const, 7, ""},
+ {"R_390_TLS_LDO64", Const, 7, ""},
+ {"R_390_TLS_LE32", Const, 7, ""},
+ {"R_390_TLS_LE64", Const, 7, ""},
+ {"R_390_TLS_LOAD", Const, 7, ""},
+ {"R_390_TLS_TPOFF", Const, 7, ""},
+ {"R_AARCH64", Type, 4, ""},
+ {"R_AARCH64_ABS16", Const, 4, ""},
+ {"R_AARCH64_ABS32", Const, 4, ""},
+ {"R_AARCH64_ABS64", Const, 4, ""},
+ {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_ADR_GOT_PAGE", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_LO21", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4, ""},
+ {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4, ""},
+ {"R_AARCH64_CALL26", Const, 4, ""},
+ {"R_AARCH64_CONDBR19", Const, 4, ""},
+ {"R_AARCH64_COPY", Const, 4, ""},
+ {"R_AARCH64_GLOB_DAT", Const, 4, ""},
+ {"R_AARCH64_GOT_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_IRELATIVE", Const, 4, ""},
+ {"R_AARCH64_JUMP26", Const, 4, ""},
+ {"R_AARCH64_JUMP_SLOT", Const, 4, ""},
+ {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10, ""},
+ {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10, ""},
+ {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_LD_PREL_LO19", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G0", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G1", Const, 4, ""},
+ {"R_AARCH64_MOVW_SABS_G2", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G0", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G1", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G2", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4, ""},
+ {"R_AARCH64_MOVW_UABS_G3", Const, 4, ""},
+ {"R_AARCH64_NONE", Const, 4, ""},
+ {"R_AARCH64_NULL", Const, 4, ""},
+ {"R_AARCH64_P32_ABS16", Const, 4, ""},
+ {"R_AARCH64_P32_ABS32", Const, 4, ""},
+ {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4, ""},
+ {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4, ""},
+ {"R_AARCH64_P32_CALL26", Const, 4, ""},
+ {"R_AARCH64_P32_CONDBR19", Const, 4, ""},
+ {"R_AARCH64_P32_COPY", Const, 4, ""},
+ {"R_AARCH64_P32_GLOB_DAT", Const, 4, ""},
+ {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_IRELATIVE", Const, 4, ""},
+ {"R_AARCH64_P32_JUMP26", Const, 4, ""},
+ {"R_AARCH64_P32_JUMP_SLOT", Const, 4, ""},
+ {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_LD_PREL_LO19", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4, ""},
+ {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4, ""},
+ {"R_AARCH64_P32_PREL16", Const, 4, ""},
+ {"R_AARCH64_P32_PREL32", Const, 4, ""},
+ {"R_AARCH64_P32_RELATIVE", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_CALL", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_DTPMOD", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_DTPREL", Const, 4, ""},
+ {"R_AARCH64_P32_TLS_TPREL", Const, 4, ""},
+ {"R_AARCH64_P32_TSTBR14", Const, 4, ""},
+ {"R_AARCH64_PREL16", Const, 4, ""},
+ {"R_AARCH64_PREL32", Const, 4, ""},
+ {"R_AARCH64_PREL64", Const, 4, ""},
+ {"R_AARCH64_RELATIVE", Const, 4, ""},
+ {"R_AARCH64_TLSDESC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADD", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_CALL", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LDR", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSDESC_OFF_G1", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10, ""},
+ {"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10, ""},
+ {"R_AARCH64_TLSGD_MOVW_G1", Const, 10, ""},
+ {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4, ""},
+ {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4, ""},
+ {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4, ""},
+ {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10, ""},
+ {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10, ""},
+ {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10, ""},
+ {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4, ""},
+ {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10, ""},
+ {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4, ""},
+ {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4, ""},
+ {"R_AARCH64_TLS_DTPMOD64", Const, 4, ""},
+ {"R_AARCH64_TLS_DTPREL64", Const, 4, ""},
+ {"R_AARCH64_TLS_TPREL64", Const, 4, ""},
+ {"R_AARCH64_TSTBR14", Const, 4, ""},
+ {"R_ALPHA", Type, 0, ""},
+ {"R_ALPHA_BRADDR", Const, 0, ""},
+ {"R_ALPHA_COPY", Const, 0, ""},
+ {"R_ALPHA_GLOB_DAT", Const, 0, ""},
+ {"R_ALPHA_GPDISP", Const, 0, ""},
+ {"R_ALPHA_GPREL32", Const, 0, ""},
+ {"R_ALPHA_GPRELHIGH", Const, 0, ""},
+ {"R_ALPHA_GPRELLOW", Const, 0, ""},
+ {"R_ALPHA_GPVALUE", Const, 0, ""},
+ {"R_ALPHA_HINT", Const, 0, ""},
+ {"R_ALPHA_IMMED_BR_HI32", Const, 0, ""},
+ {"R_ALPHA_IMMED_GP_16", Const, 0, ""},
+ {"R_ALPHA_IMMED_GP_HI32", Const, 0, ""},
+ {"R_ALPHA_IMMED_LO32", Const, 0, ""},
+ {"R_ALPHA_IMMED_SCN_HI32", Const, 0, ""},
+ {"R_ALPHA_JMP_SLOT", Const, 0, ""},
+ {"R_ALPHA_LITERAL", Const, 0, ""},
+ {"R_ALPHA_LITUSE", Const, 0, ""},
+ {"R_ALPHA_NONE", Const, 0, ""},
+ {"R_ALPHA_OP_PRSHIFT", Const, 0, ""},
+ {"R_ALPHA_OP_PSUB", Const, 0, ""},
+ {"R_ALPHA_OP_PUSH", Const, 0, ""},
+ {"R_ALPHA_OP_STORE", Const, 0, ""},
+ {"R_ALPHA_REFLONG", Const, 0, ""},
+ {"R_ALPHA_REFQUAD", Const, 0, ""},
+ {"R_ALPHA_RELATIVE", Const, 0, ""},
+ {"R_ALPHA_SREL16", Const, 0, ""},
+ {"R_ALPHA_SREL32", Const, 0, ""},
+ {"R_ALPHA_SREL64", Const, 0, ""},
+ {"R_ARM", Type, 0, ""},
+ {"R_ARM_ABS12", Const, 0, ""},
+ {"R_ARM_ABS16", Const, 0, ""},
+ {"R_ARM_ABS32", Const, 0, ""},
+ {"R_ARM_ABS32_NOI", Const, 10, ""},
+ {"R_ARM_ABS8", Const, 0, ""},
+ {"R_ARM_ALU_PCREL_15_8", Const, 10, ""},
+ {"R_ARM_ALU_PCREL_23_15", Const, 10, ""},
+ {"R_ARM_ALU_PCREL_7_0", Const, 10, ""},
+ {"R_ARM_ALU_PC_G0", Const, 10, ""},
+ {"R_ARM_ALU_PC_G0_NC", Const, 10, ""},
+ {"R_ARM_ALU_PC_G1", Const, 10, ""},
+ {"R_ARM_ALU_PC_G1_NC", Const, 10, ""},
+ {"R_ARM_ALU_PC_G2", Const, 10, ""},
+ {"R_ARM_ALU_SBREL_19_12_NC", Const, 10, ""},
+ {"R_ARM_ALU_SBREL_27_20_CK", Const, 10, ""},
+ {"R_ARM_ALU_SB_G0", Const, 10, ""},
+ {"R_ARM_ALU_SB_G0_NC", Const, 10, ""},
+ {"R_ARM_ALU_SB_G1", Const, 10, ""},
+ {"R_ARM_ALU_SB_G1_NC", Const, 10, ""},
+ {"R_ARM_ALU_SB_G2", Const, 10, ""},
+ {"R_ARM_AMP_VCALL9", Const, 0, ""},
+ {"R_ARM_BASE_ABS", Const, 10, ""},
+ {"R_ARM_CALL", Const, 10, ""},
+ {"R_ARM_COPY", Const, 0, ""},
+ {"R_ARM_GLOB_DAT", Const, 0, ""},
+ {"R_ARM_GNU_VTENTRY", Const, 0, ""},
+ {"R_ARM_GNU_VTINHERIT", Const, 0, ""},
+ {"R_ARM_GOT32", Const, 0, ""},
+ {"R_ARM_GOTOFF", Const, 0, ""},
+ {"R_ARM_GOTOFF12", Const, 10, ""},
+ {"R_ARM_GOTPC", Const, 0, ""},
+ {"R_ARM_GOTRELAX", Const, 10, ""},
+ {"R_ARM_GOT_ABS", Const, 10, ""},
+ {"R_ARM_GOT_BREL12", Const, 10, ""},
+ {"R_ARM_GOT_PREL", Const, 10, ""},
+ {"R_ARM_IRELATIVE", Const, 10, ""},
+ {"R_ARM_JUMP24", Const, 10, ""},
+ {"R_ARM_JUMP_SLOT", Const, 0, ""},
+ {"R_ARM_LDC_PC_G0", Const, 10, ""},
+ {"R_ARM_LDC_PC_G1", Const, 10, ""},
+ {"R_ARM_LDC_PC_G2", Const, 10, ""},
+ {"R_ARM_LDC_SB_G0", Const, 10, ""},
+ {"R_ARM_LDC_SB_G1", Const, 10, ""},
+ {"R_ARM_LDC_SB_G2", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G0", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G1", Const, 10, ""},
+ {"R_ARM_LDRS_PC_G2", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G0", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G1", Const, 10, ""},
+ {"R_ARM_LDRS_SB_G2", Const, 10, ""},
+ {"R_ARM_LDR_PC_G1", Const, 10, ""},
+ {"R_ARM_LDR_PC_G2", Const, 10, ""},
+ {"R_ARM_LDR_SBREL_11_10_NC", Const, 10, ""},
+ {"R_ARM_LDR_SB_G0", Const, 10, ""},
+ {"R_ARM_LDR_SB_G1", Const, 10, ""},
+ {"R_ARM_LDR_SB_G2", Const, 10, ""},
+ {"R_ARM_ME_TOO", Const, 10, ""},
+ {"R_ARM_MOVT_ABS", Const, 10, ""},
+ {"R_ARM_MOVT_BREL", Const, 10, ""},
+ {"R_ARM_MOVT_PREL", Const, 10, ""},
+ {"R_ARM_MOVW_ABS_NC", Const, 10, ""},
+ {"R_ARM_MOVW_BREL", Const, 10, ""},
+ {"R_ARM_MOVW_BREL_NC", Const, 10, ""},
+ {"R_ARM_MOVW_PREL_NC", Const, 10, ""},
+ {"R_ARM_NONE", Const, 0, ""},
+ {"R_ARM_PC13", Const, 0, ""},
+ {"R_ARM_PC24", Const, 0, ""},
+ {"R_ARM_PLT32", Const, 0, ""},
+ {"R_ARM_PLT32_ABS", Const, 10, ""},
+ {"R_ARM_PREL31", Const, 10, ""},
+ {"R_ARM_PRIVATE_0", Const, 10, ""},
+ {"R_ARM_PRIVATE_1", Const, 10, ""},
+ {"R_ARM_PRIVATE_10", Const, 10, ""},
+ {"R_ARM_PRIVATE_11", Const, 10, ""},
+ {"R_ARM_PRIVATE_12", Const, 10, ""},
+ {"R_ARM_PRIVATE_13", Const, 10, ""},
+ {"R_ARM_PRIVATE_14", Const, 10, ""},
+ {"R_ARM_PRIVATE_15", Const, 10, ""},
+ {"R_ARM_PRIVATE_2", Const, 10, ""},
+ {"R_ARM_PRIVATE_3", Const, 10, ""},
+ {"R_ARM_PRIVATE_4", Const, 10, ""},
+ {"R_ARM_PRIVATE_5", Const, 10, ""},
+ {"R_ARM_PRIVATE_6", Const, 10, ""},
+ {"R_ARM_PRIVATE_7", Const, 10, ""},
+ {"R_ARM_PRIVATE_8", Const, 10, ""},
+ {"R_ARM_PRIVATE_9", Const, 10, ""},
+ {"R_ARM_RABS32", Const, 0, ""},
+ {"R_ARM_RBASE", Const, 0, ""},
+ {"R_ARM_REL32", Const, 0, ""},
+ {"R_ARM_REL32_NOI", Const, 10, ""},
+ {"R_ARM_RELATIVE", Const, 0, ""},
+ {"R_ARM_RPC24", Const, 0, ""},
+ {"R_ARM_RREL32", Const, 0, ""},
+ {"R_ARM_RSBREL32", Const, 0, ""},
+ {"R_ARM_RXPC25", Const, 10, ""},
+ {"R_ARM_SBREL31", Const, 10, ""},
+ {"R_ARM_SBREL32", Const, 0, ""},
+ {"R_ARM_SWI24", Const, 0, ""},
+ {"R_ARM_TARGET1", Const, 10, ""},
+ {"R_ARM_TARGET2", Const, 10, ""},
+ {"R_ARM_THM_ABS5", Const, 0, ""},
+ {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10, ""},
+ {"R_ARM_THM_ALU_ABS_G3", Const, 10, ""},
+ {"R_ARM_THM_ALU_PREL_11_0", Const, 10, ""},
+ {"R_ARM_THM_GOT_BREL12", Const, 10, ""},
+ {"R_ARM_THM_JUMP11", Const, 10, ""},
+ {"R_ARM_THM_JUMP19", Const, 10, ""},
+ {"R_ARM_THM_JUMP24", Const, 10, ""},
+ {"R_ARM_THM_JUMP6", Const, 10, ""},
+ {"R_ARM_THM_JUMP8", Const, 10, ""},
+ {"R_ARM_THM_MOVT_ABS", Const, 10, ""},
+ {"R_ARM_THM_MOVT_BREL", Const, 10, ""},
+ {"R_ARM_THM_MOVT_PREL", Const, 10, ""},
+ {"R_ARM_THM_MOVW_ABS_NC", Const, 10, ""},
+ {"R_ARM_THM_MOVW_BREL", Const, 10, ""},
+ {"R_ARM_THM_MOVW_BREL_NC", Const, 10, ""},
+ {"R_ARM_THM_MOVW_PREL_NC", Const, 10, ""},
+ {"R_ARM_THM_PC12", Const, 10, ""},
+ {"R_ARM_THM_PC22", Const, 0, ""},
+ {"R_ARM_THM_PC8", Const, 0, ""},
+ {"R_ARM_THM_RPC22", Const, 0, ""},
+ {"R_ARM_THM_SWI8", Const, 0, ""},
+ {"R_ARM_THM_TLS_CALL", Const, 10, ""},
+ {"R_ARM_THM_TLS_DESCSEQ16", Const, 10, ""},
+ {"R_ARM_THM_TLS_DESCSEQ32", Const, 10, ""},
+ {"R_ARM_THM_XPC22", Const, 0, ""},
+ {"R_ARM_TLS_CALL", Const, 10, ""},
+ {"R_ARM_TLS_DESCSEQ", Const, 10, ""},
+ {"R_ARM_TLS_DTPMOD32", Const, 10, ""},
+ {"R_ARM_TLS_DTPOFF32", Const, 10, ""},
+ {"R_ARM_TLS_GD32", Const, 10, ""},
+ {"R_ARM_TLS_GOTDESC", Const, 10, ""},
+ {"R_ARM_TLS_IE12GP", Const, 10, ""},
+ {"R_ARM_TLS_IE32", Const, 10, ""},
+ {"R_ARM_TLS_LDM32", Const, 10, ""},
+ {"R_ARM_TLS_LDO12", Const, 10, ""},
+ {"R_ARM_TLS_LDO32", Const, 10, ""},
+ {"R_ARM_TLS_LE12", Const, 10, ""},
+ {"R_ARM_TLS_LE32", Const, 10, ""},
+ {"R_ARM_TLS_TPOFF32", Const, 10, ""},
+ {"R_ARM_V4BX", Const, 10, ""},
+ {"R_ARM_XPC25", Const, 0, ""},
+ {"R_INFO", Func, 0, "func(sym uint32, typ uint32) uint64"},
+ {"R_INFO32", Func, 0, "func(sym uint32, typ uint32) uint32"},
+ {"R_LARCH", Type, 19, ""},
+ {"R_LARCH_32", Const, 19, ""},
+ {"R_LARCH_32_PCREL", Const, 20, ""},
+ {"R_LARCH_64", Const, 19, ""},
+ {"R_LARCH_64_PCREL", Const, 22, ""},
+ {"R_LARCH_ABS64_HI12", Const, 20, ""},
+ {"R_LARCH_ABS64_LO20", Const, 20, ""},
+ {"R_LARCH_ABS_HI20", Const, 20, ""},
+ {"R_LARCH_ABS_LO12", Const, 20, ""},
+ {"R_LARCH_ADD16", Const, 19, ""},
+ {"R_LARCH_ADD24", Const, 19, ""},
+ {"R_LARCH_ADD32", Const, 19, ""},
+ {"R_LARCH_ADD6", Const, 22, ""},
+ {"R_LARCH_ADD64", Const, 19, ""},
+ {"R_LARCH_ADD8", Const, 19, ""},
+ {"R_LARCH_ADD_ULEB128", Const, 22, ""},
+ {"R_LARCH_ALIGN", Const, 22, ""},
+ {"R_LARCH_B16", Const, 20, ""},
+ {"R_LARCH_B21", Const, 20, ""},
+ {"R_LARCH_B26", Const, 20, ""},
+ {"R_LARCH_CFA", Const, 22, ""},
+ {"R_LARCH_COPY", Const, 19, ""},
+ {"R_LARCH_DELETE", Const, 22, ""},
+ {"R_LARCH_GNU_VTENTRY", Const, 20, ""},
+ {"R_LARCH_GNU_VTINHERIT", Const, 20, ""},
+ {"R_LARCH_GOT64_HI12", Const, 20, ""},
+ {"R_LARCH_GOT64_LO20", Const, 20, ""},
+ {"R_LARCH_GOT64_PC_HI12", Const, 20, ""},
+ {"R_LARCH_GOT64_PC_LO20", Const, 20, ""},
+ {"R_LARCH_GOT_HI20", Const, 20, ""},
+ {"R_LARCH_GOT_LO12", Const, 20, ""},
+ {"R_LARCH_GOT_PC_HI20", Const, 20, ""},
+ {"R_LARCH_GOT_PC_LO12", Const, 20, ""},
+ {"R_LARCH_IRELATIVE", Const, 19, ""},
+ {"R_LARCH_JUMP_SLOT", Const, 19, ""},
+ {"R_LARCH_MARK_LA", Const, 19, ""},
+ {"R_LARCH_MARK_PCREL", Const, 19, ""},
+ {"R_LARCH_NONE", Const, 19, ""},
+ {"R_LARCH_PCALA64_HI12", Const, 20, ""},
+ {"R_LARCH_PCALA64_LO20", Const, 20, ""},
+ {"R_LARCH_PCALA_HI20", Const, 20, ""},
+ {"R_LARCH_PCALA_LO12", Const, 20, ""},
+ {"R_LARCH_PCREL20_S2", Const, 22, ""},
+ {"R_LARCH_RELATIVE", Const, 19, ""},
+ {"R_LARCH_RELAX", Const, 20, ""},
+ {"R_LARCH_SOP_ADD", Const, 19, ""},
+ {"R_LARCH_SOP_AND", Const, 19, ""},
+ {"R_LARCH_SOP_ASSERT", Const, 19, ""},
+ {"R_LARCH_SOP_IF_ELSE", Const, 19, ""},
+ {"R_LARCH_SOP_NOT", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_12", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_16", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_10_5", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_S_5_20", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_U", Const, 19, ""},
+ {"R_LARCH_SOP_POP_32_U_10_12", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_DUP", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_GPREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_PCREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19, ""},
+ {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19, ""},
+ {"R_LARCH_SOP_SL", Const, 19, ""},
+ {"R_LARCH_SOP_SR", Const, 19, ""},
+ {"R_LARCH_SOP_SUB", Const, 19, ""},
+ {"R_LARCH_SUB16", Const, 19, ""},
+ {"R_LARCH_SUB24", Const, 19, ""},
+ {"R_LARCH_SUB32", Const, 19, ""},
+ {"R_LARCH_SUB6", Const, 22, ""},
+ {"R_LARCH_SUB64", Const, 19, ""},
+ {"R_LARCH_SUB8", Const, 19, ""},
+ {"R_LARCH_SUB_ULEB128", Const, 22, ""},
+ {"R_LARCH_TLS_DTPMOD32", Const, 19, ""},
+ {"R_LARCH_TLS_DTPMOD64", Const, 19, ""},
+ {"R_LARCH_TLS_DTPREL32", Const, 19, ""},
+ {"R_LARCH_TLS_DTPREL64", Const, 19, ""},
+ {"R_LARCH_TLS_GD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_PC_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_IE64_PC_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_LD_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LE64_HI12", Const, 20, ""},
+ {"R_LARCH_TLS_LE64_LO20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_HI20", Const, 20, ""},
+ {"R_LARCH_TLS_LE_LO12", Const, 20, ""},
+ {"R_LARCH_TLS_TPREL32", Const, 19, ""},
+ {"R_LARCH_TLS_TPREL64", Const, 19, ""},
+ {"R_MIPS", Type, 6, ""},
+ {"R_MIPS_16", Const, 6, ""},
+ {"R_MIPS_26", Const, 6, ""},
+ {"R_MIPS_32", Const, 6, ""},
+ {"R_MIPS_64", Const, 6, ""},
+ {"R_MIPS_ADD_IMMEDIATE", Const, 6, ""},
+ {"R_MIPS_CALL16", Const, 6, ""},
+ {"R_MIPS_CALL_HI16", Const, 6, ""},
+ {"R_MIPS_CALL_LO16", Const, 6, ""},
+ {"R_MIPS_DELETE", Const, 6, ""},
+ {"R_MIPS_GOT16", Const, 6, ""},
+ {"R_MIPS_GOT_DISP", Const, 6, ""},
+ {"R_MIPS_GOT_HI16", Const, 6, ""},
+ {"R_MIPS_GOT_LO16", Const, 6, ""},
+ {"R_MIPS_GOT_OFST", Const, 6, ""},
+ {"R_MIPS_GOT_PAGE", Const, 6, ""},
+ {"R_MIPS_GPREL16", Const, 6, ""},
+ {"R_MIPS_GPREL32", Const, 6, ""},
+ {"R_MIPS_HI16", Const, 6, ""},
+ {"R_MIPS_HIGHER", Const, 6, ""},
+ {"R_MIPS_HIGHEST", Const, 6, ""},
+ {"R_MIPS_INSERT_A", Const, 6, ""},
+ {"R_MIPS_INSERT_B", Const, 6, ""},
+ {"R_MIPS_JALR", Const, 6, ""},
+ {"R_MIPS_LITERAL", Const, 6, ""},
+ {"R_MIPS_LO16", Const, 6, ""},
+ {"R_MIPS_NONE", Const, 6, ""},
+ {"R_MIPS_PC16", Const, 6, ""},
+ {"R_MIPS_PC32", Const, 22, ""},
+ {"R_MIPS_PJUMP", Const, 6, ""},
+ {"R_MIPS_REL16", Const, 6, ""},
+ {"R_MIPS_REL32", Const, 6, ""},
+ {"R_MIPS_RELGOT", Const, 6, ""},
+ {"R_MIPS_SCN_DISP", Const, 6, ""},
+ {"R_MIPS_SHIFT5", Const, 6, ""},
+ {"R_MIPS_SHIFT6", Const, 6, ""},
+ {"R_MIPS_SUB", Const, 6, ""},
+ {"R_MIPS_TLS_DTPMOD32", Const, 6, ""},
+ {"R_MIPS_TLS_DTPMOD64", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL32", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL64", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL_HI16", Const, 6, ""},
+ {"R_MIPS_TLS_DTPREL_LO16", Const, 6, ""},
+ {"R_MIPS_TLS_GD", Const, 6, ""},
+ {"R_MIPS_TLS_GOTTPREL", Const, 6, ""},
+ {"R_MIPS_TLS_LDM", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL32", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL64", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL_HI16", Const, 6, ""},
+ {"R_MIPS_TLS_TPREL_LO16", Const, 6, ""},
+ {"R_PPC", Type, 0, ""},
+ {"R_PPC64", Type, 5, ""},
+ {"R_PPC64_ADDR14", Const, 5, ""},
+ {"R_PPC64_ADDR14_BRNTAKEN", Const, 5, ""},
+ {"R_PPC64_ADDR14_BRTAKEN", Const, 5, ""},
+ {"R_PPC64_ADDR16", Const, 5, ""},
+ {"R_PPC64_ADDR16_DS", Const, 5, ""},
+ {"R_PPC64_ADDR16_HA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HI", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGH", Const, 10, ""},
+ {"R_PPC64_ADDR16_HIGHA", Const, 10, ""},
+ {"R_PPC64_ADDR16_HIGHER", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHER34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHERA34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHEST34", Const, 20, ""},
+ {"R_PPC64_ADDR16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_ADDR16_HIGHESTA34", Const, 20, ""},
+ {"R_PPC64_ADDR16_LO", Const, 5, ""},
+ {"R_PPC64_ADDR16_LO_DS", Const, 5, ""},
+ {"R_PPC64_ADDR24", Const, 5, ""},
+ {"R_PPC64_ADDR32", Const, 5, ""},
+ {"R_PPC64_ADDR64", Const, 5, ""},
+ {"R_PPC64_ADDR64_LOCAL", Const, 10, ""},
+ {"R_PPC64_COPY", Const, 20, ""},
+ {"R_PPC64_D28", Const, 20, ""},
+ {"R_PPC64_D34", Const, 20, ""},
+ {"R_PPC64_D34_HA30", Const, 20, ""},
+ {"R_PPC64_D34_HI30", Const, 20, ""},
+ {"R_PPC64_D34_LO", Const, 20, ""},
+ {"R_PPC64_DTPMOD64", Const, 5, ""},
+ {"R_PPC64_DTPREL16", Const, 5, ""},
+ {"R_PPC64_DTPREL16_DS", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HI", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGH", Const, 10, ""},
+ {"R_PPC64_DTPREL16_HIGHA", Const, 10, ""},
+ {"R_PPC64_DTPREL16_HIGHER", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_DTPREL16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_DTPREL16_LO", Const, 5, ""},
+ {"R_PPC64_DTPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_DTPREL34", Const, 20, ""},
+ {"R_PPC64_DTPREL64", Const, 5, ""},
+ {"R_PPC64_ENTRY", Const, 10, ""},
+ {"R_PPC64_GLOB_DAT", Const, 20, ""},
+ {"R_PPC64_GNU_VTENTRY", Const, 20, ""},
+ {"R_PPC64_GNU_VTINHERIT", Const, 20, ""},
+ {"R_PPC64_GOT16", Const, 5, ""},
+ {"R_PPC64_GOT16_DS", Const, 5, ""},
+ {"R_PPC64_GOT16_HA", Const, 5, ""},
+ {"R_PPC64_GOT16_HI", Const, 5, ""},
+ {"R_PPC64_GOT16_LO", Const, 5, ""},
+ {"R_PPC64_GOT16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TLSGD16", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD16_LO", Const, 5, ""},
+ {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TLSLD16", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD16_LO", Const, 5, ""},
+ {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20, ""},
+ {"R_PPC64_GOT_TPREL16_DS", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_HA", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_HI", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_GOT_TPREL_PCREL34", Const, 20, ""},
+ {"R_PPC64_IRELATIVE", Const, 10, ""},
+ {"R_PPC64_JMP_IREL", Const, 10, ""},
+ {"R_PPC64_JMP_SLOT", Const, 5, ""},
+ {"R_PPC64_NONE", Const, 5, ""},
+ {"R_PPC64_PCREL28", Const, 20, ""},
+ {"R_PPC64_PCREL34", Const, 20, ""},
+ {"R_PPC64_PCREL_OPT", Const, 20, ""},
+ {"R_PPC64_PLT16_HA", Const, 20, ""},
+ {"R_PPC64_PLT16_HI", Const, 20, ""},
+ {"R_PPC64_PLT16_LO", Const, 20, ""},
+ {"R_PPC64_PLT16_LO_DS", Const, 10, ""},
+ {"R_PPC64_PLT32", Const, 20, ""},
+ {"R_PPC64_PLT64", Const, 20, ""},
+ {"R_PPC64_PLTCALL", Const, 20, ""},
+ {"R_PPC64_PLTCALL_NOTOC", Const, 20, ""},
+ {"R_PPC64_PLTGOT16", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_DS", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_HA", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_HI", Const, 10, ""},
+ {"R_PPC64_PLTGOT16_LO", Const, 10, ""},
+ {"R_PPC64_PLTGOT_LO_DS", Const, 10, ""},
+ {"R_PPC64_PLTREL32", Const, 20, ""},
+ {"R_PPC64_PLTREL64", Const, 20, ""},
+ {"R_PPC64_PLTSEQ", Const, 20, ""},
+ {"R_PPC64_PLTSEQ_NOTOC", Const, 20, ""},
+ {"R_PPC64_PLT_PCREL34", Const, 20, ""},
+ {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20, ""},
+ {"R_PPC64_REL14", Const, 5, ""},
+ {"R_PPC64_REL14_BRNTAKEN", Const, 5, ""},
+ {"R_PPC64_REL14_BRTAKEN", Const, 5, ""},
+ {"R_PPC64_REL16", Const, 5, ""},
+ {"R_PPC64_REL16DX_HA", Const, 10, ""},
+ {"R_PPC64_REL16_HA", Const, 5, ""},
+ {"R_PPC64_REL16_HI", Const, 5, ""},
+ {"R_PPC64_REL16_HIGH", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHER", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHER34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHERA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHERA34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHEST", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHEST34", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHESTA", Const, 20, ""},
+ {"R_PPC64_REL16_HIGHESTA34", Const, 20, ""},
+ {"R_PPC64_REL16_LO", Const, 5, ""},
+ {"R_PPC64_REL24", Const, 5, ""},
+ {"R_PPC64_REL24_NOTOC", Const, 10, ""},
+ {"R_PPC64_REL24_P9NOTOC", Const, 21, ""},
+ {"R_PPC64_REL30", Const, 20, ""},
+ {"R_PPC64_REL32", Const, 5, ""},
+ {"R_PPC64_REL64", Const, 5, ""},
+ {"R_PPC64_RELATIVE", Const, 18, ""},
+ {"R_PPC64_SECTOFF", Const, 20, ""},
+ {"R_PPC64_SECTOFF_DS", Const, 10, ""},
+ {"R_PPC64_SECTOFF_HA", Const, 20, ""},
+ {"R_PPC64_SECTOFF_HI", Const, 20, ""},
+ {"R_PPC64_SECTOFF_LO", Const, 20, ""},
+ {"R_PPC64_SECTOFF_LO_DS", Const, 10, ""},
+ {"R_PPC64_TLS", Const, 5, ""},
+ {"R_PPC64_TLSGD", Const, 5, ""},
+ {"R_PPC64_TLSLD", Const, 5, ""},
+ {"R_PPC64_TOC", Const, 5, ""},
+ {"R_PPC64_TOC16", Const, 5, ""},
+ {"R_PPC64_TOC16_DS", Const, 5, ""},
+ {"R_PPC64_TOC16_HA", Const, 5, ""},
+ {"R_PPC64_TOC16_HI", Const, 5, ""},
+ {"R_PPC64_TOC16_LO", Const, 5, ""},
+ {"R_PPC64_TOC16_LO_DS", Const, 5, ""},
+ {"R_PPC64_TOCSAVE", Const, 10, ""},
+ {"R_PPC64_TPREL16", Const, 5, ""},
+ {"R_PPC64_TPREL16_DS", Const, 5, ""},
+ {"R_PPC64_TPREL16_HA", Const, 5, ""},
+ {"R_PPC64_TPREL16_HI", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGH", Const, 10, ""},
+ {"R_PPC64_TPREL16_HIGHA", Const, 10, ""},
+ {"R_PPC64_TPREL16_HIGHER", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHERA", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHEST", Const, 5, ""},
+ {"R_PPC64_TPREL16_HIGHESTA", Const, 5, ""},
+ {"R_PPC64_TPREL16_LO", Const, 5, ""},
+ {"R_PPC64_TPREL16_LO_DS", Const, 5, ""},
+ {"R_PPC64_TPREL34", Const, 20, ""},
+ {"R_PPC64_TPREL64", Const, 5, ""},
+ {"R_PPC64_UADDR16", Const, 20, ""},
+ {"R_PPC64_UADDR32", Const, 20, ""},
+ {"R_PPC64_UADDR64", Const, 20, ""},
+ {"R_PPC_ADDR14", Const, 0, ""},
+ {"R_PPC_ADDR14_BRNTAKEN", Const, 0, ""},
+ {"R_PPC_ADDR14_BRTAKEN", Const, 0, ""},
+ {"R_PPC_ADDR16", Const, 0, ""},
+ {"R_PPC_ADDR16_HA", Const, 0, ""},
+ {"R_PPC_ADDR16_HI", Const, 0, ""},
+ {"R_PPC_ADDR16_LO", Const, 0, ""},
+ {"R_PPC_ADDR24", Const, 0, ""},
+ {"R_PPC_ADDR32", Const, 0, ""},
+ {"R_PPC_COPY", Const, 0, ""},
+ {"R_PPC_DTPMOD32", Const, 0, ""},
+ {"R_PPC_DTPREL16", Const, 0, ""},
+ {"R_PPC_DTPREL16_HA", Const, 0, ""},
+ {"R_PPC_DTPREL16_HI", Const, 0, ""},
+ {"R_PPC_DTPREL16_LO", Const, 0, ""},
+ {"R_PPC_DTPREL32", Const, 0, ""},
+ {"R_PPC_EMB_BIT_FLD", Const, 0, ""},
+ {"R_PPC_EMB_MRKREF", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_HA", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_HI", Const, 0, ""},
+ {"R_PPC_EMB_NADDR16_LO", Const, 0, ""},
+ {"R_PPC_EMB_NADDR32", Const, 0, ""},
+ {"R_PPC_EMB_RELSDA", Const, 0, ""},
+ {"R_PPC_EMB_RELSEC16", Const, 0, ""},
+ {"R_PPC_EMB_RELST_HA", Const, 0, ""},
+ {"R_PPC_EMB_RELST_HI", Const, 0, ""},
+ {"R_PPC_EMB_RELST_LO", Const, 0, ""},
+ {"R_PPC_EMB_SDA21", Const, 0, ""},
+ {"R_PPC_EMB_SDA2I16", Const, 0, ""},
+ {"R_PPC_EMB_SDA2REL", Const, 0, ""},
+ {"R_PPC_EMB_SDAI16", Const, 0, ""},
+ {"R_PPC_GLOB_DAT", Const, 0, ""},
+ {"R_PPC_GOT16", Const, 0, ""},
+ {"R_PPC_GOT16_HA", Const, 0, ""},
+ {"R_PPC_GOT16_HI", Const, 0, ""},
+ {"R_PPC_GOT16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TLSGD16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TLSLD16_LO", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_HA", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_HI", Const, 0, ""},
+ {"R_PPC_GOT_TPREL16_LO", Const, 0, ""},
+ {"R_PPC_JMP_SLOT", Const, 0, ""},
+ {"R_PPC_LOCAL24PC", Const, 0, ""},
+ {"R_PPC_NONE", Const, 0, ""},
+ {"R_PPC_PLT16_HA", Const, 0, ""},
+ {"R_PPC_PLT16_HI", Const, 0, ""},
+ {"R_PPC_PLT16_LO", Const, 0, ""},
+ {"R_PPC_PLT32", Const, 0, ""},
+ {"R_PPC_PLTREL24", Const, 0, ""},
+ {"R_PPC_PLTREL32", Const, 0, ""},
+ {"R_PPC_REL14", Const, 0, ""},
+ {"R_PPC_REL14_BRNTAKEN", Const, 0, ""},
+ {"R_PPC_REL14_BRTAKEN", Const, 0, ""},
+ {"R_PPC_REL24", Const, 0, ""},
+ {"R_PPC_REL32", Const, 0, ""},
+ {"R_PPC_RELATIVE", Const, 0, ""},
+ {"R_PPC_SDAREL16", Const, 0, ""},
+ {"R_PPC_SECTOFF", Const, 0, ""},
+ {"R_PPC_SECTOFF_HA", Const, 0, ""},
+ {"R_PPC_SECTOFF_HI", Const, 0, ""},
+ {"R_PPC_SECTOFF_LO", Const, 0, ""},
+ {"R_PPC_TLS", Const, 0, ""},
+ {"R_PPC_TPREL16", Const, 0, ""},
+ {"R_PPC_TPREL16_HA", Const, 0, ""},
+ {"R_PPC_TPREL16_HI", Const, 0, ""},
+ {"R_PPC_TPREL16_LO", Const, 0, ""},
+ {"R_PPC_TPREL32", Const, 0, ""},
+ {"R_PPC_UADDR16", Const, 0, ""},
+ {"R_PPC_UADDR32", Const, 0, ""},
+ {"R_RISCV", Type, 11, ""},
+ {"R_RISCV_32", Const, 11, ""},
+ {"R_RISCV_32_PCREL", Const, 12, ""},
+ {"R_RISCV_64", Const, 11, ""},
+ {"R_RISCV_ADD16", Const, 11, ""},
+ {"R_RISCV_ADD32", Const, 11, ""},
+ {"R_RISCV_ADD64", Const, 11, ""},
+ {"R_RISCV_ADD8", Const, 11, ""},
+ {"R_RISCV_ALIGN", Const, 11, ""},
+ {"R_RISCV_BRANCH", Const, 11, ""},
+ {"R_RISCV_CALL", Const, 11, ""},
+ {"R_RISCV_CALL_PLT", Const, 11, ""},
+ {"R_RISCV_COPY", Const, 11, ""},
+ {"R_RISCV_GNU_VTENTRY", Const, 11, ""},
+ {"R_RISCV_GNU_VTINHERIT", Const, 11, ""},
+ {"R_RISCV_GOT_HI20", Const, 11, ""},
+ {"R_RISCV_GPREL_I", Const, 11, ""},
+ {"R_RISCV_GPREL_S", Const, 11, ""},
+ {"R_RISCV_HI20", Const, 11, ""},
+ {"R_RISCV_JAL", Const, 11, ""},
+ {"R_RISCV_JUMP_SLOT", Const, 11, ""},
+ {"R_RISCV_LO12_I", Const, 11, ""},
+ {"R_RISCV_LO12_S", Const, 11, ""},
+ {"R_RISCV_NONE", Const, 11, ""},
+ {"R_RISCV_PCREL_HI20", Const, 11, ""},
+ {"R_RISCV_PCREL_LO12_I", Const, 11, ""},
+ {"R_RISCV_PCREL_LO12_S", Const, 11, ""},
+ {"R_RISCV_RELATIVE", Const, 11, ""},
+ {"R_RISCV_RELAX", Const, 11, ""},
+ {"R_RISCV_RVC_BRANCH", Const, 11, ""},
+ {"R_RISCV_RVC_JUMP", Const, 11, ""},
+ {"R_RISCV_RVC_LUI", Const, 11, ""},
+ {"R_RISCV_SET16", Const, 11, ""},
+ {"R_RISCV_SET32", Const, 11, ""},
+ {"R_RISCV_SET6", Const, 11, ""},
+ {"R_RISCV_SET8", Const, 11, ""},
+ {"R_RISCV_SUB16", Const, 11, ""},
+ {"R_RISCV_SUB32", Const, 11, ""},
+ {"R_RISCV_SUB6", Const, 11, ""},
+ {"R_RISCV_SUB64", Const, 11, ""},
+ {"R_RISCV_SUB8", Const, 11, ""},
+ {"R_RISCV_TLS_DTPMOD32", Const, 11, ""},
+ {"R_RISCV_TLS_DTPMOD64", Const, 11, ""},
+ {"R_RISCV_TLS_DTPREL32", Const, 11, ""},
+ {"R_RISCV_TLS_DTPREL64", Const, 11, ""},
+ {"R_RISCV_TLS_GD_HI20", Const, 11, ""},
+ {"R_RISCV_TLS_GOT_HI20", Const, 11, ""},
+ {"R_RISCV_TLS_TPREL32", Const, 11, ""},
+ {"R_RISCV_TLS_TPREL64", Const, 11, ""},
+ {"R_RISCV_TPREL_ADD", Const, 11, ""},
+ {"R_RISCV_TPREL_HI20", Const, 11, ""},
+ {"R_RISCV_TPREL_I", Const, 11, ""},
+ {"R_RISCV_TPREL_LO12_I", Const, 11, ""},
+ {"R_RISCV_TPREL_LO12_S", Const, 11, ""},
+ {"R_RISCV_TPREL_S", Const, 11, ""},
+ {"R_SPARC", Type, 0, ""},
+ {"R_SPARC_10", Const, 0, ""},
+ {"R_SPARC_11", Const, 0, ""},
+ {"R_SPARC_13", Const, 0, ""},
+ {"R_SPARC_16", Const, 0, ""},
+ {"R_SPARC_22", Const, 0, ""},
+ {"R_SPARC_32", Const, 0, ""},
+ {"R_SPARC_5", Const, 0, ""},
+ {"R_SPARC_6", Const, 0, ""},
+ {"R_SPARC_64", Const, 0, ""},
+ {"R_SPARC_7", Const, 0, ""},
+ {"R_SPARC_8", Const, 0, ""},
+ {"R_SPARC_COPY", Const, 0, ""},
+ {"R_SPARC_DISP16", Const, 0, ""},
+ {"R_SPARC_DISP32", Const, 0, ""},
+ {"R_SPARC_DISP64", Const, 0, ""},
+ {"R_SPARC_DISP8", Const, 0, ""},
+ {"R_SPARC_GLOB_DAT", Const, 0, ""},
+ {"R_SPARC_GLOB_JMP", Const, 0, ""},
+ {"R_SPARC_GOT10", Const, 0, ""},
+ {"R_SPARC_GOT13", Const, 0, ""},
+ {"R_SPARC_GOT22", Const, 0, ""},
+ {"R_SPARC_H44", Const, 0, ""},
+ {"R_SPARC_HH22", Const, 0, ""},
+ {"R_SPARC_HI22", Const, 0, ""},
+ {"R_SPARC_HIPLT22", Const, 0, ""},
+ {"R_SPARC_HIX22", Const, 0, ""},
+ {"R_SPARC_HM10", Const, 0, ""},
+ {"R_SPARC_JMP_SLOT", Const, 0, ""},
+ {"R_SPARC_L44", Const, 0, ""},
+ {"R_SPARC_LM22", Const, 0, ""},
+ {"R_SPARC_LO10", Const, 0, ""},
+ {"R_SPARC_LOPLT10", Const, 0, ""},
+ {"R_SPARC_LOX10", Const, 0, ""},
+ {"R_SPARC_M44", Const, 0, ""},
+ {"R_SPARC_NONE", Const, 0, ""},
+ {"R_SPARC_OLO10", Const, 0, ""},
+ {"R_SPARC_PC10", Const, 0, ""},
+ {"R_SPARC_PC22", Const, 0, ""},
+ {"R_SPARC_PCPLT10", Const, 0, ""},
+ {"R_SPARC_PCPLT22", Const, 0, ""},
+ {"R_SPARC_PCPLT32", Const, 0, ""},
+ {"R_SPARC_PC_HH22", Const, 0, ""},
+ {"R_SPARC_PC_HM10", Const, 0, ""},
+ {"R_SPARC_PC_LM22", Const, 0, ""},
+ {"R_SPARC_PLT32", Const, 0, ""},
+ {"R_SPARC_PLT64", Const, 0, ""},
+ {"R_SPARC_REGISTER", Const, 0, ""},
+ {"R_SPARC_RELATIVE", Const, 0, ""},
+ {"R_SPARC_UA16", Const, 0, ""},
+ {"R_SPARC_UA32", Const, 0, ""},
+ {"R_SPARC_UA64", Const, 0, ""},
+ {"R_SPARC_WDISP16", Const, 0, ""},
+ {"R_SPARC_WDISP19", Const, 0, ""},
+ {"R_SPARC_WDISP22", Const, 0, ""},
+ {"R_SPARC_WDISP30", Const, 0, ""},
+ {"R_SPARC_WPLT30", Const, 0, ""},
+ {"R_SYM32", Func, 0, "func(info uint32) uint32"},
+ {"R_SYM64", Func, 0, "func(info uint64) uint32"},
+ {"R_TYPE32", Func, 0, "func(info uint32) uint32"},
+ {"R_TYPE64", Func, 0, "func(info uint64) uint32"},
+ {"R_X86_64", Type, 0, ""},
+ {"R_X86_64_16", Const, 0, ""},
+ {"R_X86_64_32", Const, 0, ""},
+ {"R_X86_64_32S", Const, 0, ""},
+ {"R_X86_64_64", Const, 0, ""},
+ {"R_X86_64_8", Const, 0, ""},
+ {"R_X86_64_COPY", Const, 0, ""},
+ {"R_X86_64_DTPMOD64", Const, 0, ""},
+ {"R_X86_64_DTPOFF32", Const, 0, ""},
+ {"R_X86_64_DTPOFF64", Const, 0, ""},
+ {"R_X86_64_GLOB_DAT", Const, 0, ""},
+ {"R_X86_64_GOT32", Const, 0, ""},
+ {"R_X86_64_GOT64", Const, 10, ""},
+ {"R_X86_64_GOTOFF64", Const, 10, ""},
+ {"R_X86_64_GOTPC32", Const, 10, ""},
+ {"R_X86_64_GOTPC32_TLSDESC", Const, 10, ""},
+ {"R_X86_64_GOTPC64", Const, 10, ""},
+ {"R_X86_64_GOTPCREL", Const, 0, ""},
+ {"R_X86_64_GOTPCREL64", Const, 10, ""},
+ {"R_X86_64_GOTPCRELX", Const, 10, ""},
+ {"R_X86_64_GOTPLT64", Const, 10, ""},
+ {"R_X86_64_GOTTPOFF", Const, 0, ""},
+ {"R_X86_64_IRELATIVE", Const, 10, ""},
+ {"R_X86_64_JMP_SLOT", Const, 0, ""},
+ {"R_X86_64_NONE", Const, 0, ""},
+ {"R_X86_64_PC16", Const, 0, ""},
+ {"R_X86_64_PC32", Const, 0, ""},
+ {"R_X86_64_PC32_BND", Const, 10, ""},
+ {"R_X86_64_PC64", Const, 10, ""},
+ {"R_X86_64_PC8", Const, 0, ""},
+ {"R_X86_64_PLT32", Const, 0, ""},
+ {"R_X86_64_PLT32_BND", Const, 10, ""},
+ {"R_X86_64_PLTOFF64", Const, 10, ""},
+ {"R_X86_64_RELATIVE", Const, 0, ""},
+ {"R_X86_64_RELATIVE64", Const, 10, ""},
+ {"R_X86_64_REX_GOTPCRELX", Const, 10, ""},
+ {"R_X86_64_SIZE32", Const, 10, ""},
+ {"R_X86_64_SIZE64", Const, 10, ""},
+ {"R_X86_64_TLSDESC", Const, 10, ""},
+ {"R_X86_64_TLSDESC_CALL", Const, 10, ""},
+ {"R_X86_64_TLSGD", Const, 0, ""},
+ {"R_X86_64_TLSLD", Const, 0, ""},
+ {"R_X86_64_TPOFF32", Const, 0, ""},
+ {"R_X86_64_TPOFF64", Const, 0, ""},
+ {"Rel32", Type, 0, ""},
+ {"Rel32.Info", Field, 0, ""},
+ {"Rel32.Off", Field, 0, ""},
+ {"Rel64", Type, 0, ""},
+ {"Rel64.Info", Field, 0, ""},
+ {"Rel64.Off", Field, 0, ""},
+ {"Rela32", Type, 0, ""},
+ {"Rela32.Addend", Field, 0, ""},
+ {"Rela32.Info", Field, 0, ""},
+ {"Rela32.Off", Field, 0, ""},
+ {"Rela64", Type, 0, ""},
+ {"Rela64.Addend", Field, 0, ""},
+ {"Rela64.Info", Field, 0, ""},
+ {"Rela64.Off", Field, 0, ""},
+ {"SHF_ALLOC", Const, 0, ""},
+ {"SHF_COMPRESSED", Const, 6, ""},
+ {"SHF_EXECINSTR", Const, 0, ""},
+ {"SHF_GROUP", Const, 0, ""},
+ {"SHF_INFO_LINK", Const, 0, ""},
+ {"SHF_LINK_ORDER", Const, 0, ""},
+ {"SHF_MASKOS", Const, 0, ""},
+ {"SHF_MASKPROC", Const, 0, ""},
+ {"SHF_MERGE", Const, 0, ""},
+ {"SHF_OS_NONCONFORMING", Const, 0, ""},
+ {"SHF_STRINGS", Const, 0, ""},
+ {"SHF_TLS", Const, 0, ""},
+ {"SHF_WRITE", Const, 0, ""},
+ {"SHN_ABS", Const, 0, ""},
+ {"SHN_COMMON", Const, 0, ""},
+ {"SHN_HIOS", Const, 0, ""},
+ {"SHN_HIPROC", Const, 0, ""},
+ {"SHN_HIRESERVE", Const, 0, ""},
+ {"SHN_LOOS", Const, 0, ""},
+ {"SHN_LOPROC", Const, 0, ""},
+ {"SHN_LORESERVE", Const, 0, ""},
+ {"SHN_UNDEF", Const, 0, ""},
+ {"SHN_XINDEX", Const, 0, ""},
+ {"SHT_DYNAMIC", Const, 0, ""},
+ {"SHT_DYNSYM", Const, 0, ""},
+ {"SHT_FINI_ARRAY", Const, 0, ""},
+ {"SHT_GNU_ATTRIBUTES", Const, 0, ""},
+ {"SHT_GNU_HASH", Const, 0, ""},
+ {"SHT_GNU_LIBLIST", Const, 0, ""},
+ {"SHT_GNU_VERDEF", Const, 0, ""},
+ {"SHT_GNU_VERNEED", Const, 0, ""},
+ {"SHT_GNU_VERSYM", Const, 0, ""},
+ {"SHT_GROUP", Const, 0, ""},
+ {"SHT_HASH", Const, 0, ""},
+ {"SHT_HIOS", Const, 0, ""},
+ {"SHT_HIPROC", Const, 0, ""},
+ {"SHT_HIUSER", Const, 0, ""},
+ {"SHT_INIT_ARRAY", Const, 0, ""},
+ {"SHT_LOOS", Const, 0, ""},
+ {"SHT_LOPROC", Const, 0, ""},
+ {"SHT_LOUSER", Const, 0, ""},
+ {"SHT_MIPS_ABIFLAGS", Const, 17, ""},
+ {"SHT_NOBITS", Const, 0, ""},
+ {"SHT_NOTE", Const, 0, ""},
+ {"SHT_NULL", Const, 0, ""},
+ {"SHT_PREINIT_ARRAY", Const, 0, ""},
+ {"SHT_PROGBITS", Const, 0, ""},
+ {"SHT_REL", Const, 0, ""},
+ {"SHT_RELA", Const, 0, ""},
+ {"SHT_RISCV_ATTRIBUTES", Const, 25, ""},
+ {"SHT_SHLIB", Const, 0, ""},
+ {"SHT_STRTAB", Const, 0, ""},
+ {"SHT_SYMTAB", Const, 0, ""},
+ {"SHT_SYMTAB_SHNDX", Const, 0, ""},
+ {"STB_GLOBAL", Const, 0, ""},
+ {"STB_HIOS", Const, 0, ""},
+ {"STB_HIPROC", Const, 0, ""},
+ {"STB_LOCAL", Const, 0, ""},
+ {"STB_LOOS", Const, 0, ""},
+ {"STB_LOPROC", Const, 0, ""},
+ {"STB_WEAK", Const, 0, ""},
+ {"STT_COMMON", Const, 0, ""},
+ {"STT_FILE", Const, 0, ""},
+ {"STT_FUNC", Const, 0, ""},
+ {"STT_GNU_IFUNC", Const, 23, ""},
+ {"STT_HIOS", Const, 0, ""},
+ {"STT_HIPROC", Const, 0, ""},
+ {"STT_LOOS", Const, 0, ""},
+ {"STT_LOPROC", Const, 0, ""},
+ {"STT_NOTYPE", Const, 0, ""},
+ {"STT_OBJECT", Const, 0, ""},
+ {"STT_RELC", Const, 23, ""},
+ {"STT_SECTION", Const, 0, ""},
+ {"STT_SRELC", Const, 23, ""},
+ {"STT_TLS", Const, 0, ""},
+ {"STV_DEFAULT", Const, 0, ""},
+ {"STV_HIDDEN", Const, 0, ""},
+ {"STV_INTERNAL", Const, 0, ""},
+ {"STV_PROTECTED", Const, 0, ""},
+ {"ST_BIND", Func, 0, "func(info uint8) SymBind"},
+ {"ST_INFO", Func, 0, "func(bind SymBind, typ SymType) uint8"},
+ {"ST_TYPE", Func, 0, "func(info uint8) SymType"},
+ {"ST_VISIBILITY", Func, 0, "func(other uint8) SymVis"},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"Section32", Type, 0, ""},
+ {"Section32.Addr", Field, 0, ""},
+ {"Section32.Addralign", Field, 0, ""},
+ {"Section32.Entsize", Field, 0, ""},
+ {"Section32.Flags", Field, 0, ""},
+ {"Section32.Info", Field, 0, ""},
+ {"Section32.Link", Field, 0, ""},
+ {"Section32.Name", Field, 0, ""},
+ {"Section32.Off", Field, 0, ""},
+ {"Section32.Size", Field, 0, ""},
+ {"Section32.Type", Field, 0, ""},
+ {"Section64", Type, 0, ""},
+ {"Section64.Addr", Field, 0, ""},
+ {"Section64.Addralign", Field, 0, ""},
+ {"Section64.Entsize", Field, 0, ""},
+ {"Section64.Flags", Field, 0, ""},
+ {"Section64.Info", Field, 0, ""},
+ {"Section64.Link", Field, 0, ""},
+ {"Section64.Name", Field, 0, ""},
+ {"Section64.Off", Field, 0, ""},
+ {"Section64.Size", Field, 0, ""},
+ {"Section64.Type", Field, 0, ""},
+ {"SectionFlag", Type, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Addr", Field, 0, ""},
+ {"SectionHeader.Addralign", Field, 0, ""},
+ {"SectionHeader.Entsize", Field, 0, ""},
+ {"SectionHeader.FileSize", Field, 6, ""},
+ {"SectionHeader.Flags", Field, 0, ""},
+ {"SectionHeader.Info", Field, 0, ""},
+ {"SectionHeader.Link", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"SectionHeader.Type", Field, 0, ""},
+ {"SectionIndex", Type, 0, ""},
+ {"SectionType", Type, 0, ""},
+ {"Sym32", Type, 0, ""},
+ {"Sym32.Info", Field, 0, ""},
+ {"Sym32.Name", Field, 0, ""},
+ {"Sym32.Other", Field, 0, ""},
+ {"Sym32.Shndx", Field, 0, ""},
+ {"Sym32.Size", Field, 0, ""},
+ {"Sym32.Value", Field, 0, ""},
+ {"Sym32Size", Const, 0, ""},
+ {"Sym64", Type, 0, ""},
+ {"Sym64.Info", Field, 0, ""},
+ {"Sym64.Name", Field, 0, ""},
+ {"Sym64.Other", Field, 0, ""},
+ {"Sym64.Shndx", Field, 0, ""},
+ {"Sym64.Size", Field, 0, ""},
+ {"Sym64.Value", Field, 0, ""},
+ {"Sym64Size", Const, 0, ""},
+ {"SymBind", Type, 0, ""},
+ {"SymType", Type, 0, ""},
+ {"SymVis", Type, 0, ""},
+ {"Symbol", Type, 0, ""},
+ {"Symbol.HasVersion", Field, 24, ""},
+ {"Symbol.Info", Field, 0, ""},
+ {"Symbol.Library", Field, 13, ""},
+ {"Symbol.Name", Field, 0, ""},
+ {"Symbol.Other", Field, 0, ""},
+ {"Symbol.Section", Field, 0, ""},
+ {"Symbol.Size", Field, 0, ""},
+ {"Symbol.Value", Field, 0, ""},
+ {"Symbol.Version", Field, 13, ""},
+ {"Symbol.VersionIndex", Field, 24, ""},
+ {"Type", Type, 0, ""},
+ {"VER_FLG_BASE", Const, 24, ""},
+ {"VER_FLG_INFO", Const, 24, ""},
+ {"VER_FLG_WEAK", Const, 24, ""},
+ {"Version", Type, 0, ""},
+ {"VersionIndex", Type, 24, ""},
+ },
+ "debug/gosym": {
+ {"(*DecodingError).Error", Method, 0, ""},
+ {"(*LineTable).LineToPC", Method, 0, ""},
+ {"(*LineTable).PCToLine", Method, 0, ""},
+ {"(*Sym).BaseName", Method, 0, ""},
+ {"(*Sym).PackageName", Method, 0, ""},
+ {"(*Sym).ReceiverName", Method, 0, ""},
+ {"(*Sym).Static", Method, 0, ""},
+ {"(*Table).LineToPC", Method, 0, ""},
+ {"(*Table).LookupFunc", Method, 0, ""},
+ {"(*Table).LookupSym", Method, 0, ""},
+ {"(*Table).PCToFunc", Method, 0, ""},
+ {"(*Table).PCToLine", Method, 0, ""},
+ {"(*Table).SymByAddr", Method, 0, ""},
+ {"(*UnknownLineError).Error", Method, 0, ""},
+ {"(Func).BaseName", Method, 0, ""},
+ {"(Func).PackageName", Method, 0, ""},
+ {"(Func).ReceiverName", Method, 0, ""},
+ {"(Func).Static", Method, 0, ""},
+ {"(UnknownFileError).Error", Method, 0, ""},
+ {"DecodingError", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Func.End", Field, 0, ""},
+ {"Func.Entry", Field, 0, ""},
+ {"Func.FrameSize", Field, 0, ""},
+ {"Func.LineTable", Field, 0, ""},
+ {"Func.Locals", Field, 0, ""},
+ {"Func.Obj", Field, 0, ""},
+ {"Func.Params", Field, 0, ""},
+ {"Func.Sym", Field, 0, ""},
+ {"LineTable", Type, 0, ""},
+ {"LineTable.Data", Field, 0, ""},
+ {"LineTable.Line", Field, 0, ""},
+ {"LineTable.PC", Field, 0, ""},
+ {"NewLineTable", Func, 0, "func(data []byte, text uint64) *LineTable"},
+ {"NewTable", Func, 0, "func(symtab []byte, pcln *LineTable) (*Table, error)"},
+ {"Obj", Type, 0, ""},
+ {"Obj.Funcs", Field, 0, ""},
+ {"Obj.Paths", Field, 0, ""},
+ {"Sym", Type, 0, ""},
+ {"Sym.Func", Field, 0, ""},
+ {"Sym.GoType", Field, 0, ""},
+ {"Sym.Name", Field, 0, ""},
+ {"Sym.Type", Field, 0, ""},
+ {"Sym.Value", Field, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Table.Files", Field, 0, ""},
+ {"Table.Funcs", Field, 0, ""},
+ {"Table.Objs", Field, 0, ""},
+ {"Table.Syms", Field, 0, ""},
+ {"UnknownFileError", Type, 0, ""},
+ {"UnknownLineError", Type, 0, ""},
+ {"UnknownLineError.File", Field, 0, ""},
+ {"UnknownLineError.Line", Field, 0, ""},
+ },
+ "debug/macho": {
+ {"(*FatFile).Close", Method, 3, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*File).Segment", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(*Segment).Data", Method, 0, ""},
+ {"(*Segment).Open", Method, 0, ""},
+ {"(Cpu).GoString", Method, 0, ""},
+ {"(Cpu).String", Method, 0, ""},
+ {"(Dylib).Raw", Method, 0, ""},
+ {"(Dysymtab).Raw", Method, 0, ""},
+ {"(FatArch).Close", Method, 3, ""},
+ {"(FatArch).DWARF", Method, 3, ""},
+ {"(FatArch).ImportedLibraries", Method, 3, ""},
+ {"(FatArch).ImportedSymbols", Method, 3, ""},
+ {"(FatArch).Section", Method, 3, ""},
+ {"(FatArch).Segment", Method, 3, ""},
+ {"(LoadBytes).Raw", Method, 0, ""},
+ {"(LoadCmd).GoString", Method, 0, ""},
+ {"(LoadCmd).String", Method, 0, ""},
+ {"(RelocTypeARM).GoString", Method, 10, ""},
+ {"(RelocTypeARM).String", Method, 10, ""},
+ {"(RelocTypeARM64).GoString", Method, 10, ""},
+ {"(RelocTypeARM64).String", Method, 10, ""},
+ {"(RelocTypeGeneric).GoString", Method, 10, ""},
+ {"(RelocTypeGeneric).String", Method, 10, ""},
+ {"(RelocTypeX86_64).GoString", Method, 10, ""},
+ {"(RelocTypeX86_64).String", Method, 10, ""},
+ {"(Rpath).Raw", Method, 10, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(Segment).Raw", Method, 0, ""},
+ {"(Segment).ReadAt", Method, 0, ""},
+ {"(Symtab).Raw", Method, 0, ""},
+ {"(Type).GoString", Method, 10, ""},
+ {"(Type).String", Method, 10, ""},
+ {"ARM64_RELOC_ADDEND", Const, 10, ""},
+ {"ARM64_RELOC_BRANCH26", Const, 10, ""},
+ {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_POINTER_TO_GOT", Const, 10, ""},
+ {"ARM64_RELOC_SUBTRACTOR", Const, 10, ""},
+ {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10, ""},
+ {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10, ""},
+ {"ARM64_RELOC_UNSIGNED", Const, 10, ""},
+ {"ARM_RELOC_BR24", Const, 10, ""},
+ {"ARM_RELOC_HALF", Const, 10, ""},
+ {"ARM_RELOC_HALF_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_PAIR", Const, 10, ""},
+ {"ARM_RELOC_PB_LA_PTR", Const, 10, ""},
+ {"ARM_RELOC_SECTDIFF", Const, 10, ""},
+ {"ARM_RELOC_VANILLA", Const, 10, ""},
+ {"ARM_THUMB_32BIT_BRANCH", Const, 10, ""},
+ {"ARM_THUMB_RELOC_BR22", Const, 10, ""},
+ {"Cpu", Type, 0, ""},
+ {"Cpu386", Const, 0, ""},
+ {"CpuAmd64", Const, 0, ""},
+ {"CpuArm", Const, 3, ""},
+ {"CpuArm64", Const, 11, ""},
+ {"CpuPpc", Const, 3, ""},
+ {"CpuPpc64", Const, 3, ""},
+ {"Dylib", Type, 0, ""},
+ {"Dylib.CompatVersion", Field, 0, ""},
+ {"Dylib.CurrentVersion", Field, 0, ""},
+ {"Dylib.LoadBytes", Field, 0, ""},
+ {"Dylib.Name", Field, 0, ""},
+ {"Dylib.Time", Field, 0, ""},
+ {"DylibCmd", Type, 0, ""},
+ {"DylibCmd.Cmd", Field, 0, ""},
+ {"DylibCmd.CompatVersion", Field, 0, ""},
+ {"DylibCmd.CurrentVersion", Field, 0, ""},
+ {"DylibCmd.Len", Field, 0, ""},
+ {"DylibCmd.Name", Field, 0, ""},
+ {"DylibCmd.Time", Field, 0, ""},
+ {"Dysymtab", Type, 0, ""},
+ {"Dysymtab.DysymtabCmd", Field, 0, ""},
+ {"Dysymtab.IndirectSyms", Field, 0, ""},
+ {"Dysymtab.LoadBytes", Field, 0, ""},
+ {"DysymtabCmd", Type, 0, ""},
+ {"DysymtabCmd.Cmd", Field, 0, ""},
+ {"DysymtabCmd.Extrefsymoff", Field, 0, ""},
+ {"DysymtabCmd.Extreloff", Field, 0, ""},
+ {"DysymtabCmd.Iextdefsym", Field, 0, ""},
+ {"DysymtabCmd.Ilocalsym", Field, 0, ""},
+ {"DysymtabCmd.Indirectsymoff", Field, 0, ""},
+ {"DysymtabCmd.Iundefsym", Field, 0, ""},
+ {"DysymtabCmd.Len", Field, 0, ""},
+ {"DysymtabCmd.Locreloff", Field, 0, ""},
+ {"DysymtabCmd.Modtaboff", Field, 0, ""},
+ {"DysymtabCmd.Nextdefsym", Field, 0, ""},
+ {"DysymtabCmd.Nextrefsyms", Field, 0, ""},
+ {"DysymtabCmd.Nextrel", Field, 0, ""},
+ {"DysymtabCmd.Nindirectsyms", Field, 0, ""},
+ {"DysymtabCmd.Nlocalsym", Field, 0, ""},
+ {"DysymtabCmd.Nlocrel", Field, 0, ""},
+ {"DysymtabCmd.Nmodtab", Field, 0, ""},
+ {"DysymtabCmd.Ntoc", Field, 0, ""},
+ {"DysymtabCmd.Nundefsym", Field, 0, ""},
+ {"DysymtabCmd.Tocoffset", Field, 0, ""},
+ {"ErrNotFat", Var, 3, ""},
+ {"FatArch", Type, 3, ""},
+ {"FatArch.FatArchHeader", Field, 3, ""},
+ {"FatArch.File", Field, 3, ""},
+ {"FatArchHeader", Type, 3, ""},
+ {"FatArchHeader.Align", Field, 3, ""},
+ {"FatArchHeader.Cpu", Field, 3, ""},
+ {"FatArchHeader.Offset", Field, 3, ""},
+ {"FatArchHeader.Size", Field, 3, ""},
+ {"FatArchHeader.SubCpu", Field, 3, ""},
+ {"FatFile", Type, 3, ""},
+ {"FatFile.Arches", Field, 3, ""},
+ {"FatFile.Magic", Field, 3, ""},
+ {"File", Type, 0, ""},
+ {"File.ByteOrder", Field, 0, ""},
+ {"File.Dysymtab", Field, 0, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.Loads", Field, 0, ""},
+ {"File.Sections", Field, 0, ""},
+ {"File.Symtab", Field, 0, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Cmdsz", Field, 0, ""},
+ {"FileHeader.Cpu", Field, 0, ""},
+ {"FileHeader.Flags", Field, 0, ""},
+ {"FileHeader.Magic", Field, 0, ""},
+ {"FileHeader.Ncmd", Field, 0, ""},
+ {"FileHeader.SubCpu", Field, 0, ""},
+ {"FileHeader.Type", Field, 0, ""},
+ {"FlagAllModsBound", Const, 10, ""},
+ {"FlagAllowStackExecution", Const, 10, ""},
+ {"FlagAppExtensionSafe", Const, 10, ""},
+ {"FlagBindAtLoad", Const, 10, ""},
+ {"FlagBindsToWeak", Const, 10, ""},
+ {"FlagCanonical", Const, 10, ""},
+ {"FlagDeadStrippableDylib", Const, 10, ""},
+ {"FlagDyldLink", Const, 10, ""},
+ {"FlagForceFlat", Const, 10, ""},
+ {"FlagHasTLVDescriptors", Const, 10, ""},
+ {"FlagIncrLink", Const, 10, ""},
+ {"FlagLazyInit", Const, 10, ""},
+ {"FlagNoFixPrebinding", Const, 10, ""},
+ {"FlagNoHeapExecution", Const, 10, ""},
+ {"FlagNoMultiDefs", Const, 10, ""},
+ {"FlagNoReexportedDylibs", Const, 10, ""},
+ {"FlagNoUndefs", Const, 10, ""},
+ {"FlagPIE", Const, 10, ""},
+ {"FlagPrebindable", Const, 10, ""},
+ {"FlagPrebound", Const, 10, ""},
+ {"FlagRootSafe", Const, 10, ""},
+ {"FlagSetuidSafe", Const, 10, ""},
+ {"FlagSplitSegs", Const, 10, ""},
+ {"FlagSubsectionsViaSymbols", Const, 10, ""},
+ {"FlagTwoLevel", Const, 10, ""},
+ {"FlagWeakDefines", Const, 10, ""},
+ {"FormatError", Type, 0, ""},
+ {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10, ""},
+ {"GENERIC_RELOC_PAIR", Const, 10, ""},
+ {"GENERIC_RELOC_PB_LA_PTR", Const, 10, ""},
+ {"GENERIC_RELOC_SECTDIFF", Const, 10, ""},
+ {"GENERIC_RELOC_TLV", Const, 10, ""},
+ {"GENERIC_RELOC_VANILLA", Const, 10, ""},
+ {"Load", Type, 0, ""},
+ {"LoadBytes", Type, 0, ""},
+ {"LoadCmd", Type, 0, ""},
+ {"LoadCmdDylib", Const, 0, ""},
+ {"LoadCmdDylinker", Const, 0, ""},
+ {"LoadCmdDysymtab", Const, 0, ""},
+ {"LoadCmdRpath", Const, 10, ""},
+ {"LoadCmdSegment", Const, 0, ""},
+ {"LoadCmdSegment64", Const, 0, ""},
+ {"LoadCmdSymtab", Const, 0, ""},
+ {"LoadCmdThread", Const, 0, ""},
+ {"LoadCmdUnixThread", Const, 0, ""},
+ {"Magic32", Const, 0, ""},
+ {"Magic64", Const, 0, ""},
+ {"MagicFat", Const, 3, ""},
+ {"NewFatFile", Func, 3, "func(r io.ReaderAt) (*FatFile, error)"},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"Nlist32", Type, 0, ""},
+ {"Nlist32.Desc", Field, 0, ""},
+ {"Nlist32.Name", Field, 0, ""},
+ {"Nlist32.Sect", Field, 0, ""},
+ {"Nlist32.Type", Field, 0, ""},
+ {"Nlist32.Value", Field, 0, ""},
+ {"Nlist64", Type, 0, ""},
+ {"Nlist64.Desc", Field, 0, ""},
+ {"Nlist64.Name", Field, 0, ""},
+ {"Nlist64.Sect", Field, 0, ""},
+ {"Nlist64.Type", Field, 0, ""},
+ {"Nlist64.Value", Field, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OpenFat", Func, 3, "func(name string) (*FatFile, error)"},
+ {"Regs386", Type, 0, ""},
+ {"Regs386.AX", Field, 0, ""},
+ {"Regs386.BP", Field, 0, ""},
+ {"Regs386.BX", Field, 0, ""},
+ {"Regs386.CS", Field, 0, ""},
+ {"Regs386.CX", Field, 0, ""},
+ {"Regs386.DI", Field, 0, ""},
+ {"Regs386.DS", Field, 0, ""},
+ {"Regs386.DX", Field, 0, ""},
+ {"Regs386.ES", Field, 0, ""},
+ {"Regs386.FLAGS", Field, 0, ""},
+ {"Regs386.FS", Field, 0, ""},
+ {"Regs386.GS", Field, 0, ""},
+ {"Regs386.IP", Field, 0, ""},
+ {"Regs386.SI", Field, 0, ""},
+ {"Regs386.SP", Field, 0, ""},
+ {"Regs386.SS", Field, 0, ""},
+ {"RegsAMD64", Type, 0, ""},
+ {"RegsAMD64.AX", Field, 0, ""},
+ {"RegsAMD64.BP", Field, 0, ""},
+ {"RegsAMD64.BX", Field, 0, ""},
+ {"RegsAMD64.CS", Field, 0, ""},
+ {"RegsAMD64.CX", Field, 0, ""},
+ {"RegsAMD64.DI", Field, 0, ""},
+ {"RegsAMD64.DX", Field, 0, ""},
+ {"RegsAMD64.FLAGS", Field, 0, ""},
+ {"RegsAMD64.FS", Field, 0, ""},
+ {"RegsAMD64.GS", Field, 0, ""},
+ {"RegsAMD64.IP", Field, 0, ""},
+ {"RegsAMD64.R10", Field, 0, ""},
+ {"RegsAMD64.R11", Field, 0, ""},
+ {"RegsAMD64.R12", Field, 0, ""},
+ {"RegsAMD64.R13", Field, 0, ""},
+ {"RegsAMD64.R14", Field, 0, ""},
+ {"RegsAMD64.R15", Field, 0, ""},
+ {"RegsAMD64.R8", Field, 0, ""},
+ {"RegsAMD64.R9", Field, 0, ""},
+ {"RegsAMD64.SI", Field, 0, ""},
+ {"RegsAMD64.SP", Field, 0, ""},
+ {"Reloc", Type, 10, ""},
+ {"Reloc.Addr", Field, 10, ""},
+ {"Reloc.Extern", Field, 10, ""},
+ {"Reloc.Len", Field, 10, ""},
+ {"Reloc.Pcrel", Field, 10, ""},
+ {"Reloc.Scattered", Field, 10, ""},
+ {"Reloc.Type", Field, 10, ""},
+ {"Reloc.Value", Field, 10, ""},
+ {"RelocTypeARM", Type, 10, ""},
+ {"RelocTypeARM64", Type, 10, ""},
+ {"RelocTypeGeneric", Type, 10, ""},
+ {"RelocTypeX86_64", Type, 10, ""},
+ {"Rpath", Type, 10, ""},
+ {"Rpath.LoadBytes", Field, 10, ""},
+ {"Rpath.Path", Field, 10, ""},
+ {"RpathCmd", Type, 10, ""},
+ {"RpathCmd.Cmd", Field, 10, ""},
+ {"RpathCmd.Len", Field, 10, ""},
+ {"RpathCmd.Path", Field, 10, ""},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.Relocs", Field, 10, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"Section32", Type, 0, ""},
+ {"Section32.Addr", Field, 0, ""},
+ {"Section32.Align", Field, 0, ""},
+ {"Section32.Flags", Field, 0, ""},
+ {"Section32.Name", Field, 0, ""},
+ {"Section32.Nreloc", Field, 0, ""},
+ {"Section32.Offset", Field, 0, ""},
+ {"Section32.Reloff", Field, 0, ""},
+ {"Section32.Reserve1", Field, 0, ""},
+ {"Section32.Reserve2", Field, 0, ""},
+ {"Section32.Seg", Field, 0, ""},
+ {"Section32.Size", Field, 0, ""},
+ {"Section64", Type, 0, ""},
+ {"Section64.Addr", Field, 0, ""},
+ {"Section64.Align", Field, 0, ""},
+ {"Section64.Flags", Field, 0, ""},
+ {"Section64.Name", Field, 0, ""},
+ {"Section64.Nreloc", Field, 0, ""},
+ {"Section64.Offset", Field, 0, ""},
+ {"Section64.Reloff", Field, 0, ""},
+ {"Section64.Reserve1", Field, 0, ""},
+ {"Section64.Reserve2", Field, 0, ""},
+ {"Section64.Reserve3", Field, 0, ""},
+ {"Section64.Seg", Field, 0, ""},
+ {"Section64.Size", Field, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Addr", Field, 0, ""},
+ {"SectionHeader.Align", Field, 0, ""},
+ {"SectionHeader.Flags", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.Nreloc", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.Reloff", Field, 0, ""},
+ {"SectionHeader.Seg", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"Segment", Type, 0, ""},
+ {"Segment.LoadBytes", Field, 0, ""},
+ {"Segment.ReaderAt", Field, 0, ""},
+ {"Segment.SegmentHeader", Field, 0, ""},
+ {"Segment32", Type, 0, ""},
+ {"Segment32.Addr", Field, 0, ""},
+ {"Segment32.Cmd", Field, 0, ""},
+ {"Segment32.Filesz", Field, 0, ""},
+ {"Segment32.Flag", Field, 0, ""},
+ {"Segment32.Len", Field, 0, ""},
+ {"Segment32.Maxprot", Field, 0, ""},
+ {"Segment32.Memsz", Field, 0, ""},
+ {"Segment32.Name", Field, 0, ""},
+ {"Segment32.Nsect", Field, 0, ""},
+ {"Segment32.Offset", Field, 0, ""},
+ {"Segment32.Prot", Field, 0, ""},
+ {"Segment64", Type, 0, ""},
+ {"Segment64.Addr", Field, 0, ""},
+ {"Segment64.Cmd", Field, 0, ""},
+ {"Segment64.Filesz", Field, 0, ""},
+ {"Segment64.Flag", Field, 0, ""},
+ {"Segment64.Len", Field, 0, ""},
+ {"Segment64.Maxprot", Field, 0, ""},
+ {"Segment64.Memsz", Field, 0, ""},
+ {"Segment64.Name", Field, 0, ""},
+ {"Segment64.Nsect", Field, 0, ""},
+ {"Segment64.Offset", Field, 0, ""},
+ {"Segment64.Prot", Field, 0, ""},
+ {"SegmentHeader", Type, 0, ""},
+ {"SegmentHeader.Addr", Field, 0, ""},
+ {"SegmentHeader.Cmd", Field, 0, ""},
+ {"SegmentHeader.Filesz", Field, 0, ""},
+ {"SegmentHeader.Flag", Field, 0, ""},
+ {"SegmentHeader.Len", Field, 0, ""},
+ {"SegmentHeader.Maxprot", Field, 0, ""},
+ {"SegmentHeader.Memsz", Field, 0, ""},
+ {"SegmentHeader.Name", Field, 0, ""},
+ {"SegmentHeader.Nsect", Field, 0, ""},
+ {"SegmentHeader.Offset", Field, 0, ""},
+ {"SegmentHeader.Prot", Field, 0, ""},
+ {"Symbol", Type, 0, ""},
+ {"Symbol.Desc", Field, 0, ""},
+ {"Symbol.Name", Field, 0, ""},
+ {"Symbol.Sect", Field, 0, ""},
+ {"Symbol.Type", Field, 0, ""},
+ {"Symbol.Value", Field, 0, ""},
+ {"Symtab", Type, 0, ""},
+ {"Symtab.LoadBytes", Field, 0, ""},
+ {"Symtab.Syms", Field, 0, ""},
+ {"Symtab.SymtabCmd", Field, 0, ""},
+ {"SymtabCmd", Type, 0, ""},
+ {"SymtabCmd.Cmd", Field, 0, ""},
+ {"SymtabCmd.Len", Field, 0, ""},
+ {"SymtabCmd.Nsyms", Field, 0, ""},
+ {"SymtabCmd.Stroff", Field, 0, ""},
+ {"SymtabCmd.Strsize", Field, 0, ""},
+ {"SymtabCmd.Symoff", Field, 0, ""},
+ {"Thread", Type, 0, ""},
+ {"Thread.Cmd", Field, 0, ""},
+ {"Thread.Data", Field, 0, ""},
+ {"Thread.Len", Field, 0, ""},
+ {"Thread.Type", Field, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypeBundle", Const, 3, ""},
+ {"TypeDylib", Const, 3, ""},
+ {"TypeExec", Const, 0, ""},
+ {"TypeObj", Const, 0, ""},
+ {"X86_64_RELOC_BRANCH", Const, 10, ""},
+ {"X86_64_RELOC_GOT", Const, 10, ""},
+ {"X86_64_RELOC_GOT_LOAD", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_1", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_2", Const, 10, ""},
+ {"X86_64_RELOC_SIGNED_4", Const, 10, ""},
+ {"X86_64_RELOC_SUBTRACTOR", Const, 10, ""},
+ {"X86_64_RELOC_TLV", Const, 10, ""},
+ {"X86_64_RELOC_UNSIGNED", Const, 10, ""},
+ },
+ "debug/pe": {
+ {"(*COFFSymbol).FullName", Method, 8, ""},
+ {"(*File).COFFSymbolReadSectionDefAux", Method, 19, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).DWARF", Method, 0, ""},
+ {"(*File).ImportedLibraries", Method, 0, ""},
+ {"(*File).ImportedSymbols", Method, 0, ""},
+ {"(*File).Section", Method, 0, ""},
+ {"(*FormatError).Error", Method, 0, ""},
+ {"(*Section).Data", Method, 0, ""},
+ {"(*Section).Open", Method, 0, ""},
+ {"(Section).ReadAt", Method, 0, ""},
+ {"(StringTable).String", Method, 8, ""},
+ {"COFFSymbol", Type, 1, ""},
+ {"COFFSymbol.Name", Field, 1, ""},
+ {"COFFSymbol.NumberOfAuxSymbols", Field, 1, ""},
+ {"COFFSymbol.SectionNumber", Field, 1, ""},
+ {"COFFSymbol.StorageClass", Field, 1, ""},
+ {"COFFSymbol.Type", Field, 1, ""},
+ {"COFFSymbol.Value", Field, 1, ""},
+ {"COFFSymbolAuxFormat5", Type, 19, ""},
+ {"COFFSymbolAuxFormat5.Checksum", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.NumLineNumbers", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.NumRelocs", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.SecNum", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.Selection", Field, 19, ""},
+ {"COFFSymbolAuxFormat5.Size", Field, 19, ""},
+ {"COFFSymbolSize", Const, 1, ""},
+ {"DataDirectory", Type, 3, ""},
+ {"DataDirectory.Size", Field, 3, ""},
+ {"DataDirectory.VirtualAddress", Field, 3, ""},
+ {"File", Type, 0, ""},
+ {"File.COFFSymbols", Field, 8, ""},
+ {"File.FileHeader", Field, 0, ""},
+ {"File.OptionalHeader", Field, 3, ""},
+ {"File.Sections", Field, 0, ""},
+ {"File.StringTable", Field, 8, ""},
+ {"File.Symbols", Field, 1, ""},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Characteristics", Field, 0, ""},
+ {"FileHeader.Machine", Field, 0, ""},
+ {"FileHeader.NumberOfSections", Field, 0, ""},
+ {"FileHeader.NumberOfSymbols", Field, 0, ""},
+ {"FileHeader.PointerToSymbolTable", Field, 0, ""},
+ {"FileHeader.SizeOfOptionalHeader", Field, 0, ""},
+ {"FileHeader.TimeDateStamp", Field, 0, ""},
+ {"FormatError", Type, 0, ""},
+ {"IMAGE_COMDAT_SELECT_ANY", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_ASSOCIATIVE", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_EXACT_MATCH", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_LARGEST", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_NODUPLICATES", Const, 19, ""},
+ {"IMAGE_COMDAT_SELECT_SAME_SIZE", Const, 19, ""},
+ {"IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_BASERELOC", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_DEBUG", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_EXCEPTION", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_EXPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_GLOBALPTR", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_IAT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_IMPORT", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_RESOURCE", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_SECURITY", Const, 11, ""},
+ {"IMAGE_DIRECTORY_ENTRY_TLS", Const, 11, ""},
+ {"IMAGE_DLLCHARACTERISTICS_APPCONTAINER", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_GUARD_CF", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_BIND", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NO_SEH", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_NX_COMPAT", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", Const, 15, ""},
+ {"IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", Const, 15, ""},
+ {"IMAGE_FILE_32BIT_MACHINE", Const, 15, ""},
+ {"IMAGE_FILE_AGGRESIVE_WS_TRIM", Const, 15, ""},
+ {"IMAGE_FILE_BYTES_REVERSED_HI", Const, 15, ""},
+ {"IMAGE_FILE_BYTES_REVERSED_LO", Const, 15, ""},
+ {"IMAGE_FILE_DEBUG_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_DLL", Const, 15, ""},
+ {"IMAGE_FILE_EXECUTABLE_IMAGE", Const, 15, ""},
+ {"IMAGE_FILE_LARGE_ADDRESS_AWARE", Const, 15, ""},
+ {"IMAGE_FILE_LINE_NUMS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_LOCAL_SYMS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_MACHINE_AM33", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_AMD64", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_ARM", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_ARM64", Const, 11, ""},
+ {"IMAGE_FILE_MACHINE_ARMNT", Const, 12, ""},
+ {"IMAGE_FILE_MACHINE_EBC", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_I386", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_IA64", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_LOONGARCH32", Const, 19, ""},
+ {"IMAGE_FILE_MACHINE_LOONGARCH64", Const, 19, ""},
+ {"IMAGE_FILE_MACHINE_M32R", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPS16", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPSFPU", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_MIPSFPU16", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_POWERPC", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_POWERPCFP", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_R4000", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_RISCV128", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_RISCV32", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_RISCV64", Const, 20, ""},
+ {"IMAGE_FILE_MACHINE_SH3", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH3DSP", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH4", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_SH5", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_THUMB", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_UNKNOWN", Const, 0, ""},
+ {"IMAGE_FILE_MACHINE_WCEMIPSV2", Const, 0, ""},
+ {"IMAGE_FILE_NET_RUN_FROM_SWAP", Const, 15, ""},
+ {"IMAGE_FILE_RELOCS_STRIPPED", Const, 15, ""},
+ {"IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", Const, 15, ""},
+ {"IMAGE_FILE_SYSTEM", Const, 15, ""},
+ {"IMAGE_FILE_UP_SYSTEM_ONLY", Const, 15, ""},
+ {"IMAGE_SCN_CNT_CODE", Const, 19, ""},
+ {"IMAGE_SCN_CNT_INITIALIZED_DATA", Const, 19, ""},
+ {"IMAGE_SCN_CNT_UNINITIALIZED_DATA", Const, 19, ""},
+ {"IMAGE_SCN_LNK_COMDAT", Const, 19, ""},
+ {"IMAGE_SCN_MEM_DISCARDABLE", Const, 19, ""},
+ {"IMAGE_SCN_MEM_EXECUTE", Const, 19, ""},
+ {"IMAGE_SCN_MEM_READ", Const, 19, ""},
+ {"IMAGE_SCN_MEM_WRITE", Const, 19, ""},
+ {"IMAGE_SUBSYSTEM_EFI_APPLICATION", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_ROM", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_NATIVE", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_NATIVE_WINDOWS", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_OS2_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_POSIX_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_UNKNOWN", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_CUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_WINDOWS_GUI", Const, 15, ""},
+ {"IMAGE_SUBSYSTEM_XBOX", Const, 15, ""},
+ {"ImportDirectory", Type, 0, ""},
+ {"ImportDirectory.FirstThunk", Field, 0, ""},
+ {"ImportDirectory.ForwarderChain", Field, 0, ""},
+ {"ImportDirectory.Name", Field, 0, ""},
+ {"ImportDirectory.OriginalFirstThunk", Field, 0, ""},
+ {"ImportDirectory.TimeDateStamp", Field, 0, ""},
+ {"NewFile", Func, 0, "func(r io.ReaderAt) (*File, error)"},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OptionalHeader32", Type, 3, ""},
+ {"OptionalHeader32.AddressOfEntryPoint", Field, 3, ""},
+ {"OptionalHeader32.BaseOfCode", Field, 3, ""},
+ {"OptionalHeader32.BaseOfData", Field, 3, ""},
+ {"OptionalHeader32.CheckSum", Field, 3, ""},
+ {"OptionalHeader32.DataDirectory", Field, 3, ""},
+ {"OptionalHeader32.DllCharacteristics", Field, 3, ""},
+ {"OptionalHeader32.FileAlignment", Field, 3, ""},
+ {"OptionalHeader32.ImageBase", Field, 3, ""},
+ {"OptionalHeader32.LoaderFlags", Field, 3, ""},
+ {"OptionalHeader32.Magic", Field, 3, ""},
+ {"OptionalHeader32.MajorImageVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MajorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorImageVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader32.MinorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader32.NumberOfRvaAndSizes", Field, 3, ""},
+ {"OptionalHeader32.SectionAlignment", Field, 3, ""},
+ {"OptionalHeader32.SizeOfCode", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeaders", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeapCommit", Field, 3, ""},
+ {"OptionalHeader32.SizeOfHeapReserve", Field, 3, ""},
+ {"OptionalHeader32.SizeOfImage", Field, 3, ""},
+ {"OptionalHeader32.SizeOfInitializedData", Field, 3, ""},
+ {"OptionalHeader32.SizeOfStackCommit", Field, 3, ""},
+ {"OptionalHeader32.SizeOfStackReserve", Field, 3, ""},
+ {"OptionalHeader32.SizeOfUninitializedData", Field, 3, ""},
+ {"OptionalHeader32.Subsystem", Field, 3, ""},
+ {"OptionalHeader32.Win32VersionValue", Field, 3, ""},
+ {"OptionalHeader64", Type, 3, ""},
+ {"OptionalHeader64.AddressOfEntryPoint", Field, 3, ""},
+ {"OptionalHeader64.BaseOfCode", Field, 3, ""},
+ {"OptionalHeader64.CheckSum", Field, 3, ""},
+ {"OptionalHeader64.DataDirectory", Field, 3, ""},
+ {"OptionalHeader64.DllCharacteristics", Field, 3, ""},
+ {"OptionalHeader64.FileAlignment", Field, 3, ""},
+ {"OptionalHeader64.ImageBase", Field, 3, ""},
+ {"OptionalHeader64.LoaderFlags", Field, 3, ""},
+ {"OptionalHeader64.Magic", Field, 3, ""},
+ {"OptionalHeader64.MajorImageVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MajorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorImageVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorLinkerVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorOperatingSystemVersion", Field, 3, ""},
+ {"OptionalHeader64.MinorSubsystemVersion", Field, 3, ""},
+ {"OptionalHeader64.NumberOfRvaAndSizes", Field, 3, ""},
+ {"OptionalHeader64.SectionAlignment", Field, 3, ""},
+ {"OptionalHeader64.SizeOfCode", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeaders", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeapCommit", Field, 3, ""},
+ {"OptionalHeader64.SizeOfHeapReserve", Field, 3, ""},
+ {"OptionalHeader64.SizeOfImage", Field, 3, ""},
+ {"OptionalHeader64.SizeOfInitializedData", Field, 3, ""},
+ {"OptionalHeader64.SizeOfStackCommit", Field, 3, ""},
+ {"OptionalHeader64.SizeOfStackReserve", Field, 3, ""},
+ {"OptionalHeader64.SizeOfUninitializedData", Field, 3, ""},
+ {"OptionalHeader64.Subsystem", Field, 3, ""},
+ {"OptionalHeader64.Win32VersionValue", Field, 3, ""},
+ {"Reloc", Type, 8, ""},
+ {"Reloc.SymbolTableIndex", Field, 8, ""},
+ {"Reloc.Type", Field, 8, ""},
+ {"Reloc.VirtualAddress", Field, 8, ""},
+ {"Section", Type, 0, ""},
+ {"Section.ReaderAt", Field, 0, ""},
+ {"Section.Relocs", Field, 8, ""},
+ {"Section.SectionHeader", Field, 0, ""},
+ {"SectionHeader", Type, 0, ""},
+ {"SectionHeader.Characteristics", Field, 0, ""},
+ {"SectionHeader.Name", Field, 0, ""},
+ {"SectionHeader.NumberOfLineNumbers", Field, 0, ""},
+ {"SectionHeader.NumberOfRelocations", Field, 0, ""},
+ {"SectionHeader.Offset", Field, 0, ""},
+ {"SectionHeader.PointerToLineNumbers", Field, 0, ""},
+ {"SectionHeader.PointerToRelocations", Field, 0, ""},
+ {"SectionHeader.Size", Field, 0, ""},
+ {"SectionHeader.VirtualAddress", Field, 0, ""},
+ {"SectionHeader.VirtualSize", Field, 0, ""},
+ {"SectionHeader32", Type, 0, ""},
+ {"SectionHeader32.Characteristics", Field, 0, ""},
+ {"SectionHeader32.Name", Field, 0, ""},
+ {"SectionHeader32.NumberOfLineNumbers", Field, 0, ""},
+ {"SectionHeader32.NumberOfRelocations", Field, 0, ""},
+ {"SectionHeader32.PointerToLineNumbers", Field, 0, ""},
+ {"SectionHeader32.PointerToRawData", Field, 0, ""},
+ {"SectionHeader32.PointerToRelocations", Field, 0, ""},
+ {"SectionHeader32.SizeOfRawData", Field, 0, ""},
+ {"SectionHeader32.VirtualAddress", Field, 0, ""},
+ {"SectionHeader32.VirtualSize", Field, 0, ""},
+ {"StringTable", Type, 8, ""},
+ {"Symbol", Type, 1, ""},
+ {"Symbol.Name", Field, 1, ""},
+ {"Symbol.SectionNumber", Field, 1, ""},
+ {"Symbol.StorageClass", Field, 1, ""},
+ {"Symbol.Type", Field, 1, ""},
+ {"Symbol.Value", Field, 1, ""},
+ },
+ "debug/plan9obj": {
+ {"(*File).Close", Method, 3, ""},
+ {"(*File).Section", Method, 3, ""},
+ {"(*File).Symbols", Method, 3, ""},
+ {"(*Section).Data", Method, 3, ""},
+ {"(*Section).Open", Method, 3, ""},
+ {"(Section).ReadAt", Method, 3, ""},
+ {"ErrNoSymbols", Var, 18, ""},
+ {"File", Type, 3, ""},
+ {"File.FileHeader", Field, 3, ""},
+ {"File.Sections", Field, 3, ""},
+ {"FileHeader", Type, 3, ""},
+ {"FileHeader.Bss", Field, 3, ""},
+ {"FileHeader.Entry", Field, 3, ""},
+ {"FileHeader.HdrSize", Field, 4, ""},
+ {"FileHeader.LoadAddress", Field, 4, ""},
+ {"FileHeader.Magic", Field, 3, ""},
+ {"FileHeader.PtrSize", Field, 3, ""},
+ {"Magic386", Const, 3, ""},
+ {"Magic64", Const, 3, ""},
+ {"MagicAMD64", Const, 3, ""},
+ {"MagicARM", Const, 3, ""},
+ {"NewFile", Func, 3, "func(r io.ReaderAt) (*File, error)"},
+ {"Open", Func, 3, "func(name string) (*File, error)"},
+ {"Section", Type, 3, ""},
+ {"Section.ReaderAt", Field, 3, ""},
+ {"Section.SectionHeader", Field, 3, ""},
+ {"SectionHeader", Type, 3, ""},
+ {"SectionHeader.Name", Field, 3, ""},
+ {"SectionHeader.Offset", Field, 3, ""},
+ {"SectionHeader.Size", Field, 3, ""},
+ {"Sym", Type, 3, ""},
+ {"Sym.Name", Field, 3, ""},
+ {"Sym.Type", Field, 3, ""},
+ {"Sym.Value", Field, 3, ""},
+ },
+ "embed": {
+ {"(FS).Open", Method, 16, ""},
+ {"(FS).ReadDir", Method, 16, ""},
+ {"(FS).ReadFile", Method, 16, ""},
+ {"FS", Type, 16, ""},
+ },
+ "encoding": {
+ {"BinaryAppender", Type, 24, ""},
+ {"BinaryMarshaler", Type, 2, ""},
+ {"BinaryUnmarshaler", Type, 2, ""},
+ {"TextAppender", Type, 24, ""},
+ {"TextMarshaler", Type, 2, ""},
+ {"TextUnmarshaler", Type, 2, ""},
+ },
+ "encoding/ascii85": {
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Decode", Func, 0, "func(dst []byte, src []byte, flush bool) (ndst int, nsrc int, err error)"},
+ {"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+ {"MaxEncodedLen", Func, 0, "func(n int) int"},
+ {"NewDecoder", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ },
+ "encoding/asn1": {
+ {"(BitString).At", Method, 0, ""},
+ {"(BitString).RightAlign", Method, 0, ""},
+ {"(ObjectIdentifier).Equal", Method, 0, ""},
+ {"(ObjectIdentifier).String", Method, 3, ""},
+ {"(StructuralError).Error", Method, 0, ""},
+ {"(SyntaxError).Error", Method, 0, ""},
+ {"BitString", Type, 0, ""},
+ {"BitString.BitLength", Field, 0, ""},
+ {"BitString.Bytes", Field, 0, ""},
+ {"ClassApplication", Const, 6, ""},
+ {"ClassContextSpecific", Const, 6, ""},
+ {"ClassPrivate", Const, 6, ""},
+ {"ClassUniversal", Const, 6, ""},
+ {"Enumerated", Type, 0, ""},
+ {"Flag", Type, 0, ""},
+ {"Marshal", Func, 0, "func(val any) ([]byte, error)"},
+ {"MarshalWithParams", Func, 10, "func(val any, params string) ([]byte, error)"},
+ {"NullBytes", Var, 9, ""},
+ {"NullRawValue", Var, 9, ""},
+ {"ObjectIdentifier", Type, 0, ""},
+ {"RawContent", Type, 0, ""},
+ {"RawValue", Type, 0, ""},
+ {"RawValue.Bytes", Field, 0, ""},
+ {"RawValue.Class", Field, 0, ""},
+ {"RawValue.FullBytes", Field, 0, ""},
+ {"RawValue.IsCompound", Field, 0, ""},
+ {"RawValue.Tag", Field, 0, ""},
+ {"StructuralError", Type, 0, ""},
+ {"StructuralError.Msg", Field, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Msg", Field, 0, ""},
+ {"TagBMPString", Const, 14, ""},
+ {"TagBitString", Const, 6, ""},
+ {"TagBoolean", Const, 6, ""},
+ {"TagEnum", Const, 6, ""},
+ {"TagGeneralString", Const, 6, ""},
+ {"TagGeneralizedTime", Const, 6, ""},
+ {"TagIA5String", Const, 6, ""},
+ {"TagInteger", Const, 6, ""},
+ {"TagNull", Const, 9, ""},
+ {"TagNumericString", Const, 10, ""},
+ {"TagOID", Const, 6, ""},
+ {"TagOctetString", Const, 6, ""},
+ {"TagPrintableString", Const, 6, ""},
+ {"TagSequence", Const, 6, ""},
+ {"TagSet", Const, 6, ""},
+ {"TagT61String", Const, 6, ""},
+ {"TagUTCTime", Const, 6, ""},
+ {"TagUTF8String", Const, 6, ""},
+ {"Unmarshal", Func, 0, "func(b []byte, val any) (rest []byte, err error)"},
+ {"UnmarshalWithParams", Func, 0, "func(b []byte, val any, params string) (rest []byte, err error)"},
+ },
+ "encoding/base32": {
+ {"(*Encoding).AppendDecode", Method, 22, ""},
+ {"(*Encoding).AppendEncode", Method, 22, ""},
+ {"(*Encoding).Decode", Method, 0, ""},
+ {"(*Encoding).DecodeString", Method, 0, ""},
+ {"(*Encoding).DecodedLen", Method, 0, ""},
+ {"(*Encoding).Encode", Method, 0, ""},
+ {"(*Encoding).EncodeToString", Method, 0, ""},
+ {"(*Encoding).EncodedLen", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(Encoding).WithPadding", Method, 9, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Encoding", Type, 0, ""},
+ {"HexEncoding", Var, 0, ""},
+ {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+ {"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+ {"NoPadding", Const, 9, ""},
+ {"StdEncoding", Var, 0, ""},
+ {"StdPadding", Const, 9, ""},
+ },
+ "encoding/base64": {
+ {"(*Encoding).AppendDecode", Method, 22, ""},
+ {"(*Encoding).AppendEncode", Method, 22, ""},
+ {"(*Encoding).Decode", Method, 0, ""},
+ {"(*Encoding).DecodeString", Method, 0, ""},
+ {"(*Encoding).DecodedLen", Method, 0, ""},
+ {"(*Encoding).Encode", Method, 0, ""},
+ {"(*Encoding).EncodeToString", Method, 0, ""},
+ {"(*Encoding).EncodedLen", Method, 0, ""},
+ {"(CorruptInputError).Error", Method, 0, ""},
+ {"(Encoding).Strict", Method, 8, ""},
+ {"(Encoding).WithPadding", Method, 5, ""},
+ {"CorruptInputError", Type, 0, ""},
+ {"Encoding", Type, 0, ""},
+ {"NewDecoder", Func, 0, "func(enc *Encoding, r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 0, "func(enc *Encoding, w io.Writer) io.WriteCloser"},
+ {"NewEncoding", Func, 0, "func(encoder string) *Encoding"},
+ {"NoPadding", Const, 5, ""},
+ {"RawStdEncoding", Var, 5, ""},
+ {"RawURLEncoding", Var, 5, ""},
+ {"StdEncoding", Var, 0, ""},
+ {"StdPadding", Const, 5, ""},
+ {"URLEncoding", Var, 0, ""},
+ },
+ "encoding/binary": {
+ {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"},
+ {"AppendByteOrder", Type, 19, ""},
+ {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"},
+ {"AppendVarint", Func, 19, "func(buf []byte, x int64) []byte"},
+ {"BigEndian", Var, 0, ""},
+ {"ByteOrder", Type, 0, ""},
+ {"Decode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+ {"Encode", Func, 23, "func(buf []byte, order ByteOrder, data any) (int, error)"},
+ {"LittleEndian", Var, 0, ""},
+ {"MaxVarintLen16", Const, 0, ""},
+ {"MaxVarintLen32", Const, 0, ""},
+ {"MaxVarintLen64", Const, 0, ""},
+ {"NativeEndian", Var, 21, ""},
+ {"PutUvarint", Func, 0, "func(buf []byte, x uint64) int"},
+ {"PutVarint", Func, 0, "func(buf []byte, x int64) int"},
+ {"Read", Func, 0, "func(r io.Reader, order ByteOrder, data any) error"},
+ {"ReadUvarint", Func, 0, "func(r io.ByteReader) (uint64, error)"},
+ {"ReadVarint", Func, 0, "func(r io.ByteReader) (int64, error)"},
+ {"Size", Func, 0, "func(v any) int"},
+ {"Uvarint", Func, 0, "func(buf []byte) (uint64, int)"},
+ {"Varint", Func, 0, "func(buf []byte) (int64, int)"},
+ {"Write", Func, 0, "func(w io.Writer, order ByteOrder, data any) error"},
+ },
+ "encoding/csv": {
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*ParseError).Unwrap", Method, 13, ""},
+ {"(*Reader).FieldPos", Method, 17, ""},
+ {"(*Reader).InputOffset", Method, 19, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAll", Method, 0, ""},
+ {"(*Writer).Error", Method, 1, ""},
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"(*Writer).WriteAll", Method, 0, ""},
+ {"ErrBareQuote", Var, 0, ""},
+ {"ErrFieldCount", Var, 0, ""},
+ {"ErrQuote", Var, 0, ""},
+ {"ErrTrailingComma", Var, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Column", Field, 0, ""},
+ {"ParseError.Err", Field, 0, ""},
+ {"ParseError.Line", Field, 0, ""},
+ {"ParseError.StartLine", Field, 10, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.Comma", Field, 0, ""},
+ {"Reader.Comment", Field, 0, ""},
+ {"Reader.FieldsPerRecord", Field, 0, ""},
+ {"Reader.LazyQuotes", Field, 0, ""},
+ {"Reader.ReuseRecord", Field, 9, ""},
+ {"Reader.TrailingComma", Field, 0, ""},
+ {"Reader.TrimLeadingSpace", Field, 0, ""},
+ {"Writer", Type, 0, ""},
+ {"Writer.Comma", Field, 0, ""},
+ {"Writer.UseCRLF", Field, 0, ""},
+ },
+ "encoding/gob": {
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DecodeValue", Method, 0, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).EncodeValue", Method, 0, ""},
+ {"CommonType", Type, 0, ""},
+ {"CommonType.Id", Field, 0, ""},
+ {"CommonType.Name", Field, 0, ""},
+ {"Decoder", Type, 0, ""},
+ {"Encoder", Type, 0, ""},
+ {"GobDecoder", Type, 0, ""},
+ {"GobEncoder", Type, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"Register", Func, 0, "func(value any)"},
+ {"RegisterName", Func, 0, "func(name string, value any)"},
+ },
+ "encoding/hex": {
+ {"(InvalidByteError).Error", Method, 0, ""},
+ {"AppendDecode", Func, 22, "func(dst []byte, src []byte) ([]byte, error)"},
+ {"AppendEncode", Func, 22, "func(dst []byte, src []byte) []byte"},
+ {"Decode", Func, 0, "func(dst []byte, src []byte) (int, error)"},
+ {"DecodeString", Func, 0, "func(s string) ([]byte, error)"},
+ {"DecodedLen", Func, 0, "func(x int) int"},
+ {"Dump", Func, 0, "func(data []byte) string"},
+ {"Dumper", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ {"Encode", Func, 0, "func(dst []byte, src []byte) int"},
+ {"EncodeToString", Func, 0, "func(src []byte) string"},
+ {"EncodedLen", Func, 0, "func(n int) int"},
+ {"ErrLength", Var, 0, ""},
+ {"InvalidByteError", Type, 0, ""},
+ {"NewDecoder", Func, 10, "func(r io.Reader) io.Reader"},
+ {"NewEncoder", Func, 10, "func(w io.Writer) io.Writer"},
+ },
+ "encoding/json": {
+ {"(*Decoder).Buffered", Method, 1, ""},
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DisallowUnknownFields", Method, 10, ""},
+ {"(*Decoder).InputOffset", Method, 14, ""},
+ {"(*Decoder).More", Method, 5, ""},
+ {"(*Decoder).Token", Method, 5, ""},
+ {"(*Decoder).UseNumber", Method, 1, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).SetEscapeHTML", Method, 7, ""},
+ {"(*Encoder).SetIndent", Method, 7, ""},
+ {"(*InvalidUTF8Error).Error", Method, 0, ""},
+ {"(*InvalidUnmarshalError).Error", Method, 0, ""},
+ {"(*MarshalerError).Error", Method, 0, ""},
+ {"(*MarshalerError).Unwrap", Method, 13, ""},
+ {"(*RawMessage).MarshalJSON", Method, 0, ""},
+ {"(*RawMessage).UnmarshalJSON", Method, 0, ""},
+ {"(*SyntaxError).Error", Method, 0, ""},
+ {"(*UnmarshalFieldError).Error", Method, 0, ""},
+ {"(*UnmarshalTypeError).Error", Method, 0, ""},
+ {"(*UnsupportedTypeError).Error", Method, 0, ""},
+ {"(*UnsupportedValueError).Error", Method, 0, ""},
+ {"(Delim).String", Method, 5, ""},
+ {"(Number).Float64", Method, 1, ""},
+ {"(Number).Int64", Method, 1, ""},
+ {"(Number).String", Method, 1, ""},
+ {"(RawMessage).MarshalJSON", Method, 8, ""},
+ {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"},
+ {"Decoder", Type, 0, ""},
+ {"Delim", Type, 5, ""},
+ {"Encoder", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(dst *bytes.Buffer, src []byte)"},
+ {"Indent", Func, 0, "func(dst *bytes.Buffer, src []byte, prefix string, indent string) error"},
+ {"InvalidUTF8Error", Type, 0, ""},
+ {"InvalidUTF8Error.S", Field, 0, ""},
+ {"InvalidUnmarshalError", Type, 0, ""},
+ {"InvalidUnmarshalError.Type", Field, 0, ""},
+ {"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+ {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+ {"Marshaler", Type, 0, ""},
+ {"MarshalerError", Type, 0, ""},
+ {"MarshalerError.Err", Field, 0, ""},
+ {"MarshalerError.Type", Field, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"Number", Type, 1, ""},
+ {"RawMessage", Type, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Offset", Field, 0, ""},
+ {"Token", Type, 5, ""},
+ {"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+ {"UnmarshalFieldError", Type, 0, ""},
+ {"UnmarshalFieldError.Field", Field, 0, ""},
+ {"UnmarshalFieldError.Key", Field, 0, ""},
+ {"UnmarshalFieldError.Type", Field, 0, ""},
+ {"UnmarshalTypeError", Type, 0, ""},
+ {"UnmarshalTypeError.Field", Field, 8, ""},
+ {"UnmarshalTypeError.Offset", Field, 5, ""},
+ {"UnmarshalTypeError.Struct", Field, 8, ""},
+ {"UnmarshalTypeError.Type", Field, 0, ""},
+ {"UnmarshalTypeError.Value", Field, 0, ""},
+ {"Unmarshaler", Type, 0, ""},
+ {"UnsupportedTypeError", Type, 0, ""},
+ {"UnsupportedTypeError.Type", Field, 0, ""},
+ {"UnsupportedValueError", Type, 0, ""},
+ {"UnsupportedValueError.Str", Field, 0, ""},
+ {"UnsupportedValueError.Value", Field, 0, ""},
+ {"Valid", Func, 9, "func(data []byte) bool"},
+ },
+ "encoding/pem": {
+ {"Block", Type, 0, ""},
+ {"Block.Bytes", Field, 0, ""},
+ {"Block.Headers", Field, 0, ""},
+ {"Block.Type", Field, 0, ""},
+ {"Decode", Func, 0, "func(data []byte) (p *Block, rest []byte)"},
+ {"Encode", Func, 0, "func(out io.Writer, b *Block) error"},
+ {"EncodeToMemory", Func, 0, "func(b *Block) []byte"},
+ },
+ "encoding/xml": {
+ {"(*Decoder).Decode", Method, 0, ""},
+ {"(*Decoder).DecodeElement", Method, 0, ""},
+ {"(*Decoder).InputOffset", Method, 4, ""},
+ {"(*Decoder).InputPos", Method, 19, ""},
+ {"(*Decoder).RawToken", Method, 0, ""},
+ {"(*Decoder).Skip", Method, 0, ""},
+ {"(*Decoder).Token", Method, 0, ""},
+ {"(*Encoder).Close", Method, 20, ""},
+ {"(*Encoder).Encode", Method, 0, ""},
+ {"(*Encoder).EncodeElement", Method, 2, ""},
+ {"(*Encoder).EncodeToken", Method, 2, ""},
+ {"(*Encoder).Flush", Method, 2, ""},
+ {"(*Encoder).Indent", Method, 1, ""},
+ {"(*SyntaxError).Error", Method, 0, ""},
+ {"(*TagPathError).Error", Method, 0, ""},
+ {"(*UnsupportedTypeError).Error", Method, 0, ""},
+ {"(CharData).Copy", Method, 0, ""},
+ {"(Comment).Copy", Method, 0, ""},
+ {"(Directive).Copy", Method, 0, ""},
+ {"(ProcInst).Copy", Method, 0, ""},
+ {"(StartElement).Copy", Method, 0, ""},
+ {"(StartElement).End", Method, 2, ""},
+ {"(UnmarshalError).Error", Method, 0, ""},
+ {"Attr", Type, 0, ""},
+ {"Attr.Name", Field, 0, ""},
+ {"Attr.Value", Field, 0, ""},
+ {"CharData", Type, 0, ""},
+ {"Comment", Type, 0, ""},
+ {"CopyToken", Func, 0, "func(t Token) Token"},
+ {"Decoder", Type, 0, ""},
+ {"Decoder.AutoClose", Field, 0, ""},
+ {"Decoder.CharsetReader", Field, 0, ""},
+ {"Decoder.DefaultSpace", Field, 1, ""},
+ {"Decoder.Entity", Field, 0, ""},
+ {"Decoder.Strict", Field, 0, ""},
+ {"Directive", Type, 0, ""},
+ {"Encoder", Type, 0, ""},
+ {"EndElement", Type, 0, ""},
+ {"EndElement.Name", Field, 0, ""},
+ {"Escape", Func, 0, "func(w io.Writer, s []byte)"},
+ {"EscapeText", Func, 1, "func(w io.Writer, s []byte) error"},
+ {"HTMLAutoClose", Var, 0, ""},
+ {"HTMLEntity", Var, 0, ""},
+ {"Header", Const, 0, ""},
+ {"Marshal", Func, 0, "func(v any) ([]byte, error)"},
+ {"MarshalIndent", Func, 0, "func(v any, prefix string, indent string) ([]byte, error)"},
+ {"Marshaler", Type, 2, ""},
+ {"MarshalerAttr", Type, 2, ""},
+ {"Name", Type, 0, ""},
+ {"Name.Local", Field, 0, ""},
+ {"Name.Space", Field, 0, ""},
+ {"NewDecoder", Func, 0, "func(r io.Reader) *Decoder"},
+ {"NewEncoder", Func, 0, "func(w io.Writer) *Encoder"},
+ {"NewTokenDecoder", Func, 10, "func(t TokenReader) *Decoder"},
+ {"ProcInst", Type, 0, ""},
+ {"ProcInst.Inst", Field, 0, ""},
+ {"ProcInst.Target", Field, 0, ""},
+ {"StartElement", Type, 0, ""},
+ {"StartElement.Attr", Field, 0, ""},
+ {"StartElement.Name", Field, 0, ""},
+ {"SyntaxError", Type, 0, ""},
+ {"SyntaxError.Line", Field, 0, ""},
+ {"SyntaxError.Msg", Field, 0, ""},
+ {"TagPathError", Type, 0, ""},
+ {"TagPathError.Field1", Field, 0, ""},
+ {"TagPathError.Field2", Field, 0, ""},
+ {"TagPathError.Struct", Field, 0, ""},
+ {"TagPathError.Tag1", Field, 0, ""},
+ {"TagPathError.Tag2", Field, 0, ""},
+ {"Token", Type, 0, ""},
+ {"TokenReader", Type, 10, ""},
+ {"Unmarshal", Func, 0, "func(data []byte, v any) error"},
+ {"UnmarshalError", Type, 0, ""},
+ {"Unmarshaler", Type, 2, ""},
+ {"UnmarshalerAttr", Type, 2, ""},
+ {"UnsupportedTypeError", Type, 0, ""},
+ {"UnsupportedTypeError.Type", Field, 0, ""},
+ },
+ "errors": {
+ {"As", Func, 13, "func(err error, target any) bool"},
+ {"ErrUnsupported", Var, 21, ""},
+ {"Is", Func, 13, "func(err error, target error) bool"},
+ {"Join", Func, 20, "func(errs ...error) error"},
+ {"New", Func, 0, "func(text string) error"},
+ {"Unwrap", Func, 13, "func(err error) error"},
+ },
+ "expvar": {
+ {"(*Float).Add", Method, 0, ""},
+ {"(*Float).Set", Method, 0, ""},
+ {"(*Float).String", Method, 0, ""},
+ {"(*Float).Value", Method, 8, ""},
+ {"(*Int).Add", Method, 0, ""},
+ {"(*Int).Set", Method, 0, ""},
+ {"(*Int).String", Method, 0, ""},
+ {"(*Int).Value", Method, 8, ""},
+ {"(*Map).Add", Method, 0, ""},
+ {"(*Map).AddFloat", Method, 0, ""},
+ {"(*Map).Delete", Method, 12, ""},
+ {"(*Map).Do", Method, 0, ""},
+ {"(*Map).Get", Method, 0, ""},
+ {"(*Map).Init", Method, 0, ""},
+ {"(*Map).Set", Method, 0, ""},
+ {"(*Map).String", Method, 0, ""},
+ {"(*String).Set", Method, 0, ""},
+ {"(*String).String", Method, 0, ""},
+ {"(*String).Value", Method, 8, ""},
+ {"(Func).String", Method, 0, ""},
+ {"(Func).Value", Method, 8, ""},
+ {"Do", Func, 0, "func(f func(KeyValue))"},
+ {"Float", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Get", Func, 0, "func(name string) Var"},
+ {"Handler", Func, 8, "func() http.Handler"},
+ {"Int", Type, 0, ""},
+ {"KeyValue", Type, 0, ""},
+ {"KeyValue.Key", Field, 0, ""},
+ {"KeyValue.Value", Field, 0, ""},
+ {"Map", Type, 0, ""},
+ {"NewFloat", Func, 0, "func(name string) *Float"},
+ {"NewInt", Func, 0, "func(name string) *Int"},
+ {"NewMap", Func, 0, "func(name string) *Map"},
+ {"NewString", Func, 0, "func(name string) *String"},
+ {"Publish", Func, 0, "func(name string, v Var)"},
+ {"String", Type, 0, ""},
+ {"Var", Type, 0, ""},
+ },
+ "flag": {
+ {"(*FlagSet).Arg", Method, 0, ""},
+ {"(*FlagSet).Args", Method, 0, ""},
+ {"(*FlagSet).Bool", Method, 0, ""},
+ {"(*FlagSet).BoolFunc", Method, 21, ""},
+ {"(*FlagSet).BoolVar", Method, 0, ""},
+ {"(*FlagSet).Duration", Method, 0, ""},
+ {"(*FlagSet).DurationVar", Method, 0, ""},
+ {"(*FlagSet).ErrorHandling", Method, 10, ""},
+ {"(*FlagSet).Float64", Method, 0, ""},
+ {"(*FlagSet).Float64Var", Method, 0, ""},
+ {"(*FlagSet).Func", Method, 16, ""},
+ {"(*FlagSet).Init", Method, 0, ""},
+ {"(*FlagSet).Int", Method, 0, ""},
+ {"(*FlagSet).Int64", Method, 0, ""},
+ {"(*FlagSet).Int64Var", Method, 0, ""},
+ {"(*FlagSet).IntVar", Method, 0, ""},
+ {"(*FlagSet).Lookup", Method, 0, ""},
+ {"(*FlagSet).NArg", Method, 0, ""},
+ {"(*FlagSet).NFlag", Method, 0, ""},
+ {"(*FlagSet).Name", Method, 10, ""},
+ {"(*FlagSet).Output", Method, 10, ""},
+ {"(*FlagSet).Parse", Method, 0, ""},
+ {"(*FlagSet).Parsed", Method, 0, ""},
+ {"(*FlagSet).PrintDefaults", Method, 0, ""},
+ {"(*FlagSet).Set", Method, 0, ""},
+ {"(*FlagSet).SetOutput", Method, 0, ""},
+ {"(*FlagSet).String", Method, 0, ""},
+ {"(*FlagSet).StringVar", Method, 0, ""},
+ {"(*FlagSet).TextVar", Method, 19, ""},
+ {"(*FlagSet).Uint", Method, 0, ""},
+ {"(*FlagSet).Uint64", Method, 0, ""},
+ {"(*FlagSet).Uint64Var", Method, 0, ""},
+ {"(*FlagSet).UintVar", Method, 0, ""},
+ {"(*FlagSet).Var", Method, 0, ""},
+ {"(*FlagSet).Visit", Method, 0, ""},
+ {"(*FlagSet).VisitAll", Method, 0, ""},
+ {"Arg", Func, 0, "func(i int) string"},
+ {"Args", Func, 0, "func() []string"},
+ {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"},
+ {"BoolFunc", Func, 21, "func(name string, usage string, fn func(string) error)"},
+ {"BoolVar", Func, 0, "func(p *bool, name string, value bool, usage string)"},
+ {"CommandLine", Var, 2, ""},
+ {"ContinueOnError", Const, 0, ""},
+ {"Duration", Func, 0, "func(name string, value time.Duration, usage string) *time.Duration"},
+ {"DurationVar", Func, 0, "func(p *time.Duration, name string, value time.Duration, usage string)"},
+ {"ErrHelp", Var, 0, ""},
+ {"ErrorHandling", Type, 0, ""},
+ {"ExitOnError", Const, 0, ""},
+ {"Flag", Type, 0, ""},
+ {"Flag.DefValue", Field, 0, ""},
+ {"Flag.Name", Field, 0, ""},
+ {"Flag.Usage", Field, 0, ""},
+ {"Flag.Value", Field, 0, ""},
+ {"FlagSet", Type, 0, ""},
+ {"FlagSet.Usage", Field, 0, ""},
+ {"Float64", Func, 0, "func(name string, value float64, usage string) *float64"},
+ {"Float64Var", Func, 0, "func(p *float64, name string, value float64, usage string)"},
+ {"Func", Func, 16, "func(name string, usage string, fn func(string) error)"},
+ {"Getter", Type, 2, ""},
+ {"Int", Func, 0, "func(name string, value int, usage string) *int"},
+ {"Int64", Func, 0, "func(name string, value int64, usage string) *int64"},
+ {"Int64Var", Func, 0, "func(p *int64, name string, value int64, usage string)"},
+ {"IntVar", Func, 0, "func(p *int, name string, value int, usage string)"},
+ {"Lookup", Func, 0, "func(name string) *Flag"},
+ {"NArg", Func, 0, "func() int"},
+ {"NFlag", Func, 0, "func() int"},
+ {"NewFlagSet", Func, 0, "func(name string, errorHandling ErrorHandling) *FlagSet"},
+ {"PanicOnError", Const, 0, ""},
+ {"Parse", Func, 0, "func()"},
+ {"Parsed", Func, 0, "func() bool"},
+ {"PrintDefaults", Func, 0, "func()"},
+ {"Set", Func, 0, "func(name string, value string) error"},
+ {"String", Func, 0, "func(name string, value string, usage string) *string"},
+ {"StringVar", Func, 0, "func(p *string, name string, value string, usage string)"},
+ {"TextVar", Func, 19, "func(p encoding.TextUnmarshaler, name string, value encoding.TextMarshaler, usage string)"},
+ {"Uint", Func, 0, "func(name string, value uint, usage string) *uint"},
+ {"Uint64", Func, 0, "func(name string, value uint64, usage string) *uint64"},
+ {"Uint64Var", Func, 0, "func(p *uint64, name string, value uint64, usage string)"},
+ {"UintVar", Func, 0, "func(p *uint, name string, value uint, usage string)"},
+ {"UnquoteUsage", Func, 5, "func(flag *Flag) (name string, usage string)"},
+ {"Usage", Var, 0, ""},
+ {"Value", Type, 0, ""},
+ {"Var", Func, 0, "func(value Value, name string, usage string)"},
+ {"Visit", Func, 0, "func(fn func(*Flag))"},
+ {"VisitAll", Func, 0, "func(fn func(*Flag))"},
+ },
+ "fmt": {
+ {"Append", Func, 19, "func(b []byte, a ...any) []byte"},
+ {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"},
+ {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"},
+ {"Errorf", Func, 0, "func(format string, a ...any) error"},
+ {"FormatString", Func, 20, "func(state State, verb rune) string"},
+ {"Formatter", Type, 0, ""},
+ {"Fprint", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+ {"Fprintf", Func, 0, "func(w io.Writer, format string, a ...any) (n int, err error)"},
+ {"Fprintln", Func, 0, "func(w io.Writer, a ...any) (n int, err error)"},
+ {"Fscan", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+ {"Fscanf", Func, 0, "func(r io.Reader, format string, a ...any) (n int, err error)"},
+ {"Fscanln", Func, 0, "func(r io.Reader, a ...any) (n int, err error)"},
+ {"GoStringer", Type, 0, ""},
+ {"Print", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Printf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+ {"Println", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Scan", Func, 0, "func(a ...any) (n int, err error)"},
+ {"ScanState", Type, 0, ""},
+ {"Scanf", Func, 0, "func(format string, a ...any) (n int, err error)"},
+ {"Scanln", Func, 0, "func(a ...any) (n int, err error)"},
+ {"Scanner", Type, 0, ""},
+ {"Sprint", Func, 0, "func(a ...any) string"},
+ {"Sprintf", Func, 0, "func(format string, a ...any) string"},
+ {"Sprintln", Func, 0, "func(a ...any) string"},
+ {"Sscan", Func, 0, "func(str string, a ...any) (n int, err error)"},
+ {"Sscanf", Func, 0, "func(str string, format string, a ...any) (n int, err error)"},
+ {"Sscanln", Func, 0, "func(str string, a ...any) (n int, err error)"},
+ {"State", Type, 0, ""},
+ {"Stringer", Type, 0, ""},
+ },
+ "go/ast": {
+ {"(*ArrayType).End", Method, 0, ""},
+ {"(*ArrayType).Pos", Method, 0, ""},
+ {"(*AssignStmt).End", Method, 0, ""},
+ {"(*AssignStmt).Pos", Method, 0, ""},
+ {"(*BadDecl).End", Method, 0, ""},
+ {"(*BadDecl).Pos", Method, 0, ""},
+ {"(*BadExpr).End", Method, 0, ""},
+ {"(*BadExpr).Pos", Method, 0, ""},
+ {"(*BadStmt).End", Method, 0, ""},
+ {"(*BadStmt).Pos", Method, 0, ""},
+ {"(*BasicLit).End", Method, 0, ""},
+ {"(*BasicLit).Pos", Method, 0, ""},
+ {"(*BinaryExpr).End", Method, 0, ""},
+ {"(*BinaryExpr).Pos", Method, 0, ""},
+ {"(*BlockStmt).End", Method, 0, ""},
+ {"(*BlockStmt).Pos", Method, 0, ""},
+ {"(*BranchStmt).End", Method, 0, ""},
+ {"(*BranchStmt).Pos", Method, 0, ""},
+ {"(*CallExpr).End", Method, 0, ""},
+ {"(*CallExpr).Pos", Method, 0, ""},
+ {"(*CaseClause).End", Method, 0, ""},
+ {"(*CaseClause).Pos", Method, 0, ""},
+ {"(*ChanType).End", Method, 0, ""},
+ {"(*ChanType).Pos", Method, 0, ""},
+ {"(*CommClause).End", Method, 0, ""},
+ {"(*CommClause).Pos", Method, 0, ""},
+ {"(*Comment).End", Method, 0, ""},
+ {"(*Comment).Pos", Method, 0, ""},
+ {"(*CommentGroup).End", Method, 0, ""},
+ {"(*CommentGroup).Pos", Method, 0, ""},
+ {"(*CommentGroup).Text", Method, 0, ""},
+ {"(*CompositeLit).End", Method, 0, ""},
+ {"(*CompositeLit).Pos", Method, 0, ""},
+ {"(*DeclStmt).End", Method, 0, ""},
+ {"(*DeclStmt).Pos", Method, 0, ""},
+ {"(*DeferStmt).End", Method, 0, ""},
+ {"(*DeferStmt).Pos", Method, 0, ""},
+ {"(*Ellipsis).End", Method, 0, ""},
+ {"(*Ellipsis).Pos", Method, 0, ""},
+ {"(*EmptyStmt).End", Method, 0, ""},
+ {"(*EmptyStmt).Pos", Method, 0, ""},
+ {"(*ExprStmt).End", Method, 0, ""},
+ {"(*ExprStmt).Pos", Method, 0, ""},
+ {"(*Field).End", Method, 0, ""},
+ {"(*Field).Pos", Method, 0, ""},
+ {"(*FieldList).End", Method, 0, ""},
+ {"(*FieldList).NumFields", Method, 0, ""},
+ {"(*FieldList).Pos", Method, 0, ""},
+ {"(*File).End", Method, 0, ""},
+ {"(*File).Pos", Method, 0, ""},
+ {"(*ForStmt).End", Method, 0, ""},
+ {"(*ForStmt).Pos", Method, 0, ""},
+ {"(*FuncDecl).End", Method, 0, ""},
+ {"(*FuncDecl).Pos", Method, 0, ""},
+ {"(*FuncLit).End", Method, 0, ""},
+ {"(*FuncLit).Pos", Method, 0, ""},
+ {"(*FuncType).End", Method, 0, ""},
+ {"(*FuncType).Pos", Method, 0, ""},
+ {"(*GenDecl).End", Method, 0, ""},
+ {"(*GenDecl).Pos", Method, 0, ""},
+ {"(*GoStmt).End", Method, 0, ""},
+ {"(*GoStmt).Pos", Method, 0, ""},
+ {"(*Ident).End", Method, 0, ""},
+ {"(*Ident).IsExported", Method, 0, ""},
+ {"(*Ident).Pos", Method, 0, ""},
+ {"(*Ident).String", Method, 0, ""},
+ {"(*IfStmt).End", Method, 0, ""},
+ {"(*IfStmt).Pos", Method, 0, ""},
+ {"(*ImportSpec).End", Method, 0, ""},
+ {"(*ImportSpec).Pos", Method, 0, ""},
+ {"(*IncDecStmt).End", Method, 0, ""},
+ {"(*IncDecStmt).Pos", Method, 0, ""},
+ {"(*IndexExpr).End", Method, 0, ""},
+ {"(*IndexExpr).Pos", Method, 0, ""},
+ {"(*IndexListExpr).End", Method, 18, ""},
+ {"(*IndexListExpr).Pos", Method, 18, ""},
+ {"(*InterfaceType).End", Method, 0, ""},
+ {"(*InterfaceType).Pos", Method, 0, ""},
+ {"(*KeyValueExpr).End", Method, 0, ""},
+ {"(*KeyValueExpr).Pos", Method, 0, ""},
+ {"(*LabeledStmt).End", Method, 0, ""},
+ {"(*LabeledStmt).Pos", Method, 0, ""},
+ {"(*MapType).End", Method, 0, ""},
+ {"(*MapType).Pos", Method, 0, ""},
+ {"(*Object).Pos", Method, 0, ""},
+ {"(*Package).End", Method, 0, ""},
+ {"(*Package).Pos", Method, 0, ""},
+ {"(*ParenExpr).End", Method, 0, ""},
+ {"(*ParenExpr).Pos", Method, 0, ""},
+ {"(*RangeStmt).End", Method, 0, ""},
+ {"(*RangeStmt).Pos", Method, 0, ""},
+ {"(*ReturnStmt).End", Method, 0, ""},
+ {"(*ReturnStmt).Pos", Method, 0, ""},
+ {"(*Scope).Insert", Method, 0, ""},
+ {"(*Scope).Lookup", Method, 0, ""},
+ {"(*Scope).String", Method, 0, ""},
+ {"(*SelectStmt).End", Method, 0, ""},
+ {"(*SelectStmt).Pos", Method, 0, ""},
+ {"(*SelectorExpr).End", Method, 0, ""},
+ {"(*SelectorExpr).Pos", Method, 0, ""},
+ {"(*SendStmt).End", Method, 0, ""},
+ {"(*SendStmt).Pos", Method, 0, ""},
+ {"(*SliceExpr).End", Method, 0, ""},
+ {"(*SliceExpr).Pos", Method, 0, ""},
+ {"(*StarExpr).End", Method, 0, ""},
+ {"(*StarExpr).Pos", Method, 0, ""},
+ {"(*StructType).End", Method, 0, ""},
+ {"(*StructType).Pos", Method, 0, ""},
+ {"(*SwitchStmt).End", Method, 0, ""},
+ {"(*SwitchStmt).Pos", Method, 0, ""},
+ {"(*TypeAssertExpr).End", Method, 0, ""},
+ {"(*TypeAssertExpr).Pos", Method, 0, ""},
+ {"(*TypeSpec).End", Method, 0, ""},
+ {"(*TypeSpec).Pos", Method, 0, ""},
+ {"(*TypeSwitchStmt).End", Method, 0, ""},
+ {"(*TypeSwitchStmt).Pos", Method, 0, ""},
+ {"(*UnaryExpr).End", Method, 0, ""},
+ {"(*UnaryExpr).Pos", Method, 0, ""},
+ {"(*ValueSpec).End", Method, 0, ""},
+ {"(*ValueSpec).Pos", Method, 0, ""},
+ {"(CommentMap).Comments", Method, 1, ""},
+ {"(CommentMap).Filter", Method, 1, ""},
+ {"(CommentMap).String", Method, 1, ""},
+ {"(CommentMap).Update", Method, 1, ""},
+ {"(ObjKind).String", Method, 0, ""},
+ {"ArrayType", Type, 0, ""},
+ {"ArrayType.Elt", Field, 0, ""},
+ {"ArrayType.Lbrack", Field, 0, ""},
+ {"ArrayType.Len", Field, 0, ""},
+ {"AssignStmt", Type, 0, ""},
+ {"AssignStmt.Lhs", Field, 0, ""},
+ {"AssignStmt.Rhs", Field, 0, ""},
+ {"AssignStmt.Tok", Field, 0, ""},
+ {"AssignStmt.TokPos", Field, 0, ""},
+ {"Bad", Const, 0, ""},
+ {"BadDecl", Type, 0, ""},
+ {"BadDecl.From", Field, 0, ""},
+ {"BadDecl.To", Field, 0, ""},
+ {"BadExpr", Type, 0, ""},
+ {"BadExpr.From", Field, 0, ""},
+ {"BadExpr.To", Field, 0, ""},
+ {"BadStmt", Type, 0, ""},
+ {"BadStmt.From", Field, 0, ""},
+ {"BadStmt.To", Field, 0, ""},
+ {"BasicLit", Type, 0, ""},
+ {"BasicLit.Kind", Field, 0, ""},
+ {"BasicLit.Value", Field, 0, ""},
+ {"BasicLit.ValuePos", Field, 0, ""},
+ {"BinaryExpr", Type, 0, ""},
+ {"BinaryExpr.Op", Field, 0, ""},
+ {"BinaryExpr.OpPos", Field, 0, ""},
+ {"BinaryExpr.X", Field, 0, ""},
+ {"BinaryExpr.Y", Field, 0, ""},
+ {"BlockStmt", Type, 0, ""},
+ {"BlockStmt.Lbrace", Field, 0, ""},
+ {"BlockStmt.List", Field, 0, ""},
+ {"BlockStmt.Rbrace", Field, 0, ""},
+ {"BranchStmt", Type, 0, ""},
+ {"BranchStmt.Label", Field, 0, ""},
+ {"BranchStmt.Tok", Field, 0, ""},
+ {"BranchStmt.TokPos", Field, 0, ""},
+ {"CallExpr", Type, 0, ""},
+ {"CallExpr.Args", Field, 0, ""},
+ {"CallExpr.Ellipsis", Field, 0, ""},
+ {"CallExpr.Fun", Field, 0, ""},
+ {"CallExpr.Lparen", Field, 0, ""},
+ {"CallExpr.Rparen", Field, 0, ""},
+ {"CaseClause", Type, 0, ""},
+ {"CaseClause.Body", Field, 0, ""},
+ {"CaseClause.Case", Field, 0, ""},
+ {"CaseClause.Colon", Field, 0, ""},
+ {"CaseClause.List", Field, 0, ""},
+ {"ChanDir", Type, 0, ""},
+ {"ChanType", Type, 0, ""},
+ {"ChanType.Arrow", Field, 1, ""},
+ {"ChanType.Begin", Field, 0, ""},
+ {"ChanType.Dir", Field, 0, ""},
+ {"ChanType.Value", Field, 0, ""},
+ {"CommClause", Type, 0, ""},
+ {"CommClause.Body", Field, 0, ""},
+ {"CommClause.Case", Field, 0, ""},
+ {"CommClause.Colon", Field, 0, ""},
+ {"CommClause.Comm", Field, 0, ""},
+ {"Comment", Type, 0, ""},
+ {"Comment.Slash", Field, 0, ""},
+ {"Comment.Text", Field, 0, ""},
+ {"CommentGroup", Type, 0, ""},
+ {"CommentGroup.List", Field, 0, ""},
+ {"CommentMap", Type, 1, ""},
+ {"CompositeLit", Type, 0, ""},
+ {"CompositeLit.Elts", Field, 0, ""},
+ {"CompositeLit.Incomplete", Field, 11, ""},
+ {"CompositeLit.Lbrace", Field, 0, ""},
+ {"CompositeLit.Rbrace", Field, 0, ""},
+ {"CompositeLit.Type", Field, 0, ""},
+ {"Con", Const, 0, ""},
+ {"Decl", Type, 0, ""},
+ {"DeclStmt", Type, 0, ""},
+ {"DeclStmt.Decl", Field, 0, ""},
+ {"DeferStmt", Type, 0, ""},
+ {"DeferStmt.Call", Field, 0, ""},
+ {"DeferStmt.Defer", Field, 0, ""},
+ {"Ellipsis", Type, 0, ""},
+ {"Ellipsis.Ellipsis", Field, 0, ""},
+ {"Ellipsis.Elt", Field, 0, ""},
+ {"EmptyStmt", Type, 0, ""},
+ {"EmptyStmt.Implicit", Field, 5, ""},
+ {"EmptyStmt.Semicolon", Field, 0, ""},
+ {"Expr", Type, 0, ""},
+ {"ExprStmt", Type, 0, ""},
+ {"ExprStmt.X", Field, 0, ""},
+ {"Field", Type, 0, ""},
+ {"Field.Comment", Field, 0, ""},
+ {"Field.Doc", Field, 0, ""},
+ {"Field.Names", Field, 0, ""},
+ {"Field.Tag", Field, 0, ""},
+ {"Field.Type", Field, 0, ""},
+ {"FieldFilter", Type, 0, ""},
+ {"FieldList", Type, 0, ""},
+ {"FieldList.Closing", Field, 0, ""},
+ {"FieldList.List", Field, 0, ""},
+ {"FieldList.Opening", Field, 0, ""},
+ {"File", Type, 0, ""},
+ {"File.Comments", Field, 0, ""},
+ {"File.Decls", Field, 0, ""},
+ {"File.Doc", Field, 0, ""},
+ {"File.FileEnd", Field, 20, ""},
+ {"File.FileStart", Field, 20, ""},
+ {"File.GoVersion", Field, 21, ""},
+ {"File.Imports", Field, 0, ""},
+ {"File.Name", Field, 0, ""},
+ {"File.Package", Field, 0, ""},
+ {"File.Scope", Field, 0, ""},
+ {"File.Unresolved", Field, 0, ""},
+ {"FileExports", Func, 0, "func(src *File) bool"},
+ {"Filter", Type, 0, ""},
+ {"FilterDecl", Func, 0, "func(decl Decl, f Filter) bool"},
+ {"FilterFile", Func, 0, "func(src *File, f Filter) bool"},
+ {"FilterFuncDuplicates", Const, 0, ""},
+ {"FilterImportDuplicates", Const, 0, ""},
+ {"FilterPackage", Func, 0, "func(pkg *Package, f Filter) bool"},
+ {"FilterUnassociatedComments", Const, 0, ""},
+ {"ForStmt", Type, 0, ""},
+ {"ForStmt.Body", Field, 0, ""},
+ {"ForStmt.Cond", Field, 0, ""},
+ {"ForStmt.For", Field, 0, ""},
+ {"ForStmt.Init", Field, 0, ""},
+ {"ForStmt.Post", Field, 0, ""},
+ {"Fprint", Func, 0, "func(w io.Writer, fset *token.FileSet, x any, f FieldFilter) error"},
+ {"Fun", Const, 0, ""},
+ {"FuncDecl", Type, 0, ""},
+ {"FuncDecl.Body", Field, 0, ""},
+ {"FuncDecl.Doc", Field, 0, ""},
+ {"FuncDecl.Name", Field, 0, ""},
+ {"FuncDecl.Recv", Field, 0, ""},
+ {"FuncDecl.Type", Field, 0, ""},
+ {"FuncLit", Type, 0, ""},
+ {"FuncLit.Body", Field, 0, ""},
+ {"FuncLit.Type", Field, 0, ""},
+ {"FuncType", Type, 0, ""},
+ {"FuncType.Func", Field, 0, ""},
+ {"FuncType.Params", Field, 0, ""},
+ {"FuncType.Results", Field, 0, ""},
+ {"FuncType.TypeParams", Field, 18, ""},
+ {"GenDecl", Type, 0, ""},
+ {"GenDecl.Doc", Field, 0, ""},
+ {"GenDecl.Lparen", Field, 0, ""},
+ {"GenDecl.Rparen", Field, 0, ""},
+ {"GenDecl.Specs", Field, 0, ""},
+ {"GenDecl.Tok", Field, 0, ""},
+ {"GenDecl.TokPos", Field, 0, ""},
+ {"GoStmt", Type, 0, ""},
+ {"GoStmt.Call", Field, 0, ""},
+ {"GoStmt.Go", Field, 0, ""},
+ {"Ident", Type, 0, ""},
+ {"Ident.Name", Field, 0, ""},
+ {"Ident.NamePos", Field, 0, ""},
+ {"Ident.Obj", Field, 0, ""},
+ {"IfStmt", Type, 0, ""},
+ {"IfStmt.Body", Field, 0, ""},
+ {"IfStmt.Cond", Field, 0, ""},
+ {"IfStmt.Else", Field, 0, ""},
+ {"IfStmt.If", Field, 0, ""},
+ {"IfStmt.Init", Field, 0, ""},
+ {"ImportSpec", Type, 0, ""},
+ {"ImportSpec.Comment", Field, 0, ""},
+ {"ImportSpec.Doc", Field, 0, ""},
+ {"ImportSpec.EndPos", Field, 0, ""},
+ {"ImportSpec.Name", Field, 0, ""},
+ {"ImportSpec.Path", Field, 0, ""},
+ {"Importer", Type, 0, ""},
+ {"IncDecStmt", Type, 0, ""},
+ {"IncDecStmt.Tok", Field, 0, ""},
+ {"IncDecStmt.TokPos", Field, 0, ""},
+ {"IncDecStmt.X", Field, 0, ""},
+ {"IndexExpr", Type, 0, ""},
+ {"IndexExpr.Index", Field, 0, ""},
+ {"IndexExpr.Lbrack", Field, 0, ""},
+ {"IndexExpr.Rbrack", Field, 0, ""},
+ {"IndexExpr.X", Field, 0, ""},
+ {"IndexListExpr", Type, 18, ""},
+ {"IndexListExpr.Indices", Field, 18, ""},
+ {"IndexListExpr.Lbrack", Field, 18, ""},
+ {"IndexListExpr.Rbrack", Field, 18, ""},
+ {"IndexListExpr.X", Field, 18, ""},
+ {"Inspect", Func, 0, "func(node Node, f func(Node) bool)"},
+ {"InterfaceType", Type, 0, ""},
+ {"InterfaceType.Incomplete", Field, 0, ""},
+ {"InterfaceType.Interface", Field, 0, ""},
+ {"InterfaceType.Methods", Field, 0, ""},
+ {"IsExported", Func, 0, "func(name string) bool"},
+ {"IsGenerated", Func, 21, "func(file *File) bool"},
+ {"KeyValueExpr", Type, 0, ""},
+ {"KeyValueExpr.Colon", Field, 0, ""},
+ {"KeyValueExpr.Key", Field, 0, ""},
+ {"KeyValueExpr.Value", Field, 0, ""},
+ {"LabeledStmt", Type, 0, ""},
+ {"LabeledStmt.Colon", Field, 0, ""},
+ {"LabeledStmt.Label", Field, 0, ""},
+ {"LabeledStmt.Stmt", Field, 0, ""},
+ {"Lbl", Const, 0, ""},
+ {"MapType", Type, 0, ""},
+ {"MapType.Key", Field, 0, ""},
+ {"MapType.Map", Field, 0, ""},
+ {"MapType.Value", Field, 0, ""},
+ {"MergeMode", Type, 0, ""},
+ {"MergePackageFiles", Func, 0, "func(pkg *Package, mode MergeMode) *File"},
+ {"NewCommentMap", Func, 1, "func(fset *token.FileSet, node Node, comments []*CommentGroup) CommentMap"},
+ {"NewIdent", Func, 0, "func(name string) *Ident"},
+ {"NewObj", Func, 0, "func(kind ObjKind, name string) *Object"},
+ {"NewPackage", Func, 0, "func(fset *token.FileSet, files map[string]*File, importer Importer, universe *Scope) (*Package, error)"},
+ {"NewScope", Func, 0, "func(outer *Scope) *Scope"},
+ {"Node", Type, 0, ""},
+ {"NotNilFilter", Func, 0, "func(_ string, v reflect.Value) bool"},
+ {"ObjKind", Type, 0, ""},
+ {"Object", Type, 0, ""},
+ {"Object.Data", Field, 0, ""},
+ {"Object.Decl", Field, 0, ""},
+ {"Object.Kind", Field, 0, ""},
+ {"Object.Name", Field, 0, ""},
+ {"Object.Type", Field, 0, ""},
+ {"Package", Type, 0, ""},
+ {"Package.Files", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.Scope", Field, 0, ""},
+ {"PackageExports", Func, 0, "func(pkg *Package) bool"},
+ {"ParenExpr", Type, 0, ""},
+ {"ParenExpr.Lparen", Field, 0, ""},
+ {"ParenExpr.Rparen", Field, 0, ""},
+ {"ParenExpr.X", Field, 0, ""},
+ {"Pkg", Const, 0, ""},
+ {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"},
+ {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"},
+ {"Print", Func, 0, "func(fset *token.FileSet, x any) error"},
+ {"RECV", Const, 0, ""},
+ {"RangeStmt", Type, 0, ""},
+ {"RangeStmt.Body", Field, 0, ""},
+ {"RangeStmt.For", Field, 0, ""},
+ {"RangeStmt.Key", Field, 0, ""},
+ {"RangeStmt.Range", Field, 20, ""},
+ {"RangeStmt.Tok", Field, 0, ""},
+ {"RangeStmt.TokPos", Field, 0, ""},
+ {"RangeStmt.Value", Field, 0, ""},
+ {"RangeStmt.X", Field, 0, ""},
+ {"ReturnStmt", Type, 0, ""},
+ {"ReturnStmt.Results", Field, 0, ""},
+ {"ReturnStmt.Return", Field, 0, ""},
+ {"SEND", Const, 0, ""},
+ {"Scope", Type, 0, ""},
+ {"Scope.Objects", Field, 0, ""},
+ {"Scope.Outer", Field, 0, ""},
+ {"SelectStmt", Type, 0, ""},
+ {"SelectStmt.Body", Field, 0, ""},
+ {"SelectStmt.Select", Field, 0, ""},
+ {"SelectorExpr", Type, 0, ""},
+ {"SelectorExpr.Sel", Field, 0, ""},
+ {"SelectorExpr.X", Field, 0, ""},
+ {"SendStmt", Type, 0, ""},
+ {"SendStmt.Arrow", Field, 0, ""},
+ {"SendStmt.Chan", Field, 0, ""},
+ {"SendStmt.Value", Field, 0, ""},
+ {"SliceExpr", Type, 0, ""},
+ {"SliceExpr.High", Field, 0, ""},
+ {"SliceExpr.Lbrack", Field, 0, ""},
+ {"SliceExpr.Low", Field, 0, ""},
+ {"SliceExpr.Max", Field, 2, ""},
+ {"SliceExpr.Rbrack", Field, 0, ""},
+ {"SliceExpr.Slice3", Field, 2, ""},
+ {"SliceExpr.X", Field, 0, ""},
+ {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"},
+ {"Spec", Type, 0, ""},
+ {"StarExpr", Type, 0, ""},
+ {"StarExpr.Star", Field, 0, ""},
+ {"StarExpr.X", Field, 0, ""},
+ {"Stmt", Type, 0, ""},
+ {"StructType", Type, 0, ""},
+ {"StructType.Fields", Field, 0, ""},
+ {"StructType.Incomplete", Field, 0, ""},
+ {"StructType.Struct", Field, 0, ""},
+ {"SwitchStmt", Type, 0, ""},
+ {"SwitchStmt.Body", Field, 0, ""},
+ {"SwitchStmt.Init", Field, 0, ""},
+ {"SwitchStmt.Switch", Field, 0, ""},
+ {"SwitchStmt.Tag", Field, 0, ""},
+ {"Typ", Const, 0, ""},
+ {"TypeAssertExpr", Type, 0, ""},
+ {"TypeAssertExpr.Lparen", Field, 2, ""},
+ {"TypeAssertExpr.Rparen", Field, 2, ""},
+ {"TypeAssertExpr.Type", Field, 0, ""},
+ {"TypeAssertExpr.X", Field, 0, ""},
+ {"TypeSpec", Type, 0, ""},
+ {"TypeSpec.Assign", Field, 9, ""},
+ {"TypeSpec.Comment", Field, 0, ""},
+ {"TypeSpec.Doc", Field, 0, ""},
+ {"TypeSpec.Name", Field, 0, ""},
+ {"TypeSpec.Type", Field, 0, ""},
+ {"TypeSpec.TypeParams", Field, 18, ""},
+ {"TypeSwitchStmt", Type, 0, ""},
+ {"TypeSwitchStmt.Assign", Field, 0, ""},
+ {"TypeSwitchStmt.Body", Field, 0, ""},
+ {"TypeSwitchStmt.Init", Field, 0, ""},
+ {"TypeSwitchStmt.Switch", Field, 0, ""},
+ {"UnaryExpr", Type, 0, ""},
+ {"UnaryExpr.Op", Field, 0, ""},
+ {"UnaryExpr.OpPos", Field, 0, ""},
+ {"UnaryExpr.X", Field, 0, ""},
+ {"Unparen", Func, 22, "func(e Expr) Expr"},
+ {"ValueSpec", Type, 0, ""},
+ {"ValueSpec.Comment", Field, 0, ""},
+ {"ValueSpec.Doc", Field, 0, ""},
+ {"ValueSpec.Names", Field, 0, ""},
+ {"ValueSpec.Type", Field, 0, ""},
+ {"ValueSpec.Values", Field, 0, ""},
+ {"Var", Const, 0, ""},
+ {"Visitor", Type, 0, ""},
+ {"Walk", Func, 0, "func(v Visitor, node Node)"},
+ },
+ "go/build": {
+ {"(*Context).Import", Method, 0, ""},
+ {"(*Context).ImportDir", Method, 0, ""},
+ {"(*Context).MatchFile", Method, 2, ""},
+ {"(*Context).SrcDirs", Method, 0, ""},
+ {"(*MultiplePackageError).Error", Method, 4, ""},
+ {"(*NoGoError).Error", Method, 0, ""},
+ {"(*Package).IsCommand", Method, 0, ""},
+ {"AllowBinary", Const, 0, ""},
+ {"ArchChar", Func, 0, "func(goarch string) (string, error)"},
+ {"Context", Type, 0, ""},
+ {"Context.BuildTags", Field, 0, ""},
+ {"Context.CgoEnabled", Field, 0, ""},
+ {"Context.Compiler", Field, 0, ""},
+ {"Context.Dir", Field, 14, ""},
+ {"Context.GOARCH", Field, 0, ""},
+ {"Context.GOOS", Field, 0, ""},
+ {"Context.GOPATH", Field, 0, ""},
+ {"Context.GOROOT", Field, 0, ""},
+ {"Context.HasSubdir", Field, 0, ""},
+ {"Context.InstallSuffix", Field, 1, ""},
+ {"Context.IsAbsPath", Field, 0, ""},
+ {"Context.IsDir", Field, 0, ""},
+ {"Context.JoinPath", Field, 0, ""},
+ {"Context.OpenFile", Field, 0, ""},
+ {"Context.ReadDir", Field, 0, ""},
+ {"Context.ReleaseTags", Field, 1, ""},
+ {"Context.SplitPathList", Field, 0, ""},
+ {"Context.ToolTags", Field, 17, ""},
+ {"Context.UseAllFiles", Field, 0, ""},
+ {"Default", Var, 0, ""},
+ {"Directive", Type, 21, ""},
+ {"Directive.Pos", Field, 21, ""},
+ {"Directive.Text", Field, 21, ""},
+ {"FindOnly", Const, 0, ""},
+ {"IgnoreVendor", Const, 6, ""},
+ {"Import", Func, 0, "func(path string, srcDir string, mode ImportMode) (*Package, error)"},
+ {"ImportComment", Const, 4, ""},
+ {"ImportDir", Func, 0, "func(dir string, mode ImportMode) (*Package, error)"},
+ {"ImportMode", Type, 0, ""},
+ {"IsLocalImport", Func, 0, "func(path string) bool"},
+ {"MultiplePackageError", Type, 4, ""},
+ {"MultiplePackageError.Dir", Field, 4, ""},
+ {"MultiplePackageError.Files", Field, 4, ""},
+ {"MultiplePackageError.Packages", Field, 4, ""},
+ {"NoGoError", Type, 0, ""},
+ {"NoGoError.Dir", Field, 0, ""},
+ {"Package", Type, 0, ""},
+ {"Package.AllTags", Field, 2, ""},
+ {"Package.BinDir", Field, 0, ""},
+ {"Package.BinaryOnly", Field, 7, ""},
+ {"Package.CFiles", Field, 0, ""},
+ {"Package.CXXFiles", Field, 2, ""},
+ {"Package.CgoCFLAGS", Field, 0, ""},
+ {"Package.CgoCPPFLAGS", Field, 2, ""},
+ {"Package.CgoCXXFLAGS", Field, 2, ""},
+ {"Package.CgoFFLAGS", Field, 7, ""},
+ {"Package.CgoFiles", Field, 0, ""},
+ {"Package.CgoLDFLAGS", Field, 0, ""},
+ {"Package.CgoPkgConfig", Field, 0, ""},
+ {"Package.ConflictDir", Field, 2, ""},
+ {"Package.Dir", Field, 0, ""},
+ {"Package.Directives", Field, 21, ""},
+ {"Package.Doc", Field, 0, ""},
+ {"Package.EmbedPatternPos", Field, 16, ""},
+ {"Package.EmbedPatterns", Field, 16, ""},
+ {"Package.FFiles", Field, 7, ""},
+ {"Package.GoFiles", Field, 0, ""},
+ {"Package.Goroot", Field, 0, ""},
+ {"Package.HFiles", Field, 0, ""},
+ {"Package.IgnoredGoFiles", Field, 1, ""},
+ {"Package.IgnoredOtherFiles", Field, 16, ""},
+ {"Package.ImportComment", Field, 4, ""},
+ {"Package.ImportPath", Field, 0, ""},
+ {"Package.ImportPos", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.InvalidGoFiles", Field, 6, ""},
+ {"Package.MFiles", Field, 3, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.PkgObj", Field, 0, ""},
+ {"Package.PkgRoot", Field, 0, ""},
+ {"Package.PkgTargetRoot", Field, 5, ""},
+ {"Package.Root", Field, 0, ""},
+ {"Package.SFiles", Field, 0, ""},
+ {"Package.SrcRoot", Field, 0, ""},
+ {"Package.SwigCXXFiles", Field, 1, ""},
+ {"Package.SwigFiles", Field, 1, ""},
+ {"Package.SysoFiles", Field, 0, ""},
+ {"Package.TestDirectives", Field, 21, ""},
+ {"Package.TestEmbedPatternPos", Field, 16, ""},
+ {"Package.TestEmbedPatterns", Field, 16, ""},
+ {"Package.TestGoFiles", Field, 0, ""},
+ {"Package.TestImportPos", Field, 0, ""},
+ {"Package.TestImports", Field, 0, ""},
+ {"Package.XTestDirectives", Field, 21, ""},
+ {"Package.XTestEmbedPatternPos", Field, 16, ""},
+ {"Package.XTestEmbedPatterns", Field, 16, ""},
+ {"Package.XTestGoFiles", Field, 0, ""},
+ {"Package.XTestImportPos", Field, 0, ""},
+ {"Package.XTestImports", Field, 0, ""},
+ {"ToolDir", Var, 0, ""},
+ },
+ "go/build/constraint": {
+ {"(*AndExpr).Eval", Method, 16, ""},
+ {"(*AndExpr).String", Method, 16, ""},
+ {"(*NotExpr).Eval", Method, 16, ""},
+ {"(*NotExpr).String", Method, 16, ""},
+ {"(*OrExpr).Eval", Method, 16, ""},
+ {"(*OrExpr).String", Method, 16, ""},
+ {"(*SyntaxError).Error", Method, 16, ""},
+ {"(*TagExpr).Eval", Method, 16, ""},
+ {"(*TagExpr).String", Method, 16, ""},
+ {"AndExpr", Type, 16, ""},
+ {"AndExpr.X", Field, 16, ""},
+ {"AndExpr.Y", Field, 16, ""},
+ {"Expr", Type, 16, ""},
+ {"GoVersion", Func, 21, "func(x Expr) string"},
+ {"IsGoBuild", Func, 16, "func(line string) bool"},
+ {"IsPlusBuild", Func, 16, "func(line string) bool"},
+ {"NotExpr", Type, 16, ""},
+ {"NotExpr.X", Field, 16, ""},
+ {"OrExpr", Type, 16, ""},
+ {"OrExpr.X", Field, 16, ""},
+ {"OrExpr.Y", Field, 16, ""},
+ {"Parse", Func, 16, "func(line string) (Expr, error)"},
+ {"PlusBuildLines", Func, 16, "func(x Expr) ([]string, error)"},
+ {"SyntaxError", Type, 16, ""},
+ {"SyntaxError.Err", Field, 16, ""},
+ {"SyntaxError.Offset", Field, 16, ""},
+ {"TagExpr", Type, 16, ""},
+ {"TagExpr.Tag", Field, 16, ""},
+ },
+ "go/constant": {
+ {"(Kind).String", Method, 18, ""},
+ {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"},
+ {"BitLen", Func, 5, "func(x Value) int"},
+ {"Bool", Const, 5, ""},
+ {"BoolVal", Func, 5, "func(x Value) bool"},
+ {"Bytes", Func, 5, "func(x Value) []byte"},
+ {"Compare", Func, 5, "func(x_ Value, op token.Token, y_ Value) bool"},
+ {"Complex", Const, 5, ""},
+ {"Denom", Func, 5, "func(x Value) Value"},
+ {"Float", Const, 5, ""},
+ {"Float32Val", Func, 5, "func(x Value) (float32, bool)"},
+ {"Float64Val", Func, 5, "func(x Value) (float64, bool)"},
+ {"Imag", Func, 5, "func(x Value) Value"},
+ {"Int", Const, 5, ""},
+ {"Int64Val", Func, 5, "func(x Value) (int64, bool)"},
+ {"Kind", Type, 5, ""},
+ {"Make", Func, 13, "func(x any) Value"},
+ {"MakeBool", Func, 5, "func(b bool) Value"},
+ {"MakeFloat64", Func, 5, "func(x float64) Value"},
+ {"MakeFromBytes", Func, 5, "func(bytes []byte) Value"},
+ {"MakeFromLiteral", Func, 5, "func(lit string, tok token.Token, zero uint) Value"},
+ {"MakeImag", Func, 5, "func(x Value) Value"},
+ {"MakeInt64", Func, 5, "func(x int64) Value"},
+ {"MakeString", Func, 5, "func(s string) Value"},
+ {"MakeUint64", Func, 5, "func(x uint64) Value"},
+ {"MakeUnknown", Func, 5, "func() Value"},
+ {"Num", Func, 5, "func(x Value) Value"},
+ {"Real", Func, 5, "func(x Value) Value"},
+ {"Shift", Func, 5, "func(x Value, op token.Token, s uint) Value"},
+ {"Sign", Func, 5, "func(x Value) int"},
+ {"String", Const, 5, ""},
+ {"StringVal", Func, 5, "func(x Value) string"},
+ {"ToComplex", Func, 6, "func(x Value) Value"},
+ {"ToFloat", Func, 6, "func(x Value) Value"},
+ {"ToInt", Func, 6, "func(x Value) Value"},
+ {"Uint64Val", Func, 5, "func(x Value) (uint64, bool)"},
+ {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"},
+ {"Unknown", Const, 5, ""},
+ {"Val", Func, 13, "func(x Value) any"},
+ {"Value", Type, 5, ""},
+ },
+ "go/doc": {
+ {"(*Package).Filter", Method, 0, ""},
+ {"(*Package).HTML", Method, 19, ""},
+ {"(*Package).Markdown", Method, 19, ""},
+ {"(*Package).Parser", Method, 19, ""},
+ {"(*Package).Printer", Method, 19, ""},
+ {"(*Package).Synopsis", Method, 19, ""},
+ {"(*Package).Text", Method, 19, ""},
+ {"AllDecls", Const, 0, ""},
+ {"AllMethods", Const, 0, ""},
+ {"Example", Type, 0, ""},
+ {"Example.Code", Field, 0, ""},
+ {"Example.Comments", Field, 0, ""},
+ {"Example.Doc", Field, 0, ""},
+ {"Example.EmptyOutput", Field, 1, ""},
+ {"Example.Name", Field, 0, ""},
+ {"Example.Order", Field, 1, ""},
+ {"Example.Output", Field, 0, ""},
+ {"Example.Play", Field, 1, ""},
+ {"Example.Suffix", Field, 14, ""},
+ {"Example.Unordered", Field, 7, ""},
+ {"Examples", Func, 0, "func(testFiles ...*ast.File) []*Example"},
+ {"Filter", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"Func.Decl", Field, 0, ""},
+ {"Func.Doc", Field, 0, ""},
+ {"Func.Examples", Field, 14, ""},
+ {"Func.Level", Field, 0, ""},
+ {"Func.Name", Field, 0, ""},
+ {"Func.Orig", Field, 0, ""},
+ {"Func.Recv", Field, 0, ""},
+ {"IllegalPrefixes", Var, 1, ""},
+ {"IsPredeclared", Func, 8, "func(s string) bool"},
+ {"Mode", Type, 0, ""},
+ {"New", Func, 0, "func(pkg *ast.Package, importPath string, mode Mode) *Package"},
+ {"NewFromFiles", Func, 14, "func(fset *token.FileSet, files []*ast.File, importPath string, opts ...any) (*Package, error)"},
+ {"Note", Type, 1, ""},
+ {"Note.Body", Field, 1, ""},
+ {"Note.End", Field, 1, ""},
+ {"Note.Pos", Field, 1, ""},
+ {"Note.UID", Field, 1, ""},
+ {"Package", Type, 0, ""},
+ {"Package.Bugs", Field, 0, ""},
+ {"Package.Consts", Field, 0, ""},
+ {"Package.Doc", Field, 0, ""},
+ {"Package.Examples", Field, 14, ""},
+ {"Package.Filenames", Field, 0, ""},
+ {"Package.Funcs", Field, 0, ""},
+ {"Package.ImportPath", Field, 0, ""},
+ {"Package.Imports", Field, 0, ""},
+ {"Package.Name", Field, 0, ""},
+ {"Package.Notes", Field, 1, ""},
+ {"Package.Types", Field, 0, ""},
+ {"Package.Vars", Field, 0, ""},
+ {"PreserveAST", Const, 12, ""},
+ {"Synopsis", Func, 0, "func(text string) string"},
+ {"ToHTML", Func, 0, "func(w io.Writer, text string, words map[string]string)"},
+ {"ToText", Func, 0, "func(w io.Writer, text string, prefix string, codePrefix string, width int)"},
+ {"Type", Type, 0, ""},
+ {"Type.Consts", Field, 0, ""},
+ {"Type.Decl", Field, 0, ""},
+ {"Type.Doc", Field, 0, ""},
+ {"Type.Examples", Field, 14, ""},
+ {"Type.Funcs", Field, 0, ""},
+ {"Type.Methods", Field, 0, ""},
+ {"Type.Name", Field, 0, ""},
+ {"Type.Vars", Field, 0, ""},
+ {"Value", Type, 0, ""},
+ {"Value.Decl", Field, 0, ""},
+ {"Value.Doc", Field, 0, ""},
+ {"Value.Names", Field, 0, ""},
+ },
+ "go/doc/comment": {
+ {"(*DocLink).DefaultURL", Method, 19, ""},
+ {"(*Heading).DefaultID", Method, 19, ""},
+ {"(*List).BlankBefore", Method, 19, ""},
+ {"(*List).BlankBetween", Method, 19, ""},
+ {"(*Parser).Parse", Method, 19, ""},
+ {"(*Printer).Comment", Method, 19, ""},
+ {"(*Printer).HTML", Method, 19, ""},
+ {"(*Printer).Markdown", Method, 19, ""},
+ {"(*Printer).Text", Method, 19, ""},
+ {"Block", Type, 19, ""},
+ {"Code", Type, 19, ""},
+ {"Code.Text", Field, 19, ""},
+ {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"},
+ {"Doc", Type, 19, ""},
+ {"Doc.Content", Field, 19, ""},
+ {"Doc.Links", Field, 19, ""},
+ {"DocLink", Type, 19, ""},
+ {"DocLink.ImportPath", Field, 19, ""},
+ {"DocLink.Name", Field, 19, ""},
+ {"DocLink.Recv", Field, 19, ""},
+ {"DocLink.Text", Field, 19, ""},
+ {"Heading", Type, 19, ""},
+ {"Heading.Text", Field, 19, ""},
+ {"Italic", Type, 19, ""},
+ {"Link", Type, 19, ""},
+ {"Link.Auto", Field, 19, ""},
+ {"Link.Text", Field, 19, ""},
+ {"Link.URL", Field, 19, ""},
+ {"LinkDef", Type, 19, ""},
+ {"LinkDef.Text", Field, 19, ""},
+ {"LinkDef.URL", Field, 19, ""},
+ {"LinkDef.Used", Field, 19, ""},
+ {"List", Type, 19, ""},
+ {"List.ForceBlankBefore", Field, 19, ""},
+ {"List.ForceBlankBetween", Field, 19, ""},
+ {"List.Items", Field, 19, ""},
+ {"ListItem", Type, 19, ""},
+ {"ListItem.Content", Field, 19, ""},
+ {"ListItem.Number", Field, 19, ""},
+ {"Paragraph", Type, 19, ""},
+ {"Paragraph.Text", Field, 19, ""},
+ {"Parser", Type, 19, ""},
+ {"Parser.LookupPackage", Field, 19, ""},
+ {"Parser.LookupSym", Field, 19, ""},
+ {"Parser.Words", Field, 19, ""},
+ {"Plain", Type, 19, ""},
+ {"Printer", Type, 19, ""},
+ {"Printer.DocLinkBaseURL", Field, 19, ""},
+ {"Printer.DocLinkURL", Field, 19, ""},
+ {"Printer.HeadingID", Field, 19, ""},
+ {"Printer.HeadingLevel", Field, 19, ""},
+ {"Printer.TextCodePrefix", Field, 19, ""},
+ {"Printer.TextPrefix", Field, 19, ""},
+ {"Printer.TextWidth", Field, 19, ""},
+ {"Text", Type, 19, ""},
+ },
+ "go/format": {
+ {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"},
+ {"Source", Func, 1, "func(src []byte) ([]byte, error)"},
+ },
+ "go/importer": {
+ {"Default", Func, 5, "func() types.Importer"},
+ {"For", Func, 5, "func(compiler string, lookup Lookup) types.Importer"},
+ {"ForCompiler", Func, 12, "func(fset *token.FileSet, compiler string, lookup Lookup) types.Importer"},
+ {"Lookup", Type, 5, ""},
+ },
+ "go/parser": {
+ {"AllErrors", Const, 1, ""},
+ {"DeclarationErrors", Const, 0, ""},
+ {"ImportsOnly", Const, 0, ""},
+ {"Mode", Type, 0, ""},
+ {"PackageClauseOnly", Const, 0, ""},
+ {"ParseComments", Const, 0, ""},
+ {"ParseDir", Func, 0, "func(fset *token.FileSet, path string, filter func(fs.FileInfo) bool, mode Mode) (pkgs map[string]*ast.Package, first error)"},
+ {"ParseExpr", Func, 0, "func(x string) (ast.Expr, error)"},
+ {"ParseExprFrom", Func, 5, "func(fset *token.FileSet, filename string, src any, mode Mode) (expr ast.Expr, err error)"},
+ {"ParseFile", Func, 0, "func(fset *token.FileSet, filename string, src any, mode Mode) (f *ast.File, err error)"},
+ {"SkipObjectResolution", Const, 17, ""},
+ {"SpuriousErrors", Const, 0, ""},
+ {"Trace", Const, 0, ""},
+ },
+ "go/printer": {
+ {"(*Config).Fprint", Method, 0, ""},
+ {"CommentedNode", Type, 0, ""},
+ {"CommentedNode.Comments", Field, 0, ""},
+ {"CommentedNode.Node", Field, 0, ""},
+ {"Config", Type, 0, ""},
+ {"Config.Indent", Field, 1, ""},
+ {"Config.Mode", Field, 0, ""},
+ {"Config.Tabwidth", Field, 0, ""},
+ {"Fprint", Func, 0, "func(output io.Writer, fset *token.FileSet, node any) error"},
+ {"Mode", Type, 0, ""},
+ {"RawFormat", Const, 0, ""},
+ {"SourcePos", Const, 0, ""},
+ {"TabIndent", Const, 0, ""},
+ {"UseSpaces", Const, 0, ""},
+ },
+ "go/scanner": {
+ {"(*ErrorList).Add", Method, 0, ""},
+ {"(*ErrorList).RemoveMultiples", Method, 0, ""},
+ {"(*ErrorList).Reset", Method, 0, ""},
+ {"(*Scanner).Init", Method, 0, ""},
+ {"(*Scanner).Scan", Method, 0, ""},
+ {"(Error).Error", Method, 0, ""},
+ {"(ErrorList).Err", Method, 0, ""},
+ {"(ErrorList).Error", Method, 0, ""},
+ {"(ErrorList).Len", Method, 0, ""},
+ {"(ErrorList).Less", Method, 0, ""},
+ {"(ErrorList).Sort", Method, 0, ""},
+ {"(ErrorList).Swap", Method, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Msg", Field, 0, ""},
+ {"Error.Pos", Field, 0, ""},
+ {"ErrorHandler", Type, 0, ""},
+ {"ErrorList", Type, 0, ""},
+ {"Mode", Type, 0, ""},
+ {"PrintError", Func, 0, "func(w io.Writer, err error)"},
+ {"ScanComments", Const, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Scanner.ErrorCount", Field, 0, ""},
+ },
+ "go/token": {
+ {"(*File).AddLine", Method, 0, ""},
+ {"(*File).AddLineColumnInfo", Method, 11, ""},
+ {"(*File).AddLineInfo", Method, 0, ""},
+ {"(*File).Base", Method, 0, ""},
+ {"(*File).Line", Method, 0, ""},
+ {"(*File).LineCount", Method, 0, ""},
+ {"(*File).LineStart", Method, 12, ""},
+ {"(*File).Lines", Method, 21, ""},
+ {"(*File).MergeLine", Method, 2, ""},
+ {"(*File).Name", Method, 0, ""},
+ {"(*File).Offset", Method, 0, ""},
+ {"(*File).Pos", Method, 0, ""},
+ {"(*File).Position", Method, 0, ""},
+ {"(*File).PositionFor", Method, 4, ""},
+ {"(*File).SetLines", Method, 0, ""},
+ {"(*File).SetLinesForContent", Method, 0, ""},
+ {"(*File).Size", Method, 0, ""},
+ {"(*FileSet).AddExistingFiles", Method, 25, ""},
+ {"(*FileSet).AddFile", Method, 0, ""},
+ {"(*FileSet).Base", Method, 0, ""},
+ {"(*FileSet).File", Method, 0, ""},
+ {"(*FileSet).Iterate", Method, 0, ""},
+ {"(*FileSet).Position", Method, 0, ""},
+ {"(*FileSet).PositionFor", Method, 4, ""},
+ {"(*FileSet).Read", Method, 0, ""},
+ {"(*FileSet).RemoveFile", Method, 20, ""},
+ {"(*FileSet).Write", Method, 0, ""},
+ {"(*Position).IsValid", Method, 0, ""},
+ {"(Pos).IsValid", Method, 0, ""},
+ {"(Position).String", Method, 0, ""},
+ {"(Token).IsKeyword", Method, 0, ""},
+ {"(Token).IsLiteral", Method, 0, ""},
+ {"(Token).IsOperator", Method, 0, ""},
+ {"(Token).Precedence", Method, 0, ""},
+ {"(Token).String", Method, 0, ""},
+ {"ADD", Const, 0, ""},
+ {"ADD_ASSIGN", Const, 0, ""},
+ {"AND", Const, 0, ""},
+ {"AND_ASSIGN", Const, 0, ""},
+ {"AND_NOT", Const, 0, ""},
+ {"AND_NOT_ASSIGN", Const, 0, ""},
+ {"ARROW", Const, 0, ""},
+ {"ASSIGN", Const, 0, ""},
+ {"BREAK", Const, 0, ""},
+ {"CASE", Const, 0, ""},
+ {"CHAN", Const, 0, ""},
+ {"CHAR", Const, 0, ""},
+ {"COLON", Const, 0, ""},
+ {"COMMA", Const, 0, ""},
+ {"COMMENT", Const, 0, ""},
+ {"CONST", Const, 0, ""},
+ {"CONTINUE", Const, 0, ""},
+ {"DEC", Const, 0, ""},
+ {"DEFAULT", Const, 0, ""},
+ {"DEFER", Const, 0, ""},
+ {"DEFINE", Const, 0, ""},
+ {"ELLIPSIS", Const, 0, ""},
+ {"ELSE", Const, 0, ""},
+ {"EOF", Const, 0, ""},
+ {"EQL", Const, 0, ""},
+ {"FALLTHROUGH", Const, 0, ""},
+ {"FLOAT", Const, 0, ""},
+ {"FOR", Const, 0, ""},
+ {"FUNC", Const, 0, ""},
+ {"File", Type, 0, ""},
+ {"FileSet", Type, 0, ""},
+ {"GEQ", Const, 0, ""},
+ {"GO", Const, 0, ""},
+ {"GOTO", Const, 0, ""},
+ {"GTR", Const, 0, ""},
+ {"HighestPrec", Const, 0, ""},
+ {"IDENT", Const, 0, ""},
+ {"IF", Const, 0, ""},
+ {"ILLEGAL", Const, 0, ""},
+ {"IMAG", Const, 0, ""},
+ {"IMPORT", Const, 0, ""},
+ {"INC", Const, 0, ""},
+ {"INT", Const, 0, ""},
+ {"INTERFACE", Const, 0, ""},
+ {"IsExported", Func, 13, "func(name string) bool"},
+ {"IsIdentifier", Func, 13, "func(name string) bool"},
+ {"IsKeyword", Func, 13, "func(name string) bool"},
+ {"LAND", Const, 0, ""},
+ {"LBRACE", Const, 0, ""},
+ {"LBRACK", Const, 0, ""},
+ {"LEQ", Const, 0, ""},
+ {"LOR", Const, 0, ""},
+ {"LPAREN", Const, 0, ""},
+ {"LSS", Const, 0, ""},
+ {"Lookup", Func, 0, "func(ident string) Token"},
+ {"LowestPrec", Const, 0, ""},
+ {"MAP", Const, 0, ""},
+ {"MUL", Const, 0, ""},
+ {"MUL_ASSIGN", Const, 0, ""},
+ {"NEQ", Const, 0, ""},
+ {"NOT", Const, 0, ""},
+ {"NewFileSet", Func, 0, "func() *FileSet"},
+ {"NoPos", Const, 0, ""},
+ {"OR", Const, 0, ""},
+ {"OR_ASSIGN", Const, 0, ""},
+ {"PACKAGE", Const, 0, ""},
+ {"PERIOD", Const, 0, ""},
+ {"Pos", Type, 0, ""},
+ {"Position", Type, 0, ""},
+ {"Position.Column", Field, 0, ""},
+ {"Position.Filename", Field, 0, ""},
+ {"Position.Line", Field, 0, ""},
+ {"Position.Offset", Field, 0, ""},
+ {"QUO", Const, 0, ""},
+ {"QUO_ASSIGN", Const, 0, ""},
+ {"RANGE", Const, 0, ""},
+ {"RBRACE", Const, 0, ""},
+ {"RBRACK", Const, 0, ""},
+ {"REM", Const, 0, ""},
+ {"REM_ASSIGN", Const, 0, ""},
+ {"RETURN", Const, 0, ""},
+ {"RPAREN", Const, 0, ""},
+ {"SELECT", Const, 0, ""},
+ {"SEMICOLON", Const, 0, ""},
+ {"SHL", Const, 0, ""},
+ {"SHL_ASSIGN", Const, 0, ""},
+ {"SHR", Const, 0, ""},
+ {"SHR_ASSIGN", Const, 0, ""},
+ {"STRING", Const, 0, ""},
+ {"STRUCT", Const, 0, ""},
+ {"SUB", Const, 0, ""},
+ {"SUB_ASSIGN", Const, 0, ""},
+ {"SWITCH", Const, 0, ""},
+ {"TILDE", Const, 18, ""},
+ {"TYPE", Const, 0, ""},
+ {"Token", Type, 0, ""},
+ {"UnaryPrec", Const, 0, ""},
+ {"VAR", Const, 0, ""},
+ {"XOR", Const, 0, ""},
+ {"XOR_ASSIGN", Const, 0, ""},
+ },
+ "go/types": {
+ {"(*Alias).Obj", Method, 22, ""},
+ {"(*Alias).Origin", Method, 23, ""},
+ {"(*Alias).Rhs", Method, 23, ""},
+ {"(*Alias).SetTypeParams", Method, 23, ""},
+ {"(*Alias).String", Method, 22, ""},
+ {"(*Alias).TypeArgs", Method, 23, ""},
+ {"(*Alias).TypeParams", Method, 23, ""},
+ {"(*Alias).Underlying", Method, 22, ""},
+ {"(*ArgumentError).Error", Method, 18, ""},
+ {"(*ArgumentError).Unwrap", Method, 18, ""},
+ {"(*Array).Elem", Method, 5, ""},
+ {"(*Array).Len", Method, 5, ""},
+ {"(*Array).String", Method, 5, ""},
+ {"(*Array).Underlying", Method, 5, ""},
+ {"(*Basic).Info", Method, 5, ""},
+ {"(*Basic).Kind", Method, 5, ""},
+ {"(*Basic).Name", Method, 5, ""},
+ {"(*Basic).String", Method, 5, ""},
+ {"(*Basic).Underlying", Method, 5, ""},
+ {"(*Builtin).Exported", Method, 5, ""},
+ {"(*Builtin).Id", Method, 5, ""},
+ {"(*Builtin).Name", Method, 5, ""},
+ {"(*Builtin).Parent", Method, 5, ""},
+ {"(*Builtin).Pkg", Method, 5, ""},
+ {"(*Builtin).Pos", Method, 5, ""},
+ {"(*Builtin).String", Method, 5, ""},
+ {"(*Builtin).Type", Method, 5, ""},
+ {"(*Chan).Dir", Method, 5, ""},
+ {"(*Chan).Elem", Method, 5, ""},
+ {"(*Chan).String", Method, 5, ""},
+ {"(*Chan).Underlying", Method, 5, ""},
+ {"(*Checker).Files", Method, 5, ""},
+ {"(*Config).Check", Method, 5, ""},
+ {"(*Const).Exported", Method, 5, ""},
+ {"(*Const).Id", Method, 5, ""},
+ {"(*Const).Name", Method, 5, ""},
+ {"(*Const).Parent", Method, 5, ""},
+ {"(*Const).Pkg", Method, 5, ""},
+ {"(*Const).Pos", Method, 5, ""},
+ {"(*Const).String", Method, 5, ""},
+ {"(*Const).Type", Method, 5, ""},
+ {"(*Const).Val", Method, 5, ""},
+ {"(*Func).Exported", Method, 5, ""},
+ {"(*Func).FullName", Method, 5, ""},
+ {"(*Func).Id", Method, 5, ""},
+ {"(*Func).Name", Method, 5, ""},
+ {"(*Func).Origin", Method, 19, ""},
+ {"(*Func).Parent", Method, 5, ""},
+ {"(*Func).Pkg", Method, 5, ""},
+ {"(*Func).Pos", Method, 5, ""},
+ {"(*Func).Scope", Method, 5, ""},
+ {"(*Func).Signature", Method, 23, ""},
+ {"(*Func).String", Method, 5, ""},
+ {"(*Func).Type", Method, 5, ""},
+ {"(*Info).ObjectOf", Method, 5, ""},
+ {"(*Info).PkgNameOf", Method, 22, ""},
+ {"(*Info).TypeOf", Method, 5, ""},
+ {"(*Initializer).String", Method, 5, ""},
+ {"(*Interface).Complete", Method, 5, ""},
+ {"(*Interface).Embedded", Method, 5, ""},
+ {"(*Interface).EmbeddedType", Method, 11, ""},
+ {"(*Interface).EmbeddedTypes", Method, 24, ""},
+ {"(*Interface).Empty", Method, 5, ""},
+ {"(*Interface).ExplicitMethod", Method, 5, ""},
+ {"(*Interface).ExplicitMethods", Method, 24, ""},
+ {"(*Interface).IsComparable", Method, 18, ""},
+ {"(*Interface).IsImplicit", Method, 18, ""},
+ {"(*Interface).IsMethodSet", Method, 18, ""},
+ {"(*Interface).MarkImplicit", Method, 18, ""},
+ {"(*Interface).Method", Method, 5, ""},
+ {"(*Interface).Methods", Method, 24, ""},
+ {"(*Interface).NumEmbeddeds", Method, 5, ""},
+ {"(*Interface).NumExplicitMethods", Method, 5, ""},
+ {"(*Interface).NumMethods", Method, 5, ""},
+ {"(*Interface).String", Method, 5, ""},
+ {"(*Interface).Underlying", Method, 5, ""},
+ {"(*Label).Exported", Method, 5, ""},
+ {"(*Label).Id", Method, 5, ""},
+ {"(*Label).Name", Method, 5, ""},
+ {"(*Label).Parent", Method, 5, ""},
+ {"(*Label).Pkg", Method, 5, ""},
+ {"(*Label).Pos", Method, 5, ""},
+ {"(*Label).String", Method, 5, ""},
+ {"(*Label).Type", Method, 5, ""},
+ {"(*Map).Elem", Method, 5, ""},
+ {"(*Map).Key", Method, 5, ""},
+ {"(*Map).String", Method, 5, ""},
+ {"(*Map).Underlying", Method, 5, ""},
+ {"(*MethodSet).At", Method, 5, ""},
+ {"(*MethodSet).Len", Method, 5, ""},
+ {"(*MethodSet).Lookup", Method, 5, ""},
+ {"(*MethodSet).Methods", Method, 24, ""},
+ {"(*MethodSet).String", Method, 5, ""},
+ {"(*Named).AddMethod", Method, 5, ""},
+ {"(*Named).Method", Method, 5, ""},
+ {"(*Named).Methods", Method, 24, ""},
+ {"(*Named).NumMethods", Method, 5, ""},
+ {"(*Named).Obj", Method, 5, ""},
+ {"(*Named).Origin", Method, 18, ""},
+ {"(*Named).SetTypeParams", Method, 18, ""},
+ {"(*Named).SetUnderlying", Method, 5, ""},
+ {"(*Named).String", Method, 5, ""},
+ {"(*Named).TypeArgs", Method, 18, ""},
+ {"(*Named).TypeParams", Method, 18, ""},
+ {"(*Named).Underlying", Method, 5, ""},
+ {"(*Nil).Exported", Method, 5, ""},
+ {"(*Nil).Id", Method, 5, ""},
+ {"(*Nil).Name", Method, 5, ""},
+ {"(*Nil).Parent", Method, 5, ""},
+ {"(*Nil).Pkg", Method, 5, ""},
+ {"(*Nil).Pos", Method, 5, ""},
+ {"(*Nil).String", Method, 5, ""},
+ {"(*Nil).Type", Method, 5, ""},
+ {"(*Package).Complete", Method, 5, ""},
+ {"(*Package).GoVersion", Method, 21, ""},
+ {"(*Package).Imports", Method, 5, ""},
+ {"(*Package).MarkComplete", Method, 5, ""},
+ {"(*Package).Name", Method, 5, ""},
+ {"(*Package).Path", Method, 5, ""},
+ {"(*Package).Scope", Method, 5, ""},
+ {"(*Package).SetImports", Method, 5, ""},
+ {"(*Package).SetName", Method, 6, ""},
+ {"(*Package).String", Method, 5, ""},
+ {"(*PkgName).Exported", Method, 5, ""},
+ {"(*PkgName).Id", Method, 5, ""},
+ {"(*PkgName).Imported", Method, 5, ""},
+ {"(*PkgName).Name", Method, 5, ""},
+ {"(*PkgName).Parent", Method, 5, ""},
+ {"(*PkgName).Pkg", Method, 5, ""},
+ {"(*PkgName).Pos", Method, 5, ""},
+ {"(*PkgName).String", Method, 5, ""},
+ {"(*PkgName).Type", Method, 5, ""},
+ {"(*Pointer).Elem", Method, 5, ""},
+ {"(*Pointer).String", Method, 5, ""},
+ {"(*Pointer).Underlying", Method, 5, ""},
+ {"(*Scope).Child", Method, 5, ""},
+ {"(*Scope).Children", Method, 24, ""},
+ {"(*Scope).Contains", Method, 5, ""},
+ {"(*Scope).End", Method, 5, ""},
+ {"(*Scope).Innermost", Method, 5, ""},
+ {"(*Scope).Insert", Method, 5, ""},
+ {"(*Scope).Len", Method, 5, ""},
+ {"(*Scope).Lookup", Method, 5, ""},
+ {"(*Scope).LookupParent", Method, 5, ""},
+ {"(*Scope).Names", Method, 5, ""},
+ {"(*Scope).NumChildren", Method, 5, ""},
+ {"(*Scope).Parent", Method, 5, ""},
+ {"(*Scope).Pos", Method, 5, ""},
+ {"(*Scope).String", Method, 5, ""},
+ {"(*Scope).WriteTo", Method, 5, ""},
+ {"(*Selection).Index", Method, 5, ""},
+ {"(*Selection).Indirect", Method, 5, ""},
+ {"(*Selection).Kind", Method, 5, ""},
+ {"(*Selection).Obj", Method, 5, ""},
+ {"(*Selection).Recv", Method, 5, ""},
+ {"(*Selection).String", Method, 5, ""},
+ {"(*Selection).Type", Method, 5, ""},
+ {"(*Signature).Params", Method, 5, ""},
+ {"(*Signature).Recv", Method, 5, ""},
+ {"(*Signature).RecvTypeParams", Method, 18, ""},
+ {"(*Signature).Results", Method, 5, ""},
+ {"(*Signature).String", Method, 5, ""},
+ {"(*Signature).TypeParams", Method, 18, ""},
+ {"(*Signature).Underlying", Method, 5, ""},
+ {"(*Signature).Variadic", Method, 5, ""},
+ {"(*Slice).Elem", Method, 5, ""},
+ {"(*Slice).String", Method, 5, ""},
+ {"(*Slice).Underlying", Method, 5, ""},
+ {"(*StdSizes).Alignof", Method, 5, ""},
+ {"(*StdSizes).Offsetsof", Method, 5, ""},
+ {"(*StdSizes).Sizeof", Method, 5, ""},
+ {"(*Struct).Field", Method, 5, ""},
+ {"(*Struct).Fields", Method, 24, ""},
+ {"(*Struct).NumFields", Method, 5, ""},
+ {"(*Struct).String", Method, 5, ""},
+ {"(*Struct).Tag", Method, 5, ""},
+ {"(*Struct).Underlying", Method, 5, ""},
+ {"(*Term).String", Method, 18, ""},
+ {"(*Term).Tilde", Method, 18, ""},
+ {"(*Term).Type", Method, 18, ""},
+ {"(*Tuple).At", Method, 5, ""},
+ {"(*Tuple).Len", Method, 5, ""},
+ {"(*Tuple).String", Method, 5, ""},
+ {"(*Tuple).Underlying", Method, 5, ""},
+ {"(*Tuple).Variables", Method, 24, ""},
+ {"(*TypeList).At", Method, 18, ""},
+ {"(*TypeList).Len", Method, 18, ""},
+ {"(*TypeList).Types", Method, 24, ""},
+ {"(*TypeName).Exported", Method, 5, ""},
+ {"(*TypeName).Id", Method, 5, ""},
+ {"(*TypeName).IsAlias", Method, 9, ""},
+ {"(*TypeName).Name", Method, 5, ""},
+ {"(*TypeName).Parent", Method, 5, ""},
+ {"(*TypeName).Pkg", Method, 5, ""},
+ {"(*TypeName).Pos", Method, 5, ""},
+ {"(*TypeName).String", Method, 5, ""},
+ {"(*TypeName).Type", Method, 5, ""},
+ {"(*TypeParam).Constraint", Method, 18, ""},
+ {"(*TypeParam).Index", Method, 18, ""},
+ {"(*TypeParam).Obj", Method, 18, ""},
+ {"(*TypeParam).SetConstraint", Method, 18, ""},
+ {"(*TypeParam).String", Method, 18, ""},
+ {"(*TypeParam).Underlying", Method, 18, ""},
+ {"(*TypeParamList).At", Method, 18, ""},
+ {"(*TypeParamList).Len", Method, 18, ""},
+ {"(*TypeParamList).TypeParams", Method, 24, ""},
+ {"(*Union).Len", Method, 18, ""},
+ {"(*Union).String", Method, 18, ""},
+ {"(*Union).Term", Method, 18, ""},
+ {"(*Union).Terms", Method, 24, ""},
+ {"(*Union).Underlying", Method, 18, ""},
+ {"(*Var).Anonymous", Method, 5, ""},
+ {"(*Var).Embedded", Method, 11, ""},
+ {"(*Var).Exported", Method, 5, ""},
+ {"(*Var).Id", Method, 5, ""},
+ {"(*Var).IsField", Method, 5, ""},
+ {"(*Var).Kind", Method, 25, ""},
+ {"(*Var).Name", Method, 5, ""},
+ {"(*Var).Origin", Method, 19, ""},
+ {"(*Var).Parent", Method, 5, ""},
+ {"(*Var).Pkg", Method, 5, ""},
+ {"(*Var).Pos", Method, 5, ""},
+ {"(*Var).SetKind", Method, 25, ""},
+ {"(*Var).String", Method, 5, ""},
+ {"(*Var).Type", Method, 5, ""},
+ {"(Checker).ObjectOf", Method, 5, ""},
+ {"(Checker).PkgNameOf", Method, 22, ""},
+ {"(Checker).TypeOf", Method, 5, ""},
+ {"(Error).Error", Method, 5, ""},
+ {"(TypeAndValue).Addressable", Method, 5, ""},
+ {"(TypeAndValue).Assignable", Method, 5, ""},
+ {"(TypeAndValue).HasOk", Method, 5, ""},
+ {"(TypeAndValue).IsBuiltin", Method, 5, ""},
+ {"(TypeAndValue).IsNil", Method, 5, ""},
+ {"(TypeAndValue).IsType", Method, 5, ""},
+ {"(TypeAndValue).IsValue", Method, 5, ""},
+ {"(TypeAndValue).IsVoid", Method, 5, ""},
+ {"(VarKind).String", Method, 25, ""},
+ {"Alias", Type, 22, ""},
+ {"ArgumentError", Type, 18, ""},
+ {"ArgumentError.Err", Field, 18, ""},
+ {"ArgumentError.Index", Field, 18, ""},
+ {"Array", Type, 5, ""},
+ {"AssertableTo", Func, 5, "func(V *Interface, T Type) bool"},
+ {"AssignableTo", Func, 5, "func(V Type, T Type) bool"},
+ {"Basic", Type, 5, ""},
+ {"BasicInfo", Type, 5, ""},
+ {"BasicKind", Type, 5, ""},
+ {"Bool", Const, 5, ""},
+ {"Builtin", Type, 5, ""},
+ {"Byte", Const, 5, ""},
+ {"Chan", Type, 5, ""},
+ {"ChanDir", Type, 5, ""},
+ {"CheckExpr", Func, 13, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr ast.Expr, info *Info) (err error)"},
+ {"Checker", Type, 5, ""},
+ {"Checker.Info", Field, 5, ""},
+ {"Comparable", Func, 5, "func(T Type) bool"},
+ {"Complex128", Const, 5, ""},
+ {"Complex64", Const, 5, ""},
+ {"Config", Type, 5, ""},
+ {"Config.Context", Field, 18, ""},
+ {"Config.DisableUnusedImportCheck", Field, 5, ""},
+ {"Config.Error", Field, 5, ""},
+ {"Config.FakeImportC", Field, 5, ""},
+ {"Config.GoVersion", Field, 18, ""},
+ {"Config.IgnoreFuncBodies", Field, 5, ""},
+ {"Config.Importer", Field, 5, ""},
+ {"Config.Sizes", Field, 5, ""},
+ {"Const", Type, 5, ""},
+ {"Context", Type, 18, ""},
+ {"ConvertibleTo", Func, 5, "func(V Type, T Type) bool"},
+ {"DefPredeclaredTestFuncs", Func, 5, "func()"},
+ {"Default", Func, 8, "func(t Type) Type"},
+ {"Error", Type, 5, ""},
+ {"Error.Fset", Field, 5, ""},
+ {"Error.Msg", Field, 5, ""},
+ {"Error.Pos", Field, 5, ""},
+ {"Error.Soft", Field, 5, ""},
+ {"Eval", Func, 5, "func(fset *token.FileSet, pkg *Package, pos token.Pos, expr string) (_ TypeAndValue, err error)"},
+ {"ExprString", Func, 5, "func(x ast.Expr) string"},
+ {"FieldVal", Const, 5, ""},
+ {"FieldVar", Const, 25, ""},
+ {"Float32", Const, 5, ""},
+ {"Float64", Const, 5, ""},
+ {"Func", Type, 5, ""},
+ {"Id", Func, 5, "func(pkg *Package, name string) string"},
+ {"Identical", Func, 5, "func(x Type, y Type) bool"},
+ {"IdenticalIgnoreTags", Func, 8, "func(x Type, y Type) bool"},
+ {"Implements", Func, 5, "func(V Type, T *Interface) bool"},
+ {"ImportMode", Type, 6, ""},
+ {"Importer", Type, 5, ""},
+ {"ImporterFrom", Type, 6, ""},
+ {"Info", Type, 5, ""},
+ {"Info.Defs", Field, 5, ""},
+ {"Info.FileVersions", Field, 22, ""},
+ {"Info.Implicits", Field, 5, ""},
+ {"Info.InitOrder", Field, 5, ""},
+ {"Info.Instances", Field, 18, ""},
+ {"Info.Scopes", Field, 5, ""},
+ {"Info.Selections", Field, 5, ""},
+ {"Info.Types", Field, 5, ""},
+ {"Info.Uses", Field, 5, ""},
+ {"Initializer", Type, 5, ""},
+ {"Initializer.Lhs", Field, 5, ""},
+ {"Initializer.Rhs", Field, 5, ""},
+ {"Instance", Type, 18, ""},
+ {"Instance.Type", Field, 18, ""},
+ {"Instance.TypeArgs", Field, 18, ""},
+ {"Instantiate", Func, 18, "func(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error)"},
+ {"Int", Const, 5, ""},
+ {"Int16", Const, 5, ""},
+ {"Int32", Const, 5, ""},
+ {"Int64", Const, 5, ""},
+ {"Int8", Const, 5, ""},
+ {"Interface", Type, 5, ""},
+ {"Invalid", Const, 5, ""},
+ {"IsBoolean", Const, 5, ""},
+ {"IsComplex", Const, 5, ""},
+ {"IsConstType", Const, 5, ""},
+ {"IsFloat", Const, 5, ""},
+ {"IsInteger", Const, 5, ""},
+ {"IsInterface", Func, 5, "func(t Type) bool"},
+ {"IsNumeric", Const, 5, ""},
+ {"IsOrdered", Const, 5, ""},
+ {"IsString", Const, 5, ""},
+ {"IsUnsigned", Const, 5, ""},
+ {"IsUntyped", Const, 5, ""},
+ {"Label", Type, 5, ""},
+ {"LocalVar", Const, 25, ""},
+ {"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"},
+ {"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"},
+ {"Map", Type, 5, ""},
+ {"MethodExpr", Const, 5, ""},
+ {"MethodSet", Type, 5, ""},
+ {"MethodVal", Const, 5, ""},
+ {"MissingMethod", Func, 5, "func(V Type, T *Interface, static bool) (method *Func, wrongType bool)"},
+ {"Named", Type, 5, ""},
+ {"NewAlias", Func, 22, "func(obj *TypeName, rhs Type) *Alias"},
+ {"NewArray", Func, 5, "func(elem Type, len int64) *Array"},
+ {"NewChan", Func, 5, "func(dir ChanDir, elem Type) *Chan"},
+ {"NewChecker", Func, 5, "func(conf *Config, fset *token.FileSet, pkg *Package, info *Info) *Checker"},
+ {"NewConst", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const"},
+ {"NewContext", Func, 18, "func() *Context"},
+ {"NewField", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type, embedded bool) *Var"},
+ {"NewFunc", Func, 5, "func(pos token.Pos, pkg *Package, name string, sig *Signature) *Func"},
+ {"NewInterface", Func, 5, "func(methods []*Func, embeddeds []*Named) *Interface"},
+ {"NewInterfaceType", Func, 11, "func(methods []*Func, embeddeds []Type) *Interface"},
+ {"NewLabel", Func, 5, "func(pos token.Pos, pkg *Package, name string) *Label"},
+ {"NewMap", Func, 5, "func(key Type, elem Type) *Map"},
+ {"NewMethodSet", Func, 5, "func(T Type) *MethodSet"},
+ {"NewNamed", Func, 5, "func(obj *TypeName, underlying Type, methods []*Func) *Named"},
+ {"NewPackage", Func, 5, "func(path string, name string) *Package"},
+ {"NewParam", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+ {"NewPkgName", Func, 5, "func(pos token.Pos, pkg *Package, name string, imported *Package) *PkgName"},
+ {"NewPointer", Func, 5, "func(elem Type) *Pointer"},
+ {"NewScope", Func, 5, "func(parent *Scope, pos token.Pos, end token.Pos, comment string) *Scope"},
+ {"NewSignature", Func, 5, "func(recv *Var, params *Tuple, results *Tuple, variadic bool) *Signature"},
+ {"NewSignatureType", Func, 18, "func(recv *Var, recvTypeParams []*TypeParam, typeParams []*TypeParam, params *Tuple, results *Tuple, variadic bool) *Signature"},
+ {"NewSlice", Func, 5, "func(elem Type) *Slice"},
+ {"NewStruct", Func, 5, "func(fields []*Var, tags []string) *Struct"},
+ {"NewTerm", Func, 18, "func(tilde bool, typ Type) *Term"},
+ {"NewTuple", Func, 5, "func(x ...*Var) *Tuple"},
+ {"NewTypeName", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *TypeName"},
+ {"NewTypeParam", Func, 18, "func(obj *TypeName, constraint Type) *TypeParam"},
+ {"NewUnion", Func, 18, "func(terms []*Term) *Union"},
+ {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"},
+ {"Nil", Type, 5, ""},
+ {"Object", Type, 5, ""},
+ {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"},
+ {"Package", Type, 5, ""},
+ {"PackageVar", Const, 25, ""},
+ {"ParamVar", Const, 25, ""},
+ {"PkgName", Type, 5, ""},
+ {"Pointer", Type, 5, ""},
+ {"Qualifier", Type, 5, ""},
+ {"RecvOnly", Const, 5, ""},
+ {"RecvVar", Const, 25, ""},
+ {"RelativeTo", Func, 5, "func(pkg *Package) Qualifier"},
+ {"ResultVar", Const, 25, ""},
+ {"Rune", Const, 5, ""},
+ {"Satisfies", Func, 20, "func(V Type, T *Interface) bool"},
+ {"Scope", Type, 5, ""},
+ {"Selection", Type, 5, ""},
+ {"SelectionKind", Type, 5, ""},
+ {"SelectionString", Func, 5, "func(s *Selection, qf Qualifier) string"},
+ {"SendOnly", Const, 5, ""},
+ {"SendRecv", Const, 5, ""},
+ {"Signature", Type, 5, ""},
+ {"Sizes", Type, 5, ""},
+ {"SizesFor", Func, 9, "func(compiler string, arch string) Sizes"},
+ {"Slice", Type, 5, ""},
+ {"StdSizes", Type, 5, ""},
+ {"StdSizes.MaxAlign", Field, 5, ""},
+ {"StdSizes.WordSize", Field, 5, ""},
+ {"String", Const, 5, ""},
+ {"Struct", Type, 5, ""},
+ {"Term", Type, 18, ""},
+ {"Tuple", Type, 5, ""},
+ {"Typ", Var, 5, ""},
+ {"Type", Type, 5, ""},
+ {"TypeAndValue", Type, 5, ""},
+ {"TypeAndValue.Type", Field, 5, ""},
+ {"TypeAndValue.Value", Field, 5, ""},
+ {"TypeList", Type, 18, ""},
+ {"TypeName", Type, 5, ""},
+ {"TypeParam", Type, 18, ""},
+ {"TypeParamList", Type, 18, ""},
+ {"TypeString", Func, 5, "func(typ Type, qf Qualifier) string"},
+ {"Uint", Const, 5, ""},
+ {"Uint16", Const, 5, ""},
+ {"Uint32", Const, 5, ""},
+ {"Uint64", Const, 5, ""},
+ {"Uint8", Const, 5, ""},
+ {"Uintptr", Const, 5, ""},
+ {"Unalias", Func, 22, "func(t Type) Type"},
+ {"Union", Type, 18, ""},
+ {"Universe", Var, 5, ""},
+ {"Unsafe", Var, 5, ""},
+ {"UnsafePointer", Const, 5, ""},
+ {"UntypedBool", Const, 5, ""},
+ {"UntypedComplex", Const, 5, ""},
+ {"UntypedFloat", Const, 5, ""},
+ {"UntypedInt", Const, 5, ""},
+ {"UntypedNil", Const, 5, ""},
+ {"UntypedRune", Const, 5, ""},
+ {"UntypedString", Const, 5, ""},
+ {"Var", Type, 5, ""},
+ {"VarKind", Type, 25, ""},
+ {"WriteExpr", Func, 5, "func(buf *bytes.Buffer, x ast.Expr)"},
+ {"WriteSignature", Func, 5, "func(buf *bytes.Buffer, sig *Signature, qf Qualifier)"},
+ {"WriteType", Func, 5, "func(buf *bytes.Buffer, typ Type, qf Qualifier)"},
+ },
+ "go/version": {
+ {"Compare", Func, 22, "func(x string, y string) int"},
+ {"IsValid", Func, 22, "func(x string) bool"},
+ {"Lang", Func, 22, "func(x string) string"},
+ },
+ "hash": {
+ {"Cloner", Type, 25, ""},
+ {"Hash", Type, 0, ""},
+ {"Hash32", Type, 0, ""},
+ {"Hash64", Type, 0, ""},
+ {"XOF", Type, 25, ""},
+ },
+ "hash/adler32": {
+ {"Checksum", Func, 0, "func(data []byte) uint32"},
+ {"New", Func, 0, "func() hash.Hash32"},
+ {"Size", Const, 0, ""},
+ },
+ "hash/crc32": {
+ {"Castagnoli", Const, 0, ""},
+ {"Checksum", Func, 0, "func(data []byte, tab *Table) uint32"},
+ {"ChecksumIEEE", Func, 0, "func(data []byte) uint32"},
+ {"IEEE", Const, 0, ""},
+ {"IEEETable", Var, 0, ""},
+ {"Koopman", Const, 0, ""},
+ {"MakeTable", Func, 0, "func(poly uint32) *Table"},
+ {"New", Func, 0, "func(tab *Table) hash.Hash32"},
+ {"NewIEEE", Func, 0, "func() hash.Hash32"},
+ {"Size", Const, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Update", Func, 0, "func(crc uint32, tab *Table, p []byte) uint32"},
+ },
+ "hash/crc64": {
+ {"Checksum", Func, 0, "func(data []byte, tab *Table) uint64"},
+ {"ECMA", Const, 0, ""},
+ {"ISO", Const, 0, ""},
+ {"MakeTable", Func, 0, "func(poly uint64) *Table"},
+ {"New", Func, 0, "func(tab *Table) hash.Hash64"},
+ {"Size", Const, 0, ""},
+ {"Table", Type, 0, ""},
+ {"Update", Func, 0, "func(crc uint64, tab *Table, p []byte) uint64"},
+ },
+ "hash/fnv": {
+ {"New128", Func, 9, "func() hash.Hash"},
+ {"New128a", Func, 9, "func() hash.Hash"},
+ {"New32", Func, 0, "func() hash.Hash32"},
+ {"New32a", Func, 0, "func() hash.Hash32"},
+ {"New64", Func, 0, "func() hash.Hash64"},
+ {"New64a", Func, 0, "func() hash.Hash64"},
+ },
+ "hash/maphash": {
+ {"(*Hash).BlockSize", Method, 14, ""},
+ {"(*Hash).Clone", Method, 25, ""},
+ {"(*Hash).Reset", Method, 14, ""},
+ {"(*Hash).Seed", Method, 14, ""},
+ {"(*Hash).SetSeed", Method, 14, ""},
+ {"(*Hash).Size", Method, 14, ""},
+ {"(*Hash).Sum", Method, 14, ""},
+ {"(*Hash).Sum64", Method, 14, ""},
+ {"(*Hash).Write", Method, 14, ""},
+ {"(*Hash).WriteByte", Method, 14, ""},
+ {"(*Hash).WriteString", Method, 14, ""},
+ {"Bytes", Func, 19, "func(seed Seed, b []byte) uint64"},
+ {"Comparable", Func, 24, "func[T comparable](seed Seed, v T) uint64"},
+ {"Hash", Type, 14, ""},
+ {"MakeSeed", Func, 14, "func() Seed"},
+ {"Seed", Type, 14, ""},
+ {"String", Func, 19, "func(seed Seed, s string) uint64"},
+ {"WriteComparable", Func, 24, "func[T comparable](h *Hash, x T)"},
+ },
+ "html": {
+ {"EscapeString", Func, 0, "func(s string) string"},
+ {"UnescapeString", Func, 0, "func(s string) string"},
+ },
+ "html/template": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Template).AddParseTree", Method, 0, ""},
+ {"(*Template).Clone", Method, 0, ""},
+ {"(*Template).DefinedTemplates", Method, 6, ""},
+ {"(*Template).Delims", Method, 0, ""},
+ {"(*Template).Execute", Method, 0, ""},
+ {"(*Template).ExecuteTemplate", Method, 0, ""},
+ {"(*Template).Funcs", Method, 0, ""},
+ {"(*Template).Lookup", Method, 0, ""},
+ {"(*Template).Name", Method, 0, ""},
+ {"(*Template).New", Method, 0, ""},
+ {"(*Template).Option", Method, 5, ""},
+ {"(*Template).Parse", Method, 0, ""},
+ {"(*Template).ParseFS", Method, 16, ""},
+ {"(*Template).ParseFiles", Method, 0, ""},
+ {"(*Template).ParseGlob", Method, 0, ""},
+ {"(*Template).Templates", Method, 0, ""},
+ {"CSS", Type, 0, ""},
+ {"ErrAmbigContext", Const, 0, ""},
+ {"ErrBadHTML", Const, 0, ""},
+ {"ErrBranchEnd", Const, 0, ""},
+ {"ErrEndContext", Const, 0, ""},
+ {"ErrJSTemplate", Const, 21, ""},
+ {"ErrNoSuchTemplate", Const, 0, ""},
+ {"ErrOutputContext", Const, 0, ""},
+ {"ErrPartialCharset", Const, 0, ""},
+ {"ErrPartialEscape", Const, 0, ""},
+ {"ErrPredefinedEscaper", Const, 9, ""},
+ {"ErrRangeLoopReentry", Const, 0, ""},
+ {"ErrSlashAmbig", Const, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Description", Field, 0, ""},
+ {"Error.ErrorCode", Field, 0, ""},
+ {"Error.Line", Field, 0, ""},
+ {"Error.Name", Field, 0, ""},
+ {"Error.Node", Field, 4, ""},
+ {"ErrorCode", Type, 0, ""},
+ {"FuncMap", Type, 0, ""},
+ {"HTML", Type, 0, ""},
+ {"HTMLAttr", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"HTMLEscapeString", Func, 0, "func(s string) string"},
+ {"HTMLEscaper", Func, 0, "func(args ...any) string"},
+ {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+ {"JS", Type, 0, ""},
+ {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"JSEscapeString", Func, 0, "func(s string) string"},
+ {"JSEscaper", Func, 0, "func(args ...any) string"},
+ {"JSStr", Type, 0, ""},
+ {"Must", Func, 0, "func(t *Template, err error) *Template"},
+ {"New", Func, 0, "func(name string) *Template"},
+ {"OK", Const, 0, ""},
+ {"ParseFS", Func, 16, "func(fs fs.FS, patterns ...string) (*Template, error)"},
+ {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+ {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+ {"Srcset", Type, 10, ""},
+ {"Template", Type, 0, ""},
+ {"Template.Tree", Field, 2, ""},
+ {"URL", Type, 0, ""},
+ {"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+ },
+ "image": {
+ {"(*Alpha).AlphaAt", Method, 4, ""},
+ {"(*Alpha).At", Method, 0, ""},
+ {"(*Alpha).Bounds", Method, 0, ""},
+ {"(*Alpha).ColorModel", Method, 0, ""},
+ {"(*Alpha).Opaque", Method, 0, ""},
+ {"(*Alpha).PixOffset", Method, 0, ""},
+ {"(*Alpha).RGBA64At", Method, 17, ""},
+ {"(*Alpha).Set", Method, 0, ""},
+ {"(*Alpha).SetAlpha", Method, 0, ""},
+ {"(*Alpha).SetRGBA64", Method, 17, ""},
+ {"(*Alpha).SubImage", Method, 0, ""},
+ {"(*Alpha16).Alpha16At", Method, 4, ""},
+ {"(*Alpha16).At", Method, 0, ""},
+ {"(*Alpha16).Bounds", Method, 0, ""},
+ {"(*Alpha16).ColorModel", Method, 0, ""},
+ {"(*Alpha16).Opaque", Method, 0, ""},
+ {"(*Alpha16).PixOffset", Method, 0, ""},
+ {"(*Alpha16).RGBA64At", Method, 17, ""},
+ {"(*Alpha16).Set", Method, 0, ""},
+ {"(*Alpha16).SetAlpha16", Method, 0, ""},
+ {"(*Alpha16).SetRGBA64", Method, 17, ""},
+ {"(*Alpha16).SubImage", Method, 0, ""},
+ {"(*CMYK).At", Method, 5, ""},
+ {"(*CMYK).Bounds", Method, 5, ""},
+ {"(*CMYK).CMYKAt", Method, 5, ""},
+ {"(*CMYK).ColorModel", Method, 5, ""},
+ {"(*CMYK).Opaque", Method, 5, ""},
+ {"(*CMYK).PixOffset", Method, 5, ""},
+ {"(*CMYK).RGBA64At", Method, 17, ""},
+ {"(*CMYK).Set", Method, 5, ""},
+ {"(*CMYK).SetCMYK", Method, 5, ""},
+ {"(*CMYK).SetRGBA64", Method, 17, ""},
+ {"(*CMYK).SubImage", Method, 5, ""},
+ {"(*Gray).At", Method, 0, ""},
+ {"(*Gray).Bounds", Method, 0, ""},
+ {"(*Gray).ColorModel", Method, 0, ""},
+ {"(*Gray).GrayAt", Method, 4, ""},
+ {"(*Gray).Opaque", Method, 0, ""},
+ {"(*Gray).PixOffset", Method, 0, ""},
+ {"(*Gray).RGBA64At", Method, 17, ""},
+ {"(*Gray).Set", Method, 0, ""},
+ {"(*Gray).SetGray", Method, 0, ""},
+ {"(*Gray).SetRGBA64", Method, 17, ""},
+ {"(*Gray).SubImage", Method, 0, ""},
+ {"(*Gray16).At", Method, 0, ""},
+ {"(*Gray16).Bounds", Method, 0, ""},
+ {"(*Gray16).ColorModel", Method, 0, ""},
+ {"(*Gray16).Gray16At", Method, 4, ""},
+ {"(*Gray16).Opaque", Method, 0, ""},
+ {"(*Gray16).PixOffset", Method, 0, ""},
+ {"(*Gray16).RGBA64At", Method, 17, ""},
+ {"(*Gray16).Set", Method, 0, ""},
+ {"(*Gray16).SetGray16", Method, 0, ""},
+ {"(*Gray16).SetRGBA64", Method, 17, ""},
+ {"(*Gray16).SubImage", Method, 0, ""},
+ {"(*NRGBA).At", Method, 0, ""},
+ {"(*NRGBA).Bounds", Method, 0, ""},
+ {"(*NRGBA).ColorModel", Method, 0, ""},
+ {"(*NRGBA).NRGBAAt", Method, 4, ""},
+ {"(*NRGBA).Opaque", Method, 0, ""},
+ {"(*NRGBA).PixOffset", Method, 0, ""},
+ {"(*NRGBA).RGBA64At", Method, 17, ""},
+ {"(*NRGBA).Set", Method, 0, ""},
+ {"(*NRGBA).SetNRGBA", Method, 0, ""},
+ {"(*NRGBA).SetRGBA64", Method, 17, ""},
+ {"(*NRGBA).SubImage", Method, 0, ""},
+ {"(*NRGBA64).At", Method, 0, ""},
+ {"(*NRGBA64).Bounds", Method, 0, ""},
+ {"(*NRGBA64).ColorModel", Method, 0, ""},
+ {"(*NRGBA64).NRGBA64At", Method, 4, ""},
+ {"(*NRGBA64).Opaque", Method, 0, ""},
+ {"(*NRGBA64).PixOffset", Method, 0, ""},
+ {"(*NRGBA64).RGBA64At", Method, 17, ""},
+ {"(*NRGBA64).Set", Method, 0, ""},
+ {"(*NRGBA64).SetNRGBA64", Method, 0, ""},
+ {"(*NRGBA64).SetRGBA64", Method, 17, ""},
+ {"(*NRGBA64).SubImage", Method, 0, ""},
+ {"(*NYCbCrA).AOffset", Method, 6, ""},
+ {"(*NYCbCrA).At", Method, 6, ""},
+ {"(*NYCbCrA).Bounds", Method, 6, ""},
+ {"(*NYCbCrA).COffset", Method, 6, ""},
+ {"(*NYCbCrA).ColorModel", Method, 6, ""},
+ {"(*NYCbCrA).NYCbCrAAt", Method, 6, ""},
+ {"(*NYCbCrA).Opaque", Method, 6, ""},
+ {"(*NYCbCrA).RGBA64At", Method, 17, ""},
+ {"(*NYCbCrA).SubImage", Method, 6, ""},
+ {"(*NYCbCrA).YCbCrAt", Method, 6, ""},
+ {"(*NYCbCrA).YOffset", Method, 6, ""},
+ {"(*Paletted).At", Method, 0, ""},
+ {"(*Paletted).Bounds", Method, 0, ""},
+ {"(*Paletted).ColorIndexAt", Method, 0, ""},
+ {"(*Paletted).ColorModel", Method, 0, ""},
+ {"(*Paletted).Opaque", Method, 0, ""},
+ {"(*Paletted).PixOffset", Method, 0, ""},
+ {"(*Paletted).RGBA64At", Method, 17, ""},
+ {"(*Paletted).Set", Method, 0, ""},
+ {"(*Paletted).SetColorIndex", Method, 0, ""},
+ {"(*Paletted).SetRGBA64", Method, 17, ""},
+ {"(*Paletted).SubImage", Method, 0, ""},
+ {"(*RGBA).At", Method, 0, ""},
+ {"(*RGBA).Bounds", Method, 0, ""},
+ {"(*RGBA).ColorModel", Method, 0, ""},
+ {"(*RGBA).Opaque", Method, 0, ""},
+ {"(*RGBA).PixOffset", Method, 0, ""},
+ {"(*RGBA).RGBA64At", Method, 17, ""},
+ {"(*RGBA).RGBAAt", Method, 4, ""},
+ {"(*RGBA).Set", Method, 0, ""},
+ {"(*RGBA).SetRGBA", Method, 0, ""},
+ {"(*RGBA).SetRGBA64", Method, 17, ""},
+ {"(*RGBA).SubImage", Method, 0, ""},
+ {"(*RGBA64).At", Method, 0, ""},
+ {"(*RGBA64).Bounds", Method, 0, ""},
+ {"(*RGBA64).ColorModel", Method, 0, ""},
+ {"(*RGBA64).Opaque", Method, 0, ""},
+ {"(*RGBA64).PixOffset", Method, 0, ""},
+ {"(*RGBA64).RGBA64At", Method, 4, ""},
+ {"(*RGBA64).Set", Method, 0, ""},
+ {"(*RGBA64).SetRGBA64", Method, 0, ""},
+ {"(*RGBA64).SubImage", Method, 0, ""},
+ {"(*Uniform).At", Method, 0, ""},
+ {"(*Uniform).Bounds", Method, 0, ""},
+ {"(*Uniform).ColorModel", Method, 0, ""},
+ {"(*Uniform).Convert", Method, 0, ""},
+ {"(*Uniform).Opaque", Method, 0, ""},
+ {"(*Uniform).RGBA", Method, 0, ""},
+ {"(*Uniform).RGBA64At", Method, 17, ""},
+ {"(*YCbCr).At", Method, 0, ""},
+ {"(*YCbCr).Bounds", Method, 0, ""},
+ {"(*YCbCr).COffset", Method, 0, ""},
+ {"(*YCbCr).ColorModel", Method, 0, ""},
+ {"(*YCbCr).Opaque", Method, 0, ""},
+ {"(*YCbCr).RGBA64At", Method, 17, ""},
+ {"(*YCbCr).SubImage", Method, 0, ""},
+ {"(*YCbCr).YCbCrAt", Method, 4, ""},
+ {"(*YCbCr).YOffset", Method, 0, ""},
+ {"(Point).Add", Method, 0, ""},
+ {"(Point).Div", Method, 0, ""},
+ {"(Point).Eq", Method, 0, ""},
+ {"(Point).In", Method, 0, ""},
+ {"(Point).Mod", Method, 0, ""},
+ {"(Point).Mul", Method, 0, ""},
+ {"(Point).String", Method, 0, ""},
+ {"(Point).Sub", Method, 0, ""},
+ {"(Rectangle).Add", Method, 0, ""},
+ {"(Rectangle).At", Method, 5, ""},
+ {"(Rectangle).Bounds", Method, 5, ""},
+ {"(Rectangle).Canon", Method, 0, ""},
+ {"(Rectangle).ColorModel", Method, 5, ""},
+ {"(Rectangle).Dx", Method, 0, ""},
+ {"(Rectangle).Dy", Method, 0, ""},
+ {"(Rectangle).Empty", Method, 0, ""},
+ {"(Rectangle).Eq", Method, 0, ""},
+ {"(Rectangle).In", Method, 0, ""},
+ {"(Rectangle).Inset", Method, 0, ""},
+ {"(Rectangle).Intersect", Method, 0, ""},
+ {"(Rectangle).Overlaps", Method, 0, ""},
+ {"(Rectangle).RGBA64At", Method, 17, ""},
+ {"(Rectangle).Size", Method, 0, ""},
+ {"(Rectangle).String", Method, 0, ""},
+ {"(Rectangle).Sub", Method, 0, ""},
+ {"(Rectangle).Union", Method, 0, ""},
+ {"(YCbCrSubsampleRatio).String", Method, 0, ""},
+ {"Alpha", Type, 0, ""},
+ {"Alpha.Pix", Field, 0, ""},
+ {"Alpha.Rect", Field, 0, ""},
+ {"Alpha.Stride", Field, 0, ""},
+ {"Alpha16", Type, 0, ""},
+ {"Alpha16.Pix", Field, 0, ""},
+ {"Alpha16.Rect", Field, 0, ""},
+ {"Alpha16.Stride", Field, 0, ""},
+ {"Black", Var, 0, ""},
+ {"CMYK", Type, 5, ""},
+ {"CMYK.Pix", Field, 5, ""},
+ {"CMYK.Rect", Field, 5, ""},
+ {"CMYK.Stride", Field, 5, ""},
+ {"Config", Type, 0, ""},
+ {"Config.ColorModel", Field, 0, ""},
+ {"Config.Height", Field, 0, ""},
+ {"Config.Width", Field, 0, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (Image, string, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (Config, string, error)"},
+ {"ErrFormat", Var, 0, ""},
+ {"Gray", Type, 0, ""},
+ {"Gray.Pix", Field, 0, ""},
+ {"Gray.Rect", Field, 0, ""},
+ {"Gray.Stride", Field, 0, ""},
+ {"Gray16", Type, 0, ""},
+ {"Gray16.Pix", Field, 0, ""},
+ {"Gray16.Rect", Field, 0, ""},
+ {"Gray16.Stride", Field, 0, ""},
+ {"Image", Type, 0, ""},
+ {"NRGBA", Type, 0, ""},
+ {"NRGBA.Pix", Field, 0, ""},
+ {"NRGBA.Rect", Field, 0, ""},
+ {"NRGBA.Stride", Field, 0, ""},
+ {"NRGBA64", Type, 0, ""},
+ {"NRGBA64.Pix", Field, 0, ""},
+ {"NRGBA64.Rect", Field, 0, ""},
+ {"NRGBA64.Stride", Field, 0, ""},
+ {"NYCbCrA", Type, 6, ""},
+ {"NYCbCrA.A", Field, 6, ""},
+ {"NYCbCrA.AStride", Field, 6, ""},
+ {"NYCbCrA.YCbCr", Field, 6, ""},
+ {"NewAlpha", Func, 0, "func(r Rectangle) *Alpha"},
+ {"NewAlpha16", Func, 0, "func(r Rectangle) *Alpha16"},
+ {"NewCMYK", Func, 5, "func(r Rectangle) *CMYK"},
+ {"NewGray", Func, 0, "func(r Rectangle) *Gray"},
+ {"NewGray16", Func, 0, "func(r Rectangle) *Gray16"},
+ {"NewNRGBA", Func, 0, "func(r Rectangle) *NRGBA"},
+ {"NewNRGBA64", Func, 0, "func(r Rectangle) *NRGBA64"},
+ {"NewNYCbCrA", Func, 6, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *NYCbCrA"},
+ {"NewPaletted", Func, 0, "func(r Rectangle, p color.Palette) *Paletted"},
+ {"NewRGBA", Func, 0, "func(r Rectangle) *RGBA"},
+ {"NewRGBA64", Func, 0, "func(r Rectangle) *RGBA64"},
+ {"NewUniform", Func, 0, "func(c color.Color) *Uniform"},
+ {"NewYCbCr", Func, 0, "func(r Rectangle, subsampleRatio YCbCrSubsampleRatio) *YCbCr"},
+ {"Opaque", Var, 0, ""},
+ {"Paletted", Type, 0, ""},
+ {"Paletted.Palette", Field, 0, ""},
+ {"Paletted.Pix", Field, 0, ""},
+ {"Paletted.Rect", Field, 0, ""},
+ {"Paletted.Stride", Field, 0, ""},
+ {"PalettedImage", Type, 0, ""},
+ {"Point", Type, 0, ""},
+ {"Point.X", Field, 0, ""},
+ {"Point.Y", Field, 0, ""},
+ {"Pt", Func, 0, "func(X int, Y int) Point"},
+ {"RGBA", Type, 0, ""},
+ {"RGBA.Pix", Field, 0, ""},
+ {"RGBA.Rect", Field, 0, ""},
+ {"RGBA.Stride", Field, 0, ""},
+ {"RGBA64", Type, 0, ""},
+ {"RGBA64.Pix", Field, 0, ""},
+ {"RGBA64.Rect", Field, 0, ""},
+ {"RGBA64.Stride", Field, 0, ""},
+ {"RGBA64Image", Type, 17, ""},
+ {"Rect", Func, 0, "func(x0 int, y0 int, x1 int, y1 int) Rectangle"},
+ {"Rectangle", Type, 0, ""},
+ {"Rectangle.Max", Field, 0, ""},
+ {"Rectangle.Min", Field, 0, ""},
+ {"RegisterFormat", Func, 0, "func(name string, magic string, decode func(io.Reader) (Image, error), decodeConfig func(io.Reader) (Config, error))"},
+ {"Transparent", Var, 0, ""},
+ {"Uniform", Type, 0, ""},
+ {"Uniform.C", Field, 0, ""},
+ {"White", Var, 0, ""},
+ {"YCbCr", Type, 0, ""},
+ {"YCbCr.CStride", Field, 0, ""},
+ {"YCbCr.Cb", Field, 0, ""},
+ {"YCbCr.Cr", Field, 0, ""},
+ {"YCbCr.Rect", Field, 0, ""},
+ {"YCbCr.SubsampleRatio", Field, 0, ""},
+ {"YCbCr.Y", Field, 0, ""},
+ {"YCbCr.YStride", Field, 0, ""},
+ {"YCbCrSubsampleRatio", Type, 0, ""},
+ {"YCbCrSubsampleRatio410", Const, 5, ""},
+ {"YCbCrSubsampleRatio411", Const, 5, ""},
+ {"YCbCrSubsampleRatio420", Const, 0, ""},
+ {"YCbCrSubsampleRatio422", Const, 0, ""},
+ {"YCbCrSubsampleRatio440", Const, 1, ""},
+ {"YCbCrSubsampleRatio444", Const, 0, ""},
+ {"ZP", Var, 0, ""},
+ {"ZR", Var, 0, ""},
+ },
+ "image/color": {
+ {"(Alpha).RGBA", Method, 0, ""},
+ {"(Alpha16).RGBA", Method, 0, ""},
+ {"(CMYK).RGBA", Method, 5, ""},
+ {"(Gray).RGBA", Method, 0, ""},
+ {"(Gray16).RGBA", Method, 0, ""},
+ {"(NRGBA).RGBA", Method, 0, ""},
+ {"(NRGBA64).RGBA", Method, 0, ""},
+ {"(NYCbCrA).RGBA", Method, 6, ""},
+ {"(Palette).Convert", Method, 0, ""},
+ {"(Palette).Index", Method, 0, ""},
+ {"(RGBA).RGBA", Method, 0, ""},
+ {"(RGBA64).RGBA", Method, 0, ""},
+ {"(YCbCr).RGBA", Method, 0, ""},
+ {"Alpha", Type, 0, ""},
+ {"Alpha.A", Field, 0, ""},
+ {"Alpha16", Type, 0, ""},
+ {"Alpha16.A", Field, 0, ""},
+ {"Alpha16Model", Var, 0, ""},
+ {"AlphaModel", Var, 0, ""},
+ {"Black", Var, 0, ""},
+ {"CMYK", Type, 5, ""},
+ {"CMYK.C", Field, 5, ""},
+ {"CMYK.K", Field, 5, ""},
+ {"CMYK.M", Field, 5, ""},
+ {"CMYK.Y", Field, 5, ""},
+ {"CMYKModel", Var, 5, ""},
+ {"CMYKToRGB", Func, 5, "func(c uint8, m uint8, y uint8, k uint8) (uint8, uint8, uint8)"},
+ {"Color", Type, 0, ""},
+ {"Gray", Type, 0, ""},
+ {"Gray.Y", Field, 0, ""},
+ {"Gray16", Type, 0, ""},
+ {"Gray16.Y", Field, 0, ""},
+ {"Gray16Model", Var, 0, ""},
+ {"GrayModel", Var, 0, ""},
+ {"Model", Type, 0, ""},
+ {"ModelFunc", Func, 0, "func(f func(Color) Color) Model"},
+ {"NRGBA", Type, 0, ""},
+ {"NRGBA.A", Field, 0, ""},
+ {"NRGBA.B", Field, 0, ""},
+ {"NRGBA.G", Field, 0, ""},
+ {"NRGBA.R", Field, 0, ""},
+ {"NRGBA64", Type, 0, ""},
+ {"NRGBA64.A", Field, 0, ""},
+ {"NRGBA64.B", Field, 0, ""},
+ {"NRGBA64.G", Field, 0, ""},
+ {"NRGBA64.R", Field, 0, ""},
+ {"NRGBA64Model", Var, 0, ""},
+ {"NRGBAModel", Var, 0, ""},
+ {"NYCbCrA", Type, 6, ""},
+ {"NYCbCrA.A", Field, 6, ""},
+ {"NYCbCrA.YCbCr", Field, 6, ""},
+ {"NYCbCrAModel", Var, 6, ""},
+ {"Opaque", Var, 0, ""},
+ {"Palette", Type, 0, ""},
+ {"RGBA", Type, 0, ""},
+ {"RGBA.A", Field, 0, ""},
+ {"RGBA.B", Field, 0, ""},
+ {"RGBA.G", Field, 0, ""},
+ {"RGBA.R", Field, 0, ""},
+ {"RGBA64", Type, 0, ""},
+ {"RGBA64.A", Field, 0, ""},
+ {"RGBA64.B", Field, 0, ""},
+ {"RGBA64.G", Field, 0, ""},
+ {"RGBA64.R", Field, 0, ""},
+ {"RGBA64Model", Var, 0, ""},
+ {"RGBAModel", Var, 0, ""},
+ {"RGBToCMYK", Func, 5, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8, uint8)"},
+ {"RGBToYCbCr", Func, 0, "func(r uint8, g uint8, b uint8) (uint8, uint8, uint8)"},
+ {"Transparent", Var, 0, ""},
+ {"White", Var, 0, ""},
+ {"YCbCr", Type, 0, ""},
+ {"YCbCr.Cb", Field, 0, ""},
+ {"YCbCr.Cr", Field, 0, ""},
+ {"YCbCr.Y", Field, 0, ""},
+ {"YCbCrModel", Var, 0, ""},
+ {"YCbCrToRGB", Func, 0, "func(y uint8, cb uint8, cr uint8) (uint8, uint8, uint8)"},
+ },
+ "image/color/palette": {
+ {"Plan9", Var, 2, ""},
+ {"WebSafe", Var, 2, ""},
+ },
+ "image/draw": {
+ {"(Op).Draw", Method, 2, ""},
+ {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"},
+ {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"},
+ {"Drawer", Type, 2, ""},
+ {"FloydSteinberg", Var, 2, ""},
+ {"Image", Type, 0, ""},
+ {"Op", Type, 0, ""},
+ {"Over", Const, 0, ""},
+ {"Quantizer", Type, 2, ""},
+ {"RGBA64Image", Type, 17, ""},
+ {"Src", Const, 0, ""},
+ },
+ "image/gif": {
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeAll", Func, 0, "func(r io.Reader) (*GIF, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DisposalBackground", Const, 5, ""},
+ {"DisposalNone", Const, 5, ""},
+ {"DisposalPrevious", Const, 5, ""},
+ {"Encode", Func, 2, "func(w io.Writer, m image.Image, o *Options) error"},
+ {"EncodeAll", Func, 2, "func(w io.Writer, g *GIF) error"},
+ {"GIF", Type, 0, ""},
+ {"GIF.BackgroundIndex", Field, 5, ""},
+ {"GIF.Config", Field, 5, ""},
+ {"GIF.Delay", Field, 0, ""},
+ {"GIF.Disposal", Field, 5, ""},
+ {"GIF.Image", Field, 0, ""},
+ {"GIF.LoopCount", Field, 0, ""},
+ {"Options", Type, 2, ""},
+ {"Options.Drawer", Field, 2, ""},
+ {"Options.NumColors", Field, 2, ""},
+ {"Options.Quantizer", Field, 2, ""},
+ },
+ "image/jpeg": {
+ {"(FormatError).Error", Method, 0, ""},
+ {"(UnsupportedError).Error", Method, 0, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DefaultQuality", Const, 0, ""},
+ {"Encode", Func, 0, "func(w io.Writer, m image.Image, o *Options) error"},
+ {"FormatError", Type, 0, ""},
+ {"Options", Type, 0, ""},
+ {"Options.Quality", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"UnsupportedError", Type, 0, ""},
+ },
+ "image/png": {
+ {"(*Encoder).Encode", Method, 4, ""},
+ {"(FormatError).Error", Method, 0, ""},
+ {"(UnsupportedError).Error", Method, 0, ""},
+ {"BestCompression", Const, 4, ""},
+ {"BestSpeed", Const, 4, ""},
+ {"CompressionLevel", Type, 4, ""},
+ {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"},
+ {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"},
+ {"DefaultCompression", Const, 4, ""},
+ {"Encode", Func, 0, "func(w io.Writer, m image.Image) error"},
+ {"Encoder", Type, 4, ""},
+ {"Encoder.BufferPool", Field, 9, ""},
+ {"Encoder.CompressionLevel", Field, 4, ""},
+ {"EncoderBuffer", Type, 9, ""},
+ {"EncoderBufferPool", Type, 9, ""},
+ {"FormatError", Type, 0, ""},
+ {"NoCompression", Const, 4, ""},
+ {"UnsupportedError", Type, 0, ""},
+ },
+ "index/suffixarray": {
+ {"(*Index).Bytes", Method, 0, ""},
+ {"(*Index).FindAllIndex", Method, 0, ""},
+ {"(*Index).Lookup", Method, 0, ""},
+ {"(*Index).Read", Method, 0, ""},
+ {"(*Index).Write", Method, 0, ""},
+ {"Index", Type, 0, ""},
+ {"New", Func, 0, "func(data []byte) *Index"},
+ },
+ "io": {
+ {"(*LimitedReader).Read", Method, 0, ""},
+ {"(*OffsetWriter).Seek", Method, 20, ""},
+ {"(*OffsetWriter).Write", Method, 20, ""},
+ {"(*OffsetWriter).WriteAt", Method, 20, ""},
+ {"(*PipeReader).Close", Method, 0, ""},
+ {"(*PipeReader).CloseWithError", Method, 0, ""},
+ {"(*PipeReader).Read", Method, 0, ""},
+ {"(*PipeWriter).Close", Method, 0, ""},
+ {"(*PipeWriter).CloseWithError", Method, 0, ""},
+ {"(*PipeWriter).Write", Method, 0, ""},
+ {"(*SectionReader).Outer", Method, 22, ""},
+ {"(*SectionReader).Read", Method, 0, ""},
+ {"(*SectionReader).ReadAt", Method, 0, ""},
+ {"(*SectionReader).Seek", Method, 0, ""},
+ {"(*SectionReader).Size", Method, 0, ""},
+ {"ByteReader", Type, 0, ""},
+ {"ByteScanner", Type, 0, ""},
+ {"ByteWriter", Type, 1, ""},
+ {"Closer", Type, 0, ""},
+ {"Copy", Func, 0, "func(dst Writer, src Reader) (written int64, err error)"},
+ {"CopyBuffer", Func, 5, "func(dst Writer, src Reader, buf []byte) (written int64, err error)"},
+ {"CopyN", Func, 0, "func(dst Writer, src Reader, n int64) (written int64, err error)"},
+ {"Discard", Var, 16, ""},
+ {"EOF", Var, 0, ""},
+ {"ErrClosedPipe", Var, 0, ""},
+ {"ErrNoProgress", Var, 1, ""},
+ {"ErrShortBuffer", Var, 0, ""},
+ {"ErrShortWrite", Var, 0, ""},
+ {"ErrUnexpectedEOF", Var, 0, ""},
+ {"LimitReader", Func, 0, "func(r Reader, n int64) Reader"},
+ {"LimitedReader", Type, 0, ""},
+ {"LimitedReader.N", Field, 0, ""},
+ {"LimitedReader.R", Field, 0, ""},
+ {"MultiReader", Func, 0, "func(readers ...Reader) Reader"},
+ {"MultiWriter", Func, 0, "func(writers ...Writer) Writer"},
+ {"NewOffsetWriter", Func, 20, "func(w WriterAt, off int64) *OffsetWriter"},
+ {"NewSectionReader", Func, 0, "func(r ReaderAt, off int64, n int64) *SectionReader"},
+ {"NopCloser", Func, 16, "func(r Reader) ReadCloser"},
+ {"OffsetWriter", Type, 20, ""},
+ {"Pipe", Func, 0, "func() (*PipeReader, *PipeWriter)"},
+ {"PipeReader", Type, 0, ""},
+ {"PipeWriter", Type, 0, ""},
+ {"ReadAll", Func, 16, "func(r Reader) ([]byte, error)"},
+ {"ReadAtLeast", Func, 0, "func(r Reader, buf []byte, min int) (n int, err error)"},
+ {"ReadCloser", Type, 0, ""},
+ {"ReadFull", Func, 0, "func(r Reader, buf []byte) (n int, err error)"},
+ {"ReadSeekCloser", Type, 16, ""},
+ {"ReadSeeker", Type, 0, ""},
+ {"ReadWriteCloser", Type, 0, ""},
+ {"ReadWriteSeeker", Type, 0, ""},
+ {"ReadWriter", Type, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"ReaderAt", Type, 0, ""},
+ {"ReaderFrom", Type, 0, ""},
+ {"RuneReader", Type, 0, ""},
+ {"RuneScanner", Type, 0, ""},
+ {"SectionReader", Type, 0, ""},
+ {"SeekCurrent", Const, 7, ""},
+ {"SeekEnd", Const, 7, ""},
+ {"SeekStart", Const, 7, ""},
+ {"Seeker", Type, 0, ""},
+ {"StringWriter", Type, 12, ""},
+ {"TeeReader", Func, 0, "func(r Reader, w Writer) Reader"},
+ {"WriteCloser", Type, 0, ""},
+ {"WriteSeeker", Type, 0, ""},
+ {"WriteString", Func, 0, "func(w Writer, s string) (n int, err error)"},
+ {"Writer", Type, 0, ""},
+ {"WriterAt", Type, 0, ""},
+ {"WriterTo", Type, 0, ""},
+ },
+ "io/fs": {
+ {"(*PathError).Error", Method, 16, ""},
+ {"(*PathError).Timeout", Method, 16, ""},
+ {"(*PathError).Unwrap", Method, 16, ""},
+ {"(FileMode).IsDir", Method, 16, ""},
+ {"(FileMode).IsRegular", Method, 16, ""},
+ {"(FileMode).Perm", Method, 16, ""},
+ {"(FileMode).String", Method, 16, ""},
+ {"(FileMode).Type", Method, 16, ""},
+ {"DirEntry", Type, 16, ""},
+ {"ErrClosed", Var, 16, ""},
+ {"ErrExist", Var, 16, ""},
+ {"ErrInvalid", Var, 16, ""},
+ {"ErrNotExist", Var, 16, ""},
+ {"ErrPermission", Var, 16, ""},
+ {"FS", Type, 16, ""},
+ {"File", Type, 16, ""},
+ {"FileInfo", Type, 16, ""},
+ {"FileInfoToDirEntry", Func, 17, "func(info FileInfo) DirEntry"},
+ {"FileMode", Type, 16, ""},
+ {"FormatDirEntry", Func, 21, "func(dir DirEntry) string"},
+ {"FormatFileInfo", Func, 21, "func(info FileInfo) string"},
+ {"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"},
+ {"GlobFS", Type, 16, ""},
+ {"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"},
+ {"ModeAppend", Const, 16, ""},
+ {"ModeCharDevice", Const, 16, ""},
+ {"ModeDevice", Const, 16, ""},
+ {"ModeDir", Const, 16, ""},
+ {"ModeExclusive", Const, 16, ""},
+ {"ModeIrregular", Const, 16, ""},
+ {"ModeNamedPipe", Const, 16, ""},
+ {"ModePerm", Const, 16, ""},
+ {"ModeSetgid", Const, 16, ""},
+ {"ModeSetuid", Const, 16, ""},
+ {"ModeSocket", Const, 16, ""},
+ {"ModeSticky", Const, 16, ""},
+ {"ModeSymlink", Const, 16, ""},
+ {"ModeTemporary", Const, 16, ""},
+ {"ModeType", Const, 16, ""},
+ {"PathError", Type, 16, ""},
+ {"PathError.Err", Field, 16, ""},
+ {"PathError.Op", Field, 16, ""},
+ {"PathError.Path", Field, 16, ""},
+ {"ReadDir", Func, 16, "func(fsys FS, name string) ([]DirEntry, error)"},
+ {"ReadDirFS", Type, 16, ""},
+ {"ReadDirFile", Type, 16, ""},
+ {"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"},
+ {"ReadFileFS", Type, 16, ""},
+ {"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"},
+ {"ReadLinkFS", Type, 25, ""},
+ {"SkipAll", Var, 20, ""},
+ {"SkipDir", Var, 16, ""},
+ {"Stat", Func, 16, "func(fsys FS, name string) (FileInfo, error)"},
+ {"StatFS", Type, 16, ""},
+ {"Sub", Func, 16, "func(fsys FS, dir string) (FS, error)"},
+ {"SubFS", Type, 16, ""},
+ {"ValidPath", Func, 16, "func(name string) bool"},
+ {"WalkDir", Func, 16, "func(fsys FS, root string, fn WalkDirFunc) error"},
+ {"WalkDirFunc", Type, 16, ""},
+ },
+ "io/ioutil": {
+ {"Discard", Var, 0, ""},
+ {"NopCloser", Func, 0, "func(r io.Reader) io.ReadCloser"},
+ {"ReadAll", Func, 0, "func(r io.Reader) ([]byte, error)"},
+ {"ReadDir", Func, 0, "func(dirname string) ([]fs.FileInfo, error)"},
+ {"ReadFile", Func, 0, "func(filename string) ([]byte, error)"},
+ {"TempDir", Func, 0, "func(dir string, pattern string) (name string, err error)"},
+ {"TempFile", Func, 0, "func(dir string, pattern string) (f *os.File, err error)"},
+ {"WriteFile", Func, 0, "func(filename string, data []byte, perm fs.FileMode) error"},
+ },
+ "iter": {
+ {"Pull", Func, 23, "func[V any](seq Seq[V]) (next func() (V, bool), stop func())"},
+ {"Pull2", Func, 23, "func[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func())"},
+ {"Seq", Type, 23, ""},
+ {"Seq2", Type, 23, ""},
+ },
+ "log": {
+ {"(*Logger).Fatal", Method, 0, ""},
+ {"(*Logger).Fatalf", Method, 0, ""},
+ {"(*Logger).Fatalln", Method, 0, ""},
+ {"(*Logger).Flags", Method, 0, ""},
+ {"(*Logger).Output", Method, 0, ""},
+ {"(*Logger).Panic", Method, 0, ""},
+ {"(*Logger).Panicf", Method, 0, ""},
+ {"(*Logger).Panicln", Method, 0, ""},
+ {"(*Logger).Prefix", Method, 0, ""},
+ {"(*Logger).Print", Method, 0, ""},
+ {"(*Logger).Printf", Method, 0, ""},
+ {"(*Logger).Println", Method, 0, ""},
+ {"(*Logger).SetFlags", Method, 0, ""},
+ {"(*Logger).SetOutput", Method, 5, ""},
+ {"(*Logger).SetPrefix", Method, 0, ""},
+ {"(*Logger).Writer", Method, 12, ""},
+ {"Default", Func, 16, "func() *Logger"},
+ {"Fatal", Func, 0, "func(v ...any)"},
+ {"Fatalf", Func, 0, "func(format string, v ...any)"},
+ {"Fatalln", Func, 0, "func(v ...any)"},
+ {"Flags", Func, 0, "func() int"},
+ {"LUTC", Const, 5, ""},
+ {"Ldate", Const, 0, ""},
+ {"Llongfile", Const, 0, ""},
+ {"Lmicroseconds", Const, 0, ""},
+ {"Lmsgprefix", Const, 14, ""},
+ {"Logger", Type, 0, ""},
+ {"Lshortfile", Const, 0, ""},
+ {"LstdFlags", Const, 0, ""},
+ {"Ltime", Const, 0, ""},
+ {"New", Func, 0, "func(out io.Writer, prefix string, flag int) *Logger"},
+ {"Output", Func, 5, "func(calldepth int, s string) error"},
+ {"Panic", Func, 0, "func(v ...any)"},
+ {"Panicf", Func, 0, "func(format string, v ...any)"},
+ {"Panicln", Func, 0, "func(v ...any)"},
+ {"Prefix", Func, 0, "func() string"},
+ {"Print", Func, 0, "func(v ...any)"},
+ {"Printf", Func, 0, "func(format string, v ...any)"},
+ {"Println", Func, 0, "func(v ...any)"},
+ {"SetFlags", Func, 0, "func(flag int)"},
+ {"SetOutput", Func, 0, "func(w io.Writer)"},
+ {"SetPrefix", Func, 0, "func(prefix string)"},
+ {"Writer", Func, 13, "func() io.Writer"},
+ },
+ "log/slog": {
+ {"(*JSONHandler).Enabled", Method, 21, ""},
+ {"(*JSONHandler).Handle", Method, 21, ""},
+ {"(*JSONHandler).WithAttrs", Method, 21, ""},
+ {"(*JSONHandler).WithGroup", Method, 21, ""},
+ {"(*Level).UnmarshalJSON", Method, 21, ""},
+ {"(*Level).UnmarshalText", Method, 21, ""},
+ {"(*LevelVar).AppendText", Method, 24, ""},
+ {"(*LevelVar).Level", Method, 21, ""},
+ {"(*LevelVar).MarshalText", Method, 21, ""},
+ {"(*LevelVar).Set", Method, 21, ""},
+ {"(*LevelVar).String", Method, 21, ""},
+ {"(*LevelVar).UnmarshalText", Method, 21, ""},
+ {"(*Logger).Debug", Method, 21, ""},
+ {"(*Logger).DebugContext", Method, 21, ""},
+ {"(*Logger).Enabled", Method, 21, ""},
+ {"(*Logger).Error", Method, 21, ""},
+ {"(*Logger).ErrorContext", Method, 21, ""},
+ {"(*Logger).Handler", Method, 21, ""},
+ {"(*Logger).Info", Method, 21, ""},
+ {"(*Logger).InfoContext", Method, 21, ""},
+ {"(*Logger).Log", Method, 21, ""},
+ {"(*Logger).LogAttrs", Method, 21, ""},
+ {"(*Logger).Warn", Method, 21, ""},
+ {"(*Logger).WarnContext", Method, 21, ""},
+ {"(*Logger).With", Method, 21, ""},
+ {"(*Logger).WithGroup", Method, 21, ""},
+ {"(*Record).Add", Method, 21, ""},
+ {"(*Record).AddAttrs", Method, 21, ""},
+ {"(*TextHandler).Enabled", Method, 21, ""},
+ {"(*TextHandler).Handle", Method, 21, ""},
+ {"(*TextHandler).WithAttrs", Method, 21, ""},
+ {"(*TextHandler).WithGroup", Method, 21, ""},
+ {"(Attr).Equal", Method, 21, ""},
+ {"(Attr).String", Method, 21, ""},
+ {"(Kind).String", Method, 21, ""},
+ {"(Level).AppendText", Method, 24, ""},
+ {"(Level).Level", Method, 21, ""},
+ {"(Level).MarshalJSON", Method, 21, ""},
+ {"(Level).MarshalText", Method, 21, ""},
+ {"(Level).String", Method, 21, ""},
+ {"(Record).Attrs", Method, 21, ""},
+ {"(Record).Clone", Method, 21, ""},
+ {"(Record).NumAttrs", Method, 21, ""},
+ {"(Record).Source", Method, 25, ""},
+ {"(Value).Any", Method, 21, ""},
+ {"(Value).Bool", Method, 21, ""},
+ {"(Value).Duration", Method, 21, ""},
+ {"(Value).Equal", Method, 21, ""},
+ {"(Value).Float64", Method, 21, ""},
+ {"(Value).Group", Method, 21, ""},
+ {"(Value).Int64", Method, 21, ""},
+ {"(Value).Kind", Method, 21, ""},
+ {"(Value).LogValuer", Method, 21, ""},
+ {"(Value).Resolve", Method, 21, ""},
+ {"(Value).String", Method, 21, ""},
+ {"(Value).Time", Method, 21, ""},
+ {"(Value).Uint64", Method, 21, ""},
+ {"Any", Func, 21, "func(key string, value any) Attr"},
+ {"AnyValue", Func, 21, "func(v any) Value"},
+ {"Attr", Type, 21, ""},
+ {"Attr.Key", Field, 21, ""},
+ {"Attr.Value", Field, 21, ""},
+ {"Bool", Func, 21, "func(key string, v bool) Attr"},
+ {"BoolValue", Func, 21, "func(v bool) Value"},
+ {"Debug", Func, 21, "func(msg string, args ...any)"},
+ {"DebugContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Default", Func, 21, "func() *Logger"},
+ {"DiscardHandler", Var, 24, ""},
+ {"Duration", Func, 21, "func(key string, v time.Duration) Attr"},
+ {"DurationValue", Func, 21, "func(v time.Duration) Value"},
+ {"Error", Func, 21, "func(msg string, args ...any)"},
+ {"ErrorContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Float64", Func, 21, "func(key string, v float64) Attr"},
+ {"Float64Value", Func, 21, "func(v float64) Value"},
+ {"Group", Func, 21, "func(key string, args ...any) Attr"},
+ {"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"},
+ {"GroupValue", Func, 21, "func(as ...Attr) Value"},
+ {"Handler", Type, 21, ""},
+ {"HandlerOptions", Type, 21, ""},
+ {"HandlerOptions.AddSource", Field, 21, ""},
+ {"HandlerOptions.Level", Field, 21, ""},
+ {"HandlerOptions.ReplaceAttr", Field, 21, ""},
+ {"Info", Func, 21, "func(msg string, args ...any)"},
+ {"InfoContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"Int", Func, 21, "func(key string, value int) Attr"},
+ {"Int64", Func, 21, "func(key string, value int64) Attr"},
+ {"Int64Value", Func, 21, "func(v int64) Value"},
+ {"IntValue", Func, 21, "func(v int) Value"},
+ {"JSONHandler", Type, 21, ""},
+ {"Kind", Type, 21, ""},
+ {"KindAny", Const, 21, ""},
+ {"KindBool", Const, 21, ""},
+ {"KindDuration", Const, 21, ""},
+ {"KindFloat64", Const, 21, ""},
+ {"KindGroup", Const, 21, ""},
+ {"KindInt64", Const, 21, ""},
+ {"KindLogValuer", Const, 21, ""},
+ {"KindString", Const, 21, ""},
+ {"KindTime", Const, 21, ""},
+ {"KindUint64", Const, 21, ""},
+ {"Level", Type, 21, ""},
+ {"LevelDebug", Const, 21, ""},
+ {"LevelError", Const, 21, ""},
+ {"LevelInfo", Const, 21, ""},
+ {"LevelKey", Const, 21, ""},
+ {"LevelVar", Type, 21, ""},
+ {"LevelWarn", Const, 21, ""},
+ {"Leveler", Type, 21, ""},
+ {"Log", Func, 21, "func(ctx context.Context, level Level, msg string, args ...any)"},
+ {"LogAttrs", Func, 21, "func(ctx context.Context, level Level, msg string, attrs ...Attr)"},
+ {"LogValuer", Type, 21, ""},
+ {"Logger", Type, 21, ""},
+ {"MessageKey", Const, 21, ""},
+ {"New", Func, 21, "func(h Handler) *Logger"},
+ {"NewJSONHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *JSONHandler"},
+ {"NewLogLogger", Func, 21, "func(h Handler, level Level) *log.Logger"},
+ {"NewRecord", Func, 21, "func(t time.Time, level Level, msg string, pc uintptr) Record"},
+ {"NewTextHandler", Func, 21, "func(w io.Writer, opts *HandlerOptions) *TextHandler"},
+ {"Record", Type, 21, ""},
+ {"Record.Level", Field, 21, ""},
+ {"Record.Message", Field, 21, ""},
+ {"Record.PC", Field, 21, ""},
+ {"Record.Time", Field, 21, ""},
+ {"SetDefault", Func, 21, "func(l *Logger)"},
+ {"SetLogLoggerLevel", Func, 22, "func(level Level) (oldLevel Level)"},
+ {"Source", Type, 21, ""},
+ {"Source.File", Field, 21, ""},
+ {"Source.Function", Field, 21, ""},
+ {"Source.Line", Field, 21, ""},
+ {"SourceKey", Const, 21, ""},
+ {"String", Func, 21, "func(key string, value string) Attr"},
+ {"StringValue", Func, 21, "func(value string) Value"},
+ {"TextHandler", Type, 21, ""},
+ {"Time", Func, 21, "func(key string, v time.Time) Attr"},
+ {"TimeKey", Const, 21, ""},
+ {"TimeValue", Func, 21, "func(v time.Time) Value"},
+ {"Uint64", Func, 21, "func(key string, v uint64) Attr"},
+ {"Uint64Value", Func, 21, "func(v uint64) Value"},
+ {"Value", Type, 21, ""},
+ {"Warn", Func, 21, "func(msg string, args ...any)"},
+ {"WarnContext", Func, 21, "func(ctx context.Context, msg string, args ...any)"},
+ {"With", Func, 21, "func(args ...any) *Logger"},
+ },
+ "log/syslog": {
+ {"(*Writer).Alert", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).Crit", Method, 0, ""},
+ {"(*Writer).Debug", Method, 0, ""},
+ {"(*Writer).Emerg", Method, 0, ""},
+ {"(*Writer).Err", Method, 0, ""},
+ {"(*Writer).Info", Method, 0, ""},
+ {"(*Writer).Notice", Method, 0, ""},
+ {"(*Writer).Warning", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"Dial", Func, 0, "func(network string, raddr string, priority Priority, tag string) (*Writer, error)"},
+ {"LOG_ALERT", Const, 0, ""},
+ {"LOG_AUTH", Const, 1, ""},
+ {"LOG_AUTHPRIV", Const, 1, ""},
+ {"LOG_CRIT", Const, 0, ""},
+ {"LOG_CRON", Const, 1, ""},
+ {"LOG_DAEMON", Const, 1, ""},
+ {"LOG_DEBUG", Const, 0, ""},
+ {"LOG_EMERG", Const, 0, ""},
+ {"LOG_ERR", Const, 0, ""},
+ {"LOG_FTP", Const, 1, ""},
+ {"LOG_INFO", Const, 0, ""},
+ {"LOG_KERN", Const, 1, ""},
+ {"LOG_LOCAL0", Const, 1, ""},
+ {"LOG_LOCAL1", Const, 1, ""},
+ {"LOG_LOCAL2", Const, 1, ""},
+ {"LOG_LOCAL3", Const, 1, ""},
+ {"LOG_LOCAL4", Const, 1, ""},
+ {"LOG_LOCAL5", Const, 1, ""},
+ {"LOG_LOCAL6", Const, 1, ""},
+ {"LOG_LOCAL7", Const, 1, ""},
+ {"LOG_LPR", Const, 1, ""},
+ {"LOG_MAIL", Const, 1, ""},
+ {"LOG_NEWS", Const, 1, ""},
+ {"LOG_NOTICE", Const, 0, ""},
+ {"LOG_SYSLOG", Const, 1, ""},
+ {"LOG_USER", Const, 1, ""},
+ {"LOG_UUCP", Const, 1, ""},
+ {"LOG_WARNING", Const, 0, ""},
+ {"New", Func, 0, "func(priority Priority, tag string) (*Writer, error)"},
+ {"NewLogger", Func, 0, "func(p Priority, logFlag int) (*log.Logger, error)"},
+ {"Priority", Type, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "maps": {
+ {"All", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq2[K, V]"},
+ {"Clone", Func, 21, "func[M ~map[K]V, K comparable, V any](m M) M"},
+ {"Collect", Func, 23, "func[K comparable, V any](seq iter.Seq2[K, V]) map[K]V"},
+ {"Copy", Func, 21, "func[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2)"},
+ {"DeleteFunc", Func, 21, "func[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool)"},
+ {"Equal", Func, 21, "func[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool"},
+ {"EqualFunc", Func, 21, "func[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool"},
+ {"Insert", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map, seq iter.Seq2[K, V])"},
+ {"Keys", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[K]"},
+ {"Values", Func, 23, "func[Map ~map[K]V, K comparable, V any](m Map) iter.Seq[V]"},
+ },
+ "math": {
+ {"Abs", Func, 0, "func(x float64) float64"},
+ {"Acos", Func, 0, "func(x float64) float64"},
+ {"Acosh", Func, 0, "func(x float64) float64"},
+ {"Asin", Func, 0, "func(x float64) float64"},
+ {"Asinh", Func, 0, "func(x float64) float64"},
+ {"Atan", Func, 0, "func(x float64) float64"},
+ {"Atan2", Func, 0, "func(y float64, x float64) float64"},
+ {"Atanh", Func, 0, "func(x float64) float64"},
+ {"Cbrt", Func, 0, "func(x float64) float64"},
+ {"Ceil", Func, 0, "func(x float64) float64"},
+ {"Copysign", Func, 0, "func(f float64, sign float64) float64"},
+ {"Cos", Func, 0, "func(x float64) float64"},
+ {"Cosh", Func, 0, "func(x float64) float64"},
+ {"Dim", Func, 0, "func(x float64, y float64) float64"},
+ {"E", Const, 0, ""},
+ {"Erf", Func, 0, "func(x float64) float64"},
+ {"Erfc", Func, 0, "func(x float64) float64"},
+ {"Erfcinv", Func, 10, "func(x float64) float64"},
+ {"Erfinv", Func, 10, "func(x float64) float64"},
+ {"Exp", Func, 0, "func(x float64) float64"},
+ {"Exp2", Func, 0, "func(x float64) float64"},
+ {"Expm1", Func, 0, "func(x float64) float64"},
+ {"FMA", Func, 14, "func(x float64, y float64, z float64) float64"},
+ {"Float32bits", Func, 0, "func(f float32) uint32"},
+ {"Float32frombits", Func, 0, "func(b uint32) float32"},
+ {"Float64bits", Func, 0, "func(f float64) uint64"},
+ {"Float64frombits", Func, 0, "func(b uint64) float64"},
+ {"Floor", Func, 0, "func(x float64) float64"},
+ {"Frexp", Func, 0, "func(f float64) (frac float64, exp int)"},
+ {"Gamma", Func, 0, "func(x float64) float64"},
+ {"Hypot", Func, 0, "func(p float64, q float64) float64"},
+ {"Ilogb", Func, 0, "func(x float64) int"},
+ {"Inf", Func, 0, "func(sign int) float64"},
+ {"IsInf", Func, 0, "func(f float64, sign int) bool"},
+ {"IsNaN", Func, 0, "func(f float64) (is bool)"},
+ {"J0", Func, 0, "func(x float64) float64"},
+ {"J1", Func, 0, "func(x float64) float64"},
+ {"Jn", Func, 0, "func(n int, x float64) float64"},
+ {"Ldexp", Func, 0, "func(frac float64, exp int) float64"},
+ {"Lgamma", Func, 0, "func(x float64) (lgamma float64, sign int)"},
+ {"Ln10", Const, 0, ""},
+ {"Ln2", Const, 0, ""},
+ {"Log", Func, 0, "func(x float64) float64"},
+ {"Log10", Func, 0, "func(x float64) float64"},
+ {"Log10E", Const, 0, ""},
+ {"Log1p", Func, 0, "func(x float64) float64"},
+ {"Log2", Func, 0, "func(x float64) float64"},
+ {"Log2E", Const, 0, ""},
+ {"Logb", Func, 0, "func(x float64) float64"},
+ {"Max", Func, 0, "func(x float64, y float64) float64"},
+ {"MaxFloat32", Const, 0, ""},
+ {"MaxFloat64", Const, 0, ""},
+ {"MaxInt", Const, 17, ""},
+ {"MaxInt16", Const, 0, ""},
+ {"MaxInt32", Const, 0, ""},
+ {"MaxInt64", Const, 0, ""},
+ {"MaxInt8", Const, 0, ""},
+ {"MaxUint", Const, 17, ""},
+ {"MaxUint16", Const, 0, ""},
+ {"MaxUint32", Const, 0, ""},
+ {"MaxUint64", Const, 0, ""},
+ {"MaxUint8", Const, 0, ""},
+ {"Min", Func, 0, "func(x float64, y float64) float64"},
+ {"MinInt", Const, 17, ""},
+ {"MinInt16", Const, 0, ""},
+ {"MinInt32", Const, 0, ""},
+ {"MinInt64", Const, 0, ""},
+ {"MinInt8", Const, 0, ""},
+ {"Mod", Func, 0, "func(x float64, y float64) float64"},
+ {"Modf", Func, 0, "func(f float64) (int float64, frac float64)"},
+ {"NaN", Func, 0, "func() float64"},
+ {"Nextafter", Func, 0, "func(x float64, y float64) (r float64)"},
+ {"Nextafter32", Func, 4, "func(x float32, y float32) (r float32)"},
+ {"Phi", Const, 0, ""},
+ {"Pi", Const, 0, ""},
+ {"Pow", Func, 0, "func(x float64, y float64) float64"},
+ {"Pow10", Func, 0, "func(n int) float64"},
+ {"Remainder", Func, 0, "func(x float64, y float64) float64"},
+ {"Round", Func, 10, "func(x float64) float64"},
+ {"RoundToEven", Func, 10, "func(x float64) float64"},
+ {"Signbit", Func, 0, "func(x float64) bool"},
+ {"Sin", Func, 0, "func(x float64) float64"},
+ {"Sincos", Func, 0, "func(x float64) (sin float64, cos float64)"},
+ {"Sinh", Func, 0, "func(x float64) float64"},
+ {"SmallestNonzeroFloat32", Const, 0, ""},
+ {"SmallestNonzeroFloat64", Const, 0, ""},
+ {"Sqrt", Func, 0, "func(x float64) float64"},
+ {"Sqrt2", Const, 0, ""},
+ {"SqrtE", Const, 0, ""},
+ {"SqrtPhi", Const, 0, ""},
+ {"SqrtPi", Const, 0, ""},
+ {"Tan", Func, 0, "func(x float64) float64"},
+ {"Tanh", Func, 0, "func(x float64) float64"},
+ {"Trunc", Func, 0, "func(x float64) float64"},
+ {"Y0", Func, 0, "func(x float64) float64"},
+ {"Y1", Func, 0, "func(x float64) float64"},
+ {"Yn", Func, 0, "func(n int, x float64) float64"},
+ },
+ "math/big": {
+ {"(*Float).Abs", Method, 5, ""},
+ {"(*Float).Acc", Method, 5, ""},
+ {"(*Float).Add", Method, 5, ""},
+ {"(*Float).Append", Method, 5, ""},
+ {"(*Float).AppendText", Method, 24, ""},
+ {"(*Float).Cmp", Method, 5, ""},
+ {"(*Float).Copy", Method, 5, ""},
+ {"(*Float).Float32", Method, 5, ""},
+ {"(*Float).Float64", Method, 5, ""},
+ {"(*Float).Format", Method, 5, ""},
+ {"(*Float).GobDecode", Method, 7, ""},
+ {"(*Float).GobEncode", Method, 7, ""},
+ {"(*Float).Int", Method, 5, ""},
+ {"(*Float).Int64", Method, 5, ""},
+ {"(*Float).IsInf", Method, 5, ""},
+ {"(*Float).IsInt", Method, 5, ""},
+ {"(*Float).MantExp", Method, 5, ""},
+ {"(*Float).MarshalText", Method, 6, ""},
+ {"(*Float).MinPrec", Method, 5, ""},
+ {"(*Float).Mode", Method, 5, ""},
+ {"(*Float).Mul", Method, 5, ""},
+ {"(*Float).Neg", Method, 5, ""},
+ {"(*Float).Parse", Method, 5, ""},
+ {"(*Float).Prec", Method, 5, ""},
+ {"(*Float).Quo", Method, 5, ""},
+ {"(*Float).Rat", Method, 5, ""},
+ {"(*Float).Scan", Method, 8, ""},
+ {"(*Float).Set", Method, 5, ""},
+ {"(*Float).SetFloat64", Method, 5, ""},
+ {"(*Float).SetInf", Method, 5, ""},
+ {"(*Float).SetInt", Method, 5, ""},
+ {"(*Float).SetInt64", Method, 5, ""},
+ {"(*Float).SetMantExp", Method, 5, ""},
+ {"(*Float).SetMode", Method, 5, ""},
+ {"(*Float).SetPrec", Method, 5, ""},
+ {"(*Float).SetRat", Method, 5, ""},
+ {"(*Float).SetString", Method, 5, ""},
+ {"(*Float).SetUint64", Method, 5, ""},
+ {"(*Float).Sign", Method, 5, ""},
+ {"(*Float).Signbit", Method, 5, ""},
+ {"(*Float).Sqrt", Method, 10, ""},
+ {"(*Float).String", Method, 5, ""},
+ {"(*Float).Sub", Method, 5, ""},
+ {"(*Float).Text", Method, 5, ""},
+ {"(*Float).Uint64", Method, 5, ""},
+ {"(*Float).UnmarshalText", Method, 6, ""},
+ {"(*Int).Abs", Method, 0, ""},
+ {"(*Int).Add", Method, 0, ""},
+ {"(*Int).And", Method, 0, ""},
+ {"(*Int).AndNot", Method, 0, ""},
+ {"(*Int).Append", Method, 6, ""},
+ {"(*Int).AppendText", Method, 24, ""},
+ {"(*Int).Binomial", Method, 0, ""},
+ {"(*Int).Bit", Method, 0, ""},
+ {"(*Int).BitLen", Method, 0, ""},
+ {"(*Int).Bits", Method, 0, ""},
+ {"(*Int).Bytes", Method, 0, ""},
+ {"(*Int).Cmp", Method, 0, ""},
+ {"(*Int).CmpAbs", Method, 10, ""},
+ {"(*Int).Div", Method, 0, ""},
+ {"(*Int).DivMod", Method, 0, ""},
+ {"(*Int).Exp", Method, 0, ""},
+ {"(*Int).FillBytes", Method, 15, ""},
+ {"(*Int).Float64", Method, 21, ""},
+ {"(*Int).Format", Method, 0, ""},
+ {"(*Int).GCD", Method, 0, ""},
+ {"(*Int).GobDecode", Method, 0, ""},
+ {"(*Int).GobEncode", Method, 0, ""},
+ {"(*Int).Int64", Method, 0, ""},
+ {"(*Int).IsInt64", Method, 9, ""},
+ {"(*Int).IsUint64", Method, 9, ""},
+ {"(*Int).Lsh", Method, 0, ""},
+ {"(*Int).MarshalJSON", Method, 1, ""},
+ {"(*Int).MarshalText", Method, 3, ""},
+ {"(*Int).Mod", Method, 0, ""},
+ {"(*Int).ModInverse", Method, 0, ""},
+ {"(*Int).ModSqrt", Method, 5, ""},
+ {"(*Int).Mul", Method, 0, ""},
+ {"(*Int).MulRange", Method, 0, ""},
+ {"(*Int).Neg", Method, 0, ""},
+ {"(*Int).Not", Method, 0, ""},
+ {"(*Int).Or", Method, 0, ""},
+ {"(*Int).ProbablyPrime", Method, 0, ""},
+ {"(*Int).Quo", Method, 0, ""},
+ {"(*Int).QuoRem", Method, 0, ""},
+ {"(*Int).Rand", Method, 0, ""},
+ {"(*Int).Rem", Method, 0, ""},
+ {"(*Int).Rsh", Method, 0, ""},
+ {"(*Int).Scan", Method, 0, ""},
+ {"(*Int).Set", Method, 0, ""},
+ {"(*Int).SetBit", Method, 0, ""},
+ {"(*Int).SetBits", Method, 0, ""},
+ {"(*Int).SetBytes", Method, 0, ""},
+ {"(*Int).SetInt64", Method, 0, ""},
+ {"(*Int).SetString", Method, 0, ""},
+ {"(*Int).SetUint64", Method, 1, ""},
+ {"(*Int).Sign", Method, 0, ""},
+ {"(*Int).Sqrt", Method, 8, ""},
+ {"(*Int).String", Method, 0, ""},
+ {"(*Int).Sub", Method, 0, ""},
+ {"(*Int).Text", Method, 6, ""},
+ {"(*Int).TrailingZeroBits", Method, 13, ""},
+ {"(*Int).Uint64", Method, 1, ""},
+ {"(*Int).UnmarshalJSON", Method, 1, ""},
+ {"(*Int).UnmarshalText", Method, 3, ""},
+ {"(*Int).Xor", Method, 0, ""},
+ {"(*Rat).Abs", Method, 0, ""},
+ {"(*Rat).Add", Method, 0, ""},
+ {"(*Rat).AppendText", Method, 24, ""},
+ {"(*Rat).Cmp", Method, 0, ""},
+ {"(*Rat).Denom", Method, 0, ""},
+ {"(*Rat).Float32", Method, 4, ""},
+ {"(*Rat).Float64", Method, 1, ""},
+ {"(*Rat).FloatPrec", Method, 22, ""},
+ {"(*Rat).FloatString", Method, 0, ""},
+ {"(*Rat).GobDecode", Method, 0, ""},
+ {"(*Rat).GobEncode", Method, 0, ""},
+ {"(*Rat).Inv", Method, 0, ""},
+ {"(*Rat).IsInt", Method, 0, ""},
+ {"(*Rat).MarshalText", Method, 3, ""},
+ {"(*Rat).Mul", Method, 0, ""},
+ {"(*Rat).Neg", Method, 0, ""},
+ {"(*Rat).Num", Method, 0, ""},
+ {"(*Rat).Quo", Method, 0, ""},
+ {"(*Rat).RatString", Method, 0, ""},
+ {"(*Rat).Scan", Method, 0, ""},
+ {"(*Rat).Set", Method, 0, ""},
+ {"(*Rat).SetFloat64", Method, 1, ""},
+ {"(*Rat).SetFrac", Method, 0, ""},
+ {"(*Rat).SetFrac64", Method, 0, ""},
+ {"(*Rat).SetInt", Method, 0, ""},
+ {"(*Rat).SetInt64", Method, 0, ""},
+ {"(*Rat).SetString", Method, 0, ""},
+ {"(*Rat).SetUint64", Method, 13, ""},
+ {"(*Rat).Sign", Method, 0, ""},
+ {"(*Rat).String", Method, 0, ""},
+ {"(*Rat).Sub", Method, 0, ""},
+ {"(*Rat).UnmarshalText", Method, 3, ""},
+ {"(Accuracy).String", Method, 5, ""},
+ {"(ErrNaN).Error", Method, 5, ""},
+ {"(RoundingMode).String", Method, 5, ""},
+ {"Above", Const, 5, ""},
+ {"Accuracy", Type, 5, ""},
+ {"AwayFromZero", Const, 5, ""},
+ {"Below", Const, 5, ""},
+ {"ErrNaN", Type, 5, ""},
+ {"Exact", Const, 5, ""},
+ {"Float", Type, 5, ""},
+ {"Int", Type, 0, ""},
+ {"Jacobi", Func, 5, "func(x *Int, y *Int) int"},
+ {"MaxBase", Const, 0, ""},
+ {"MaxExp", Const, 5, ""},
+ {"MaxPrec", Const, 5, ""},
+ {"MinExp", Const, 5, ""},
+ {"NewFloat", Func, 5, "func(x float64) *Float"},
+ {"NewInt", Func, 0, "func(x int64) *Int"},
+ {"NewRat", Func, 0, "func(a int64, b int64) *Rat"},
+ {"ParseFloat", Func, 5, "func(s string, base int, prec uint, mode RoundingMode) (f *Float, b int, err error)"},
+ {"Rat", Type, 0, ""},
+ {"RoundingMode", Type, 5, ""},
+ {"ToNearestAway", Const, 5, ""},
+ {"ToNearestEven", Const, 5, ""},
+ {"ToNegativeInf", Const, 5, ""},
+ {"ToPositiveInf", Const, 5, ""},
+ {"ToZero", Const, 5, ""},
+ {"Word", Type, 0, ""},
+ },
+ "math/bits": {
+ {"Add", Func, 12, "func(x uint, y uint, carry uint) (sum uint, carryOut uint)"},
+ {"Add32", Func, 12, "func(x uint32, y uint32, carry uint32) (sum uint32, carryOut uint32)"},
+ {"Add64", Func, 12, "func(x uint64, y uint64, carry uint64) (sum uint64, carryOut uint64)"},
+ {"Div", Func, 12, "func(hi uint, lo uint, y uint) (quo uint, rem uint)"},
+ {"Div32", Func, 12, "func(hi uint32, lo uint32, y uint32) (quo uint32, rem uint32)"},
+ {"Div64", Func, 12, "func(hi uint64, lo uint64, y uint64) (quo uint64, rem uint64)"},
+ {"LeadingZeros", Func, 9, "func(x uint) int"},
+ {"LeadingZeros16", Func, 9, "func(x uint16) int"},
+ {"LeadingZeros32", Func, 9, "func(x uint32) int"},
+ {"LeadingZeros64", Func, 9, "func(x uint64) int"},
+ {"LeadingZeros8", Func, 9, "func(x uint8) int"},
+ {"Len", Func, 9, "func(x uint) int"},
+ {"Len16", Func, 9, "func(x uint16) (n int)"},
+ {"Len32", Func, 9, "func(x uint32) (n int)"},
+ {"Len64", Func, 9, "func(x uint64) (n int)"},
+ {"Len8", Func, 9, "func(x uint8) int"},
+ {"Mul", Func, 12, "func(x uint, y uint) (hi uint, lo uint)"},
+ {"Mul32", Func, 12, "func(x uint32, y uint32) (hi uint32, lo uint32)"},
+ {"Mul64", Func, 12, "func(x uint64, y uint64) (hi uint64, lo uint64)"},
+ {"OnesCount", Func, 9, "func(x uint) int"},
+ {"OnesCount16", Func, 9, "func(x uint16) int"},
+ {"OnesCount32", Func, 9, "func(x uint32) int"},
+ {"OnesCount64", Func, 9, "func(x uint64) int"},
+ {"OnesCount8", Func, 9, "func(x uint8) int"},
+ {"Rem", Func, 14, "func(hi uint, lo uint, y uint) uint"},
+ {"Rem32", Func, 14, "func(hi uint32, lo uint32, y uint32) uint32"},
+ {"Rem64", Func, 14, "func(hi uint64, lo uint64, y uint64) uint64"},
+ {"Reverse", Func, 9, "func(x uint) uint"},
+ {"Reverse16", Func, 9, "func(x uint16) uint16"},
+ {"Reverse32", Func, 9, "func(x uint32) uint32"},
+ {"Reverse64", Func, 9, "func(x uint64) uint64"},
+ {"Reverse8", Func, 9, "func(x uint8) uint8"},
+ {"ReverseBytes", Func, 9, "func(x uint) uint"},
+ {"ReverseBytes16", Func, 9, "func(x uint16) uint16"},
+ {"ReverseBytes32", Func, 9, "func(x uint32) uint32"},
+ {"ReverseBytes64", Func, 9, "func(x uint64) uint64"},
+ {"RotateLeft", Func, 9, "func(x uint, k int) uint"},
+ {"RotateLeft16", Func, 9, "func(x uint16, k int) uint16"},
+ {"RotateLeft32", Func, 9, "func(x uint32, k int) uint32"},
+ {"RotateLeft64", Func, 9, "func(x uint64, k int) uint64"},
+ {"RotateLeft8", Func, 9, "func(x uint8, k int) uint8"},
+ {"Sub", Func, 12, "func(x uint, y uint, borrow uint) (diff uint, borrowOut uint)"},
+ {"Sub32", Func, 12, "func(x uint32, y uint32, borrow uint32) (diff uint32, borrowOut uint32)"},
+ {"Sub64", Func, 12, "func(x uint64, y uint64, borrow uint64) (diff uint64, borrowOut uint64)"},
+ {"TrailingZeros", Func, 9, "func(x uint) int"},
+ {"TrailingZeros16", Func, 9, "func(x uint16) int"},
+ {"TrailingZeros32", Func, 9, "func(x uint32) int"},
+ {"TrailingZeros64", Func, 9, "func(x uint64) int"},
+ {"TrailingZeros8", Func, 9, "func(x uint8) int"},
+ {"UintSize", Const, 9, ""},
+ },
+ "math/cmplx": {
+ {"Abs", Func, 0, "func(x complex128) float64"},
+ {"Acos", Func, 0, "func(x complex128) complex128"},
+ {"Acosh", Func, 0, "func(x complex128) complex128"},
+ {"Asin", Func, 0, "func(x complex128) complex128"},
+ {"Asinh", Func, 0, "func(x complex128) complex128"},
+ {"Atan", Func, 0, "func(x complex128) complex128"},
+ {"Atanh", Func, 0, "func(x complex128) complex128"},
+ {"Conj", Func, 0, "func(x complex128) complex128"},
+ {"Cos", Func, 0, "func(x complex128) complex128"},
+ {"Cosh", Func, 0, "func(x complex128) complex128"},
+ {"Cot", Func, 0, "func(x complex128) complex128"},
+ {"Exp", Func, 0, "func(x complex128) complex128"},
+ {"Inf", Func, 0, "func() complex128"},
+ {"IsInf", Func, 0, "func(x complex128) bool"},
+ {"IsNaN", Func, 0, "func(x complex128) bool"},
+ {"Log", Func, 0, "func(x complex128) complex128"},
+ {"Log10", Func, 0, "func(x complex128) complex128"},
+ {"NaN", Func, 0, "func() complex128"},
+ {"Phase", Func, 0, "func(x complex128) float64"},
+ {"Polar", Func, 0, "func(x complex128) (r float64, θ float64)"},
+ {"Pow", Func, 0, "func(x complex128, y complex128) complex128"},
+ {"Rect", Func, 0, "func(r float64, θ float64) complex128"},
+ {"Sin", Func, 0, "func(x complex128) complex128"},
+ {"Sinh", Func, 0, "func(x complex128) complex128"},
+ {"Sqrt", Func, 0, "func(x complex128) complex128"},
+ {"Tan", Func, 0, "func(x complex128) complex128"},
+ {"Tanh", Func, 0, "func(x complex128) complex128"},
+ },
+ "math/rand": {
+ {"(*Rand).ExpFloat64", Method, 0, ""},
+ {"(*Rand).Float32", Method, 0, ""},
+ {"(*Rand).Float64", Method, 0, ""},
+ {"(*Rand).Int", Method, 0, ""},
+ {"(*Rand).Int31", Method, 0, ""},
+ {"(*Rand).Int31n", Method, 0, ""},
+ {"(*Rand).Int63", Method, 0, ""},
+ {"(*Rand).Int63n", Method, 0, ""},
+ {"(*Rand).Intn", Method, 0, ""},
+ {"(*Rand).NormFloat64", Method, 0, ""},
+ {"(*Rand).Perm", Method, 0, ""},
+ {"(*Rand).Read", Method, 6, ""},
+ {"(*Rand).Seed", Method, 0, ""},
+ {"(*Rand).Shuffle", Method, 10, ""},
+ {"(*Rand).Uint32", Method, 0, ""},
+ {"(*Rand).Uint64", Method, 8, ""},
+ {"(*Zipf).Uint64", Method, 0, ""},
+ {"ExpFloat64", Func, 0, "func() float64"},
+ {"Float32", Func, 0, "func() float32"},
+ {"Float64", Func, 0, "func() float64"},
+ {"Int", Func, 0, "func() int"},
+ {"Int31", Func, 0, "func() int32"},
+ {"Int31n", Func, 0, "func(n int32) int32"},
+ {"Int63", Func, 0, "func() int64"},
+ {"Int63n", Func, 0, "func(n int64) int64"},
+ {"Intn", Func, 0, "func(n int) int"},
+ {"New", Func, 0, "func(src Source) *Rand"},
+ {"NewSource", Func, 0, "func(seed int64) Source"},
+ {"NewZipf", Func, 0, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+ {"NormFloat64", Func, 0, "func() float64"},
+ {"Perm", Func, 0, "func(n int) []int"},
+ {"Rand", Type, 0, ""},
+ {"Read", Func, 6, "func(p []byte) (n int, err error)"},
+ {"Seed", Func, 0, "func(seed int64)"},
+ {"Shuffle", Func, 10, "func(n int, swap func(i int, j int))"},
+ {"Source", Type, 0, ""},
+ {"Source64", Type, 8, ""},
+ {"Uint32", Func, 0, "func() uint32"},
+ {"Uint64", Func, 8, "func() uint64"},
+ {"Zipf", Type, 0, ""},
+ },
+ "math/rand/v2": {
+ {"(*ChaCha8).AppendBinary", Method, 24, ""},
+ {"(*ChaCha8).MarshalBinary", Method, 22, ""},
+ {"(*ChaCha8).Read", Method, 23, ""},
+ {"(*ChaCha8).Seed", Method, 22, ""},
+ {"(*ChaCha8).Uint64", Method, 22, ""},
+ {"(*ChaCha8).UnmarshalBinary", Method, 22, ""},
+ {"(*PCG).AppendBinary", Method, 24, ""},
+ {"(*PCG).MarshalBinary", Method, 22, ""},
+ {"(*PCG).Seed", Method, 22, ""},
+ {"(*PCG).Uint64", Method, 22, ""},
+ {"(*PCG).UnmarshalBinary", Method, 22, ""},
+ {"(*Rand).ExpFloat64", Method, 22, ""},
+ {"(*Rand).Float32", Method, 22, ""},
+ {"(*Rand).Float64", Method, 22, ""},
+ {"(*Rand).Int", Method, 22, ""},
+ {"(*Rand).Int32", Method, 22, ""},
+ {"(*Rand).Int32N", Method, 22, ""},
+ {"(*Rand).Int64", Method, 22, ""},
+ {"(*Rand).Int64N", Method, 22, ""},
+ {"(*Rand).IntN", Method, 22, ""},
+ {"(*Rand).NormFloat64", Method, 22, ""},
+ {"(*Rand).Perm", Method, 22, ""},
+ {"(*Rand).Shuffle", Method, 22, ""},
+ {"(*Rand).Uint", Method, 23, ""},
+ {"(*Rand).Uint32", Method, 22, ""},
+ {"(*Rand).Uint32N", Method, 22, ""},
+ {"(*Rand).Uint64", Method, 22, ""},
+ {"(*Rand).Uint64N", Method, 22, ""},
+ {"(*Rand).UintN", Method, 22, ""},
+ {"(*Zipf).Uint64", Method, 22, ""},
+ {"ChaCha8", Type, 22, ""},
+ {"ExpFloat64", Func, 22, "func() float64"},
+ {"Float32", Func, 22, "func() float32"},
+ {"Float64", Func, 22, "func() float64"},
+ {"Int", Func, 22, "func() int"},
+ {"Int32", Func, 22, "func() int32"},
+ {"Int32N", Func, 22, "func(n int32) int32"},
+ {"Int64", Func, 22, "func() int64"},
+ {"Int64N", Func, 22, "func(n int64) int64"},
+ {"IntN", Func, 22, "func(n int) int"},
+ {"N", Func, 22, "func[Int intType](n Int) Int"},
+ {"New", Func, 22, "func(src Source) *Rand"},
+ {"NewChaCha8", Func, 22, "func(seed [32]byte) *ChaCha8"},
+ {"NewPCG", Func, 22, "func(seed1 uint64, seed2 uint64) *PCG"},
+ {"NewZipf", Func, 22, "func(r *Rand, s float64, v float64, imax uint64) *Zipf"},
+ {"NormFloat64", Func, 22, "func() float64"},
+ {"PCG", Type, 22, ""},
+ {"Perm", Func, 22, "func(n int) []int"},
+ {"Rand", Type, 22, ""},
+ {"Shuffle", Func, 22, "func(n int, swap func(i int, j int))"},
+ {"Source", Type, 22, ""},
+ {"Uint", Func, 23, "func() uint"},
+ {"Uint32", Func, 22, "func() uint32"},
+ {"Uint32N", Func, 22, "func(n uint32) uint32"},
+ {"Uint64", Func, 22, "func() uint64"},
+ {"Uint64N", Func, 22, "func(n uint64) uint64"},
+ {"UintN", Func, 22, "func(n uint) uint"},
+ {"Zipf", Type, 22, ""},
+ },
+ "mime": {
+ {"(*WordDecoder).Decode", Method, 5, ""},
+ {"(*WordDecoder).DecodeHeader", Method, 5, ""},
+ {"(WordEncoder).Encode", Method, 5, ""},
+ {"AddExtensionType", Func, 0, "func(ext string, typ string) error"},
+ {"BEncoding", Const, 5, ""},
+ {"ErrInvalidMediaParameter", Var, 9, ""},
+ {"ExtensionsByType", Func, 5, "func(typ string) ([]string, error)"},
+ {"FormatMediaType", Func, 0, "func(t string, param map[string]string) string"},
+ {"ParseMediaType", Func, 0, "func(v string) (mediatype string, params map[string]string, err error)"},
+ {"QEncoding", Const, 5, ""},
+ {"TypeByExtension", Func, 0, "func(ext string) string"},
+ {"WordDecoder", Type, 5, ""},
+ {"WordDecoder.CharsetReader", Field, 5, ""},
+ {"WordEncoder", Type, 5, ""},
+ },
+ "mime/multipart": {
+ {"(*FileHeader).Open", Method, 0, ""},
+ {"(*Form).RemoveAll", Method, 0, ""},
+ {"(*Part).Close", Method, 0, ""},
+ {"(*Part).FileName", Method, 0, ""},
+ {"(*Part).FormName", Method, 0, ""},
+ {"(*Part).Read", Method, 0, ""},
+ {"(*Reader).NextPart", Method, 0, ""},
+ {"(*Reader).NextRawPart", Method, 14, ""},
+ {"(*Reader).ReadForm", Method, 0, ""},
+ {"(*Writer).Boundary", Method, 0, ""},
+ {"(*Writer).Close", Method, 0, ""},
+ {"(*Writer).CreateFormField", Method, 0, ""},
+ {"(*Writer).CreateFormFile", Method, 0, ""},
+ {"(*Writer).CreatePart", Method, 0, ""},
+ {"(*Writer).FormDataContentType", Method, 0, ""},
+ {"(*Writer).SetBoundary", Method, 1, ""},
+ {"(*Writer).WriteField", Method, 0, ""},
+ {"ErrMessageTooLarge", Var, 9, ""},
+ {"File", Type, 0, ""},
+ {"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"},
+ {"FileHeader", Type, 0, ""},
+ {"FileHeader.Filename", Field, 0, ""},
+ {"FileHeader.Header", Field, 0, ""},
+ {"FileHeader.Size", Field, 9, ""},
+ {"Form", Type, 0, ""},
+ {"Form.File", Field, 0, ""},
+ {"Form.Value", Field, 0, ""},
+ {"NewReader", Func, 0, "func(r io.Reader, boundary string) *Reader"},
+ {"NewWriter", Func, 0, "func(w io.Writer) *Writer"},
+ {"Part", Type, 0, ""},
+ {"Part.Header", Field, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "mime/quotedprintable": {
+ {"(*Reader).Read", Method, 5, ""},
+ {"(*Writer).Close", Method, 5, ""},
+ {"(*Writer).Write", Method, 5, ""},
+ {"NewReader", Func, 5, "func(r io.Reader) *Reader"},
+ {"NewWriter", Func, 5, "func(w io.Writer) *Writer"},
+ {"Reader", Type, 5, ""},
+ {"Writer", Type, 5, ""},
+ {"Writer.Binary", Field, 5, ""},
+ },
+ "net": {
+ {"(*AddrError).Error", Method, 0, ""},
+ {"(*AddrError).Temporary", Method, 0, ""},
+ {"(*AddrError).Timeout", Method, 0, ""},
+ {"(*Buffers).Read", Method, 8, ""},
+ {"(*Buffers).WriteTo", Method, 8, ""},
+ {"(*DNSConfigError).Error", Method, 0, ""},
+ {"(*DNSConfigError).Temporary", Method, 0, ""},
+ {"(*DNSConfigError).Timeout", Method, 0, ""},
+ {"(*DNSConfigError).Unwrap", Method, 13, ""},
+ {"(*DNSError).Error", Method, 0, ""},
+ {"(*DNSError).Temporary", Method, 0, ""},
+ {"(*DNSError).Timeout", Method, 0, ""},
+ {"(*DNSError).Unwrap", Method, 23, ""},
+ {"(*Dialer).Dial", Method, 1, ""},
+ {"(*Dialer).DialContext", Method, 7, ""},
+ {"(*Dialer).MultipathTCP", Method, 21, ""},
+ {"(*Dialer).SetMultipathTCP", Method, 21, ""},
+ {"(*IP).UnmarshalText", Method, 2, ""},
+ {"(*IPAddr).Network", Method, 0, ""},
+ {"(*IPAddr).String", Method, 0, ""},
+ {"(*IPConn).Close", Method, 0, ""},
+ {"(*IPConn).File", Method, 0, ""},
+ {"(*IPConn).LocalAddr", Method, 0, ""},
+ {"(*IPConn).Read", Method, 0, ""},
+ {"(*IPConn).ReadFrom", Method, 0, ""},
+ {"(*IPConn).ReadFromIP", Method, 0, ""},
+ {"(*IPConn).ReadMsgIP", Method, 1, ""},
+ {"(*IPConn).RemoteAddr", Method, 0, ""},
+ {"(*IPConn).SetDeadline", Method, 0, ""},
+ {"(*IPConn).SetReadBuffer", Method, 0, ""},
+ {"(*IPConn).SetReadDeadline", Method, 0, ""},
+ {"(*IPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*IPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*IPConn).SyscallConn", Method, 9, ""},
+ {"(*IPConn).Write", Method, 0, ""},
+ {"(*IPConn).WriteMsgIP", Method, 1, ""},
+ {"(*IPConn).WriteTo", Method, 0, ""},
+ {"(*IPConn).WriteToIP", Method, 0, ""},
+ {"(*IPNet).Contains", Method, 0, ""},
+ {"(*IPNet).Network", Method, 0, ""},
+ {"(*IPNet).String", Method, 0, ""},
+ {"(*Interface).Addrs", Method, 0, ""},
+ {"(*Interface).MulticastAddrs", Method, 0, ""},
+ {"(*ListenConfig).Listen", Method, 11, ""},
+ {"(*ListenConfig).ListenPacket", Method, 11, ""},
+ {"(*ListenConfig).MultipathTCP", Method, 21, ""},
+ {"(*ListenConfig).SetMultipathTCP", Method, 21, ""},
+ {"(*OpError).Error", Method, 0, ""},
+ {"(*OpError).Temporary", Method, 0, ""},
+ {"(*OpError).Timeout", Method, 0, ""},
+ {"(*OpError).Unwrap", Method, 13, ""},
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*ParseError).Temporary", Method, 17, ""},
+ {"(*ParseError).Timeout", Method, 17, ""},
+ {"(*Resolver).LookupAddr", Method, 8, ""},
+ {"(*Resolver).LookupCNAME", Method, 8, ""},
+ {"(*Resolver).LookupHost", Method, 8, ""},
+ {"(*Resolver).LookupIP", Method, 15, ""},
+ {"(*Resolver).LookupIPAddr", Method, 8, ""},
+ {"(*Resolver).LookupMX", Method, 8, ""},
+ {"(*Resolver).LookupNS", Method, 8, ""},
+ {"(*Resolver).LookupNetIP", Method, 18, ""},
+ {"(*Resolver).LookupPort", Method, 8, ""},
+ {"(*Resolver).LookupSRV", Method, 8, ""},
+ {"(*Resolver).LookupTXT", Method, 8, ""},
+ {"(*TCPAddr).AddrPort", Method, 18, ""},
+ {"(*TCPAddr).Network", Method, 0, ""},
+ {"(*TCPAddr).String", Method, 0, ""},
+ {"(*TCPConn).Close", Method, 0, ""},
+ {"(*TCPConn).CloseRead", Method, 0, ""},
+ {"(*TCPConn).CloseWrite", Method, 0, ""},
+ {"(*TCPConn).File", Method, 0, ""},
+ {"(*TCPConn).LocalAddr", Method, 0, ""},
+ {"(*TCPConn).MultipathTCP", Method, 21, ""},
+ {"(*TCPConn).Read", Method, 0, ""},
+ {"(*TCPConn).ReadFrom", Method, 0, ""},
+ {"(*TCPConn).RemoteAddr", Method, 0, ""},
+ {"(*TCPConn).SetDeadline", Method, 0, ""},
+ {"(*TCPConn).SetKeepAlive", Method, 0, ""},
+ {"(*TCPConn).SetKeepAliveConfig", Method, 23, ""},
+ {"(*TCPConn).SetKeepAlivePeriod", Method, 2, ""},
+ {"(*TCPConn).SetLinger", Method, 0, ""},
+ {"(*TCPConn).SetNoDelay", Method, 0, ""},
+ {"(*TCPConn).SetReadBuffer", Method, 0, ""},
+ {"(*TCPConn).SetReadDeadline", Method, 0, ""},
+ {"(*TCPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*TCPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*TCPConn).SyscallConn", Method, 9, ""},
+ {"(*TCPConn).Write", Method, 0, ""},
+ {"(*TCPConn).WriteTo", Method, 22, ""},
+ {"(*TCPListener).Accept", Method, 0, ""},
+ {"(*TCPListener).AcceptTCP", Method, 0, ""},
+ {"(*TCPListener).Addr", Method, 0, ""},
+ {"(*TCPListener).Close", Method, 0, ""},
+ {"(*TCPListener).File", Method, 0, ""},
+ {"(*TCPListener).SetDeadline", Method, 0, ""},
+ {"(*TCPListener).SyscallConn", Method, 10, ""},
+ {"(*UDPAddr).AddrPort", Method, 18, ""},
+ {"(*UDPAddr).Network", Method, 0, ""},
+ {"(*UDPAddr).String", Method, 0, ""},
+ {"(*UDPConn).Close", Method, 0, ""},
+ {"(*UDPConn).File", Method, 0, ""},
+ {"(*UDPConn).LocalAddr", Method, 0, ""},
+ {"(*UDPConn).Read", Method, 0, ""},
+ {"(*UDPConn).ReadFrom", Method, 0, ""},
+ {"(*UDPConn).ReadFromUDP", Method, 0, ""},
+ {"(*UDPConn).ReadFromUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).ReadMsgUDP", Method, 1, ""},
+ {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).RemoteAddr", Method, 0, ""},
+ {"(*UDPConn).SetDeadline", Method, 0, ""},
+ {"(*UDPConn).SetReadBuffer", Method, 0, ""},
+ {"(*UDPConn).SetReadDeadline", Method, 0, ""},
+ {"(*UDPConn).SetWriteBuffer", Method, 0, ""},
+ {"(*UDPConn).SetWriteDeadline", Method, 0, ""},
+ {"(*UDPConn).SyscallConn", Method, 9, ""},
+ {"(*UDPConn).Write", Method, 0, ""},
+ {"(*UDPConn).WriteMsgUDP", Method, 1, ""},
+ {"(*UDPConn).WriteMsgUDPAddrPort", Method, 18, ""},
+ {"(*UDPConn).WriteTo", Method, 0, ""},
+ {"(*UDPConn).WriteToUDP", Method, 0, ""},
+ {"(*UDPConn).WriteToUDPAddrPort", Method, 18, ""},
+ {"(*UnixAddr).Network", Method, 0, ""},
+ {"(*UnixAddr).String", Method, 0, ""},
+ {"(*UnixConn).Close", Method, 0, ""},
+ {"(*UnixConn).CloseRead", Method, 1, ""},
+ {"(*UnixConn).CloseWrite", Method, 1, ""},
+ {"(*UnixConn).File", Method, 0, ""},
+ {"(*UnixConn).LocalAddr", Method, 0, ""},
+ {"(*UnixConn).Read", Method, 0, ""},
+ {"(*UnixConn).ReadFrom", Method, 0, ""},
+ {"(*UnixConn).ReadFromUnix", Method, 0, ""},
+ {"(*UnixConn).ReadMsgUnix", Method, 0, ""},
+ {"(*UnixConn).RemoteAddr", Method, 0, ""},
+ {"(*UnixConn).SetDeadline", Method, 0, ""},
+ {"(*UnixConn).SetReadBuffer", Method, 0, ""},
+ {"(*UnixConn).SetReadDeadline", Method, 0, ""},
+ {"(*UnixConn).SetWriteBuffer", Method, 0, ""},
+ {"(*UnixConn).SetWriteDeadline", Method, 0, ""},
+ {"(*UnixConn).SyscallConn", Method, 9, ""},
+ {"(*UnixConn).Write", Method, 0, ""},
+ {"(*UnixConn).WriteMsgUnix", Method, 0, ""},
+ {"(*UnixConn).WriteTo", Method, 0, ""},
+ {"(*UnixConn).WriteToUnix", Method, 0, ""},
+ {"(*UnixListener).Accept", Method, 0, ""},
+ {"(*UnixListener).AcceptUnix", Method, 0, ""},
+ {"(*UnixListener).Addr", Method, 0, ""},
+ {"(*UnixListener).Close", Method, 0, ""},
+ {"(*UnixListener).File", Method, 0, ""},
+ {"(*UnixListener).SetDeadline", Method, 0, ""},
+ {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""},
+ {"(*UnixListener).SyscallConn", Method, 10, ""},
+ {"(Flags).String", Method, 0, ""},
+ {"(HardwareAddr).String", Method, 0, ""},
+ {"(IP).AppendText", Method, 24, ""},
+ {"(IP).DefaultMask", Method, 0, ""},
+ {"(IP).Equal", Method, 0, ""},
+ {"(IP).IsGlobalUnicast", Method, 0, ""},
+ {"(IP).IsInterfaceLocalMulticast", Method, 0, ""},
+ {"(IP).IsLinkLocalMulticast", Method, 0, ""},
+ {"(IP).IsLinkLocalUnicast", Method, 0, ""},
+ {"(IP).IsLoopback", Method, 0, ""},
+ {"(IP).IsMulticast", Method, 0, ""},
+ {"(IP).IsPrivate", Method, 17, ""},
+ {"(IP).IsUnspecified", Method, 0, ""},
+ {"(IP).MarshalText", Method, 2, ""},
+ {"(IP).Mask", Method, 0, ""},
+ {"(IP).String", Method, 0, ""},
+ {"(IP).To16", Method, 0, ""},
+ {"(IP).To4", Method, 0, ""},
+ {"(IPMask).Size", Method, 0, ""},
+ {"(IPMask).String", Method, 0, ""},
+ {"(InvalidAddrError).Error", Method, 0, ""},
+ {"(InvalidAddrError).Temporary", Method, 0, ""},
+ {"(InvalidAddrError).Timeout", Method, 0, ""},
+ {"(UnknownNetworkError).Error", Method, 0, ""},
+ {"(UnknownNetworkError).Temporary", Method, 0, ""},
+ {"(UnknownNetworkError).Timeout", Method, 0, ""},
+ {"Addr", Type, 0, ""},
+ {"AddrError", Type, 0, ""},
+ {"AddrError.Addr", Field, 0, ""},
+ {"AddrError.Err", Field, 0, ""},
+ {"Buffers", Type, 8, ""},
+ {"CIDRMask", Func, 0, "func(ones int, bits int) IPMask"},
+ {"Conn", Type, 0, ""},
+ {"DNSConfigError", Type, 0, ""},
+ {"DNSConfigError.Err", Field, 0, ""},
+ {"DNSError", Type, 0, ""},
+ {"DNSError.Err", Field, 0, ""},
+ {"DNSError.IsNotFound", Field, 13, ""},
+ {"DNSError.IsTemporary", Field, 6, ""},
+ {"DNSError.IsTimeout", Field, 0, ""},
+ {"DNSError.Name", Field, 0, ""},
+ {"DNSError.Server", Field, 0, ""},
+ {"DNSError.UnwrapErr", Field, 23, ""},
+ {"DefaultResolver", Var, 8, ""},
+ {"Dial", Func, 0, "func(network string, address string) (Conn, error)"},
+ {"DialIP", Func, 0, "func(network string, laddr *IPAddr, raddr *IPAddr) (*IPConn, error)"},
+ {"DialTCP", Func, 0, "func(network string, laddr *TCPAddr, raddr *TCPAddr) (*TCPConn, error)"},
+ {"DialTimeout", Func, 0, "func(network string, address string, timeout time.Duration) (Conn, error)"},
+ {"DialUDP", Func, 0, "func(network string, laddr *UDPAddr, raddr *UDPAddr) (*UDPConn, error)"},
+ {"DialUnix", Func, 0, "func(network string, laddr *UnixAddr, raddr *UnixAddr) (*UnixConn, error)"},
+ {"Dialer", Type, 1, ""},
+ {"Dialer.Cancel", Field, 6, ""},
+ {"Dialer.Control", Field, 11, ""},
+ {"Dialer.ControlContext", Field, 20, ""},
+ {"Dialer.Deadline", Field, 1, ""},
+ {"Dialer.DualStack", Field, 2, ""},
+ {"Dialer.FallbackDelay", Field, 5, ""},
+ {"Dialer.KeepAlive", Field, 3, ""},
+ {"Dialer.KeepAliveConfig", Field, 23, ""},
+ {"Dialer.LocalAddr", Field, 1, ""},
+ {"Dialer.Resolver", Field, 8, ""},
+ {"Dialer.Timeout", Field, 1, ""},
+ {"ErrClosed", Var, 16, ""},
+ {"ErrWriteToConnected", Var, 0, ""},
+ {"Error", Type, 0, ""},
+ {"FileConn", Func, 0, "func(f *os.File) (c Conn, err error)"},
+ {"FileListener", Func, 0, "func(f *os.File) (ln Listener, err error)"},
+ {"FilePacketConn", Func, 0, "func(f *os.File) (c PacketConn, err error)"},
+ {"FlagBroadcast", Const, 0, ""},
+ {"FlagLoopback", Const, 0, ""},
+ {"FlagMulticast", Const, 0, ""},
+ {"FlagPointToPoint", Const, 0, ""},
+ {"FlagRunning", Const, 20, ""},
+ {"FlagUp", Const, 0, ""},
+ {"Flags", Type, 0, ""},
+ {"HardwareAddr", Type, 0, ""},
+ {"IP", Type, 0, ""},
+ {"IPAddr", Type, 0, ""},
+ {"IPAddr.IP", Field, 0, ""},
+ {"IPAddr.Zone", Field, 1, ""},
+ {"IPConn", Type, 0, ""},
+ {"IPMask", Type, 0, ""},
+ {"IPNet", Type, 0, ""},
+ {"IPNet.IP", Field, 0, ""},
+ {"IPNet.Mask", Field, 0, ""},
+ {"IPv4", Func, 0, "func(a byte, b byte, c byte, d byte) IP"},
+ {"IPv4Mask", Func, 0, "func(a byte, b byte, c byte, d byte) IPMask"},
+ {"IPv4allrouter", Var, 0, ""},
+ {"IPv4allsys", Var, 0, ""},
+ {"IPv4bcast", Var, 0, ""},
+ {"IPv4len", Const, 0, ""},
+ {"IPv4zero", Var, 0, ""},
+ {"IPv6interfacelocalallnodes", Var, 0, ""},
+ {"IPv6len", Const, 0, ""},
+ {"IPv6linklocalallnodes", Var, 0, ""},
+ {"IPv6linklocalallrouters", Var, 0, ""},
+ {"IPv6loopback", Var, 0, ""},
+ {"IPv6unspecified", Var, 0, ""},
+ {"IPv6zero", Var, 0, ""},
+ {"Interface", Type, 0, ""},
+ {"Interface.Flags", Field, 0, ""},
+ {"Interface.HardwareAddr", Field, 0, ""},
+ {"Interface.Index", Field, 0, ""},
+ {"Interface.MTU", Field, 0, ""},
+ {"Interface.Name", Field, 0, ""},
+ {"InterfaceAddrs", Func, 0, "func() ([]Addr, error)"},
+ {"InterfaceByIndex", Func, 0, "func(index int) (*Interface, error)"},
+ {"InterfaceByName", Func, 0, "func(name string) (*Interface, error)"},
+ {"Interfaces", Func, 0, "func() ([]Interface, error)"},
+ {"InvalidAddrError", Type, 0, ""},
+ {"JoinHostPort", Func, 0, "func(host string, port string) string"},
+ {"KeepAliveConfig", Type, 23, ""},
+ {"KeepAliveConfig.Count", Field, 23, ""},
+ {"KeepAliveConfig.Enable", Field, 23, ""},
+ {"KeepAliveConfig.Idle", Field, 23, ""},
+ {"KeepAliveConfig.Interval", Field, 23, ""},
+ {"Listen", Func, 0, "func(network string, address string) (Listener, error)"},
+ {"ListenConfig", Type, 11, ""},
+ {"ListenConfig.Control", Field, 11, ""},
+ {"ListenConfig.KeepAlive", Field, 13, ""},
+ {"ListenConfig.KeepAliveConfig", Field, 23, ""},
+ {"ListenIP", Func, 0, "func(network string, laddr *IPAddr) (*IPConn, error)"},
+ {"ListenMulticastUDP", Func, 0, "func(network string, ifi *Interface, gaddr *UDPAddr) (*UDPConn, error)"},
+ {"ListenPacket", Func, 0, "func(network string, address string) (PacketConn, error)"},
+ {"ListenTCP", Func, 0, "func(network string, laddr *TCPAddr) (*TCPListener, error)"},
+ {"ListenUDP", Func, 0, "func(network string, laddr *UDPAddr) (*UDPConn, error)"},
+ {"ListenUnix", Func, 0, "func(network string, laddr *UnixAddr) (*UnixListener, error)"},
+ {"ListenUnixgram", Func, 0, "func(network string, laddr *UnixAddr) (*UnixConn, error)"},
+ {"Listener", Type, 0, ""},
+ {"LookupAddr", Func, 0, "func(addr string) (names []string, err error)"},
+ {"LookupCNAME", Func, 0, "func(host string) (cname string, err error)"},
+ {"LookupHost", Func, 0, "func(host string) (addrs []string, err error)"},
+ {"LookupIP", Func, 0, "func(host string) ([]IP, error)"},
+ {"LookupMX", Func, 0, "func(name string) ([]*MX, error)"},
+ {"LookupNS", Func, 1, "func(name string) ([]*NS, error)"},
+ {"LookupPort", Func, 0, "func(network string, service string) (port int, err error)"},
+ {"LookupSRV", Func, 0, "func(service string, proto string, name string) (cname string, addrs []*SRV, err error)"},
+ {"LookupTXT", Func, 0, "func(name string) ([]string, error)"},
+ {"MX", Type, 0, ""},
+ {"MX.Host", Field, 0, ""},
+ {"MX.Pref", Field, 0, ""},
+ {"NS", Type, 1, ""},
+ {"NS.Host", Field, 1, ""},
+ {"OpError", Type, 0, ""},
+ {"OpError.Addr", Field, 0, ""},
+ {"OpError.Err", Field, 0, ""},
+ {"OpError.Net", Field, 0, ""},
+ {"OpError.Op", Field, 0, ""},
+ {"OpError.Source", Field, 5, ""},
+ {"PacketConn", Type, 0, ""},
+ {"ParseCIDR", Func, 0, "func(s string) (IP, *IPNet, error)"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Text", Field, 0, ""},
+ {"ParseError.Type", Field, 0, ""},
+ {"ParseIP", Func, 0, "func(s string) IP"},
+ {"ParseMAC", Func, 0, "func(s string) (hw HardwareAddr, err error)"},
+ {"Pipe", Func, 0, "func() (Conn, Conn)"},
+ {"ResolveIPAddr", Func, 0, "func(network string, address string) (*IPAddr, error)"},
+ {"ResolveTCPAddr", Func, 0, "func(network string, address string) (*TCPAddr, error)"},
+ {"ResolveUDPAddr", Func, 0, "func(network string, address string) (*UDPAddr, error)"},
+ {"ResolveUnixAddr", Func, 0, "func(network string, address string) (*UnixAddr, error)"},
+ {"Resolver", Type, 8, ""},
+ {"Resolver.Dial", Field, 9, ""},
+ {"Resolver.PreferGo", Field, 8, ""},
+ {"Resolver.StrictErrors", Field, 9, ""},
+ {"SRV", Type, 0, ""},
+ {"SRV.Port", Field, 0, ""},
+ {"SRV.Priority", Field, 0, ""},
+ {"SRV.Target", Field, 0, ""},
+ {"SRV.Weight", Field, 0, ""},
+ {"SplitHostPort", Func, 0, "func(hostport string) (host string, port string, err error)"},
+ {"TCPAddr", Type, 0, ""},
+ {"TCPAddr.IP", Field, 0, ""},
+ {"TCPAddr.Port", Field, 0, ""},
+ {"TCPAddr.Zone", Field, 1, ""},
+ {"TCPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *TCPAddr"},
+ {"TCPConn", Type, 0, ""},
+ {"TCPListener", Type, 0, ""},
+ {"UDPAddr", Type, 0, ""},
+ {"UDPAddr.IP", Field, 0, ""},
+ {"UDPAddr.Port", Field, 0, ""},
+ {"UDPAddr.Zone", Field, 1, ""},
+ {"UDPAddrFromAddrPort", Func, 18, "func(addr netip.AddrPort) *UDPAddr"},
+ {"UDPConn", Type, 0, ""},
+ {"UnixAddr", Type, 0, ""},
+ {"UnixAddr.Name", Field, 0, ""},
+ {"UnixAddr.Net", Field, 0, ""},
+ {"UnixConn", Type, 0, ""},
+ {"UnixListener", Type, 0, ""},
+ {"UnknownNetworkError", Type, 0, ""},
+ },
+ "net/http": {
+ {"(*Client).CloseIdleConnections", Method, 12, ""},
+ {"(*Client).Do", Method, 0, ""},
+ {"(*Client).Get", Method, 0, ""},
+ {"(*Client).Head", Method, 0, ""},
+ {"(*Client).Post", Method, 0, ""},
+ {"(*Client).PostForm", Method, 0, ""},
+ {"(*Cookie).String", Method, 0, ""},
+ {"(*Cookie).Valid", Method, 18, ""},
+ {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""},
+ {"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""},
+ {"(*CrossOriginProtection).Check", Method, 25, ""},
+ {"(*CrossOriginProtection).Handler", Method, 25, ""},
+ {"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""},
+ {"(*MaxBytesError).Error", Method, 19, ""},
+ {"(*ProtocolError).Error", Method, 0, ""},
+ {"(*ProtocolError).Is", Method, 21, ""},
+ {"(*Protocols).SetHTTP1", Method, 24, ""},
+ {"(*Protocols).SetHTTP2", Method, 24, ""},
+ {"(*Protocols).SetUnencryptedHTTP2", Method, 24, ""},
+ {"(*Request).AddCookie", Method, 0, ""},
+ {"(*Request).BasicAuth", Method, 4, ""},
+ {"(*Request).Clone", Method, 13, ""},
+ {"(*Request).Context", Method, 7, ""},
+ {"(*Request).Cookie", Method, 0, ""},
+ {"(*Request).Cookies", Method, 0, ""},
+ {"(*Request).CookiesNamed", Method, 23, ""},
+ {"(*Request).FormFile", Method, 0, ""},
+ {"(*Request).FormValue", Method, 0, ""},
+ {"(*Request).MultipartReader", Method, 0, ""},
+ {"(*Request).ParseForm", Method, 0, ""},
+ {"(*Request).ParseMultipartForm", Method, 0, ""},
+ {"(*Request).PathValue", Method, 22, ""},
+ {"(*Request).PostFormValue", Method, 1, ""},
+ {"(*Request).ProtoAtLeast", Method, 0, ""},
+ {"(*Request).Referer", Method, 0, ""},
+ {"(*Request).SetBasicAuth", Method, 0, ""},
+ {"(*Request).SetPathValue", Method, 22, ""},
+ {"(*Request).UserAgent", Method, 0, ""},
+ {"(*Request).WithContext", Method, 7, ""},
+ {"(*Request).Write", Method, 0, ""},
+ {"(*Request).WriteProxy", Method, 0, ""},
+ {"(*Response).Cookies", Method, 0, ""},
+ {"(*Response).Location", Method, 0, ""},
+ {"(*Response).ProtoAtLeast", Method, 0, ""},
+ {"(*Response).Write", Method, 0, ""},
+ {"(*ResponseController).EnableFullDuplex", Method, 21, ""},
+ {"(*ResponseController).Flush", Method, 20, ""},
+ {"(*ResponseController).Hijack", Method, 20, ""},
+ {"(*ResponseController).SetReadDeadline", Method, 20, ""},
+ {"(*ResponseController).SetWriteDeadline", Method, 20, ""},
+ {"(*ServeMux).Handle", Method, 0, ""},
+ {"(*ServeMux).HandleFunc", Method, 0, ""},
+ {"(*ServeMux).Handler", Method, 1, ""},
+ {"(*ServeMux).ServeHTTP", Method, 0, ""},
+ {"(*Server).Close", Method, 8, ""},
+ {"(*Server).ListenAndServe", Method, 0, ""},
+ {"(*Server).ListenAndServeTLS", Method, 0, ""},
+ {"(*Server).RegisterOnShutdown", Method, 9, ""},
+ {"(*Server).Serve", Method, 0, ""},
+ {"(*Server).ServeTLS", Method, 9, ""},
+ {"(*Server).SetKeepAlivesEnabled", Method, 3, ""},
+ {"(*Server).Shutdown", Method, 8, ""},
+ {"(*Transport).CancelRequest", Method, 1, ""},
+ {"(*Transport).Clone", Method, 13, ""},
+ {"(*Transport).CloseIdleConnections", Method, 0, ""},
+ {"(*Transport).RegisterProtocol", Method, 0, ""},
+ {"(*Transport).RoundTrip", Method, 0, ""},
+ {"(ConnState).String", Method, 3, ""},
+ {"(Dir).Open", Method, 0, ""},
+ {"(HandlerFunc).ServeHTTP", Method, 0, ""},
+ {"(Header).Add", Method, 0, ""},
+ {"(Header).Clone", Method, 13, ""},
+ {"(Header).Del", Method, 0, ""},
+ {"(Header).Get", Method, 0, ""},
+ {"(Header).Set", Method, 0, ""},
+ {"(Header).Values", Method, 14, ""},
+ {"(Header).Write", Method, 0, ""},
+ {"(Header).WriteSubset", Method, 0, ""},
+ {"(Protocols).HTTP1", Method, 24, ""},
+ {"(Protocols).HTTP2", Method, 24, ""},
+ {"(Protocols).String", Method, 24, ""},
+ {"(Protocols).UnencryptedHTTP2", Method, 24, ""},
+ {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"},
+ {"CanonicalHeaderKey", Func, 0, "func(s string) string"},
+ {"Client", Type, 0, ""},
+ {"Client.CheckRedirect", Field, 0, ""},
+ {"Client.Jar", Field, 0, ""},
+ {"Client.Timeout", Field, 3, ""},
+ {"Client.Transport", Field, 0, ""},
+ {"CloseNotifier", Type, 1, ""},
+ {"ConnState", Type, 3, ""},
+ {"Cookie", Type, 0, ""},
+ {"Cookie.Domain", Field, 0, ""},
+ {"Cookie.Expires", Field, 0, ""},
+ {"Cookie.HttpOnly", Field, 0, ""},
+ {"Cookie.MaxAge", Field, 0, ""},
+ {"Cookie.Name", Field, 0, ""},
+ {"Cookie.Partitioned", Field, 23, ""},
+ {"Cookie.Path", Field, 0, ""},
+ {"Cookie.Quoted", Field, 23, ""},
+ {"Cookie.Raw", Field, 0, ""},
+ {"Cookie.RawExpires", Field, 0, ""},
+ {"Cookie.SameSite", Field, 11, ""},
+ {"Cookie.Secure", Field, 0, ""},
+ {"Cookie.Unparsed", Field, 0, ""},
+ {"Cookie.Value", Field, 0, ""},
+ {"CookieJar", Type, 0, ""},
+ {"CrossOriginProtection", Type, 25, ""},
+ {"DefaultClient", Var, 0, ""},
+ {"DefaultMaxHeaderBytes", Const, 0, ""},
+ {"DefaultMaxIdleConnsPerHost", Const, 0, ""},
+ {"DefaultServeMux", Var, 0, ""},
+ {"DefaultTransport", Var, 0, ""},
+ {"DetectContentType", Func, 0, "func(data []byte) string"},
+ {"Dir", Type, 0, ""},
+ {"ErrAbortHandler", Var, 8, ""},
+ {"ErrBodyNotAllowed", Var, 0, ""},
+ {"ErrBodyReadAfterClose", Var, 0, ""},
+ {"ErrContentLength", Var, 0, ""},
+ {"ErrHandlerTimeout", Var, 0, ""},
+ {"ErrHeaderTooLong", Var, 0, ""},
+ {"ErrHijacked", Var, 0, ""},
+ {"ErrLineTooLong", Var, 0, ""},
+ {"ErrMissingBoundary", Var, 0, ""},
+ {"ErrMissingContentLength", Var, 0, ""},
+ {"ErrMissingFile", Var, 0, ""},
+ {"ErrNoCookie", Var, 0, ""},
+ {"ErrNoLocation", Var, 0, ""},
+ {"ErrNotMultipart", Var, 0, ""},
+ {"ErrNotSupported", Var, 0, ""},
+ {"ErrSchemeMismatch", Var, 21, ""},
+ {"ErrServerClosed", Var, 8, ""},
+ {"ErrShortBody", Var, 0, ""},
+ {"ErrSkipAltProtocol", Var, 6, ""},
+ {"ErrUnexpectedTrailer", Var, 0, ""},
+ {"ErrUseLastResponse", Var, 7, ""},
+ {"ErrWriteAfterFlush", Var, 0, ""},
+ {"Error", Func, 0, "func(w ResponseWriter, error string, code int)"},
+ {"FS", Func, 16, "func(fsys fs.FS) FileSystem"},
+ {"File", Type, 0, ""},
+ {"FileServer", Func, 0, "func(root FileSystem) Handler"},
+ {"FileServerFS", Func, 22, "func(root fs.FS) Handler"},
+ {"FileSystem", Type, 0, ""},
+ {"Flusher", Type, 0, ""},
+ {"Get", Func, 0, "func(url string) (resp *Response, err error)"},
+ {"HTTP2Config", Type, 24, ""},
+ {"HTTP2Config.CountError", Field, 24, ""},
+ {"HTTP2Config.MaxConcurrentStreams", Field, 24, ""},
+ {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24, ""},
+ {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24, ""},
+ {"HTTP2Config.MaxReadFrameSize", Field, 24, ""},
+ {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24, ""},
+ {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24, ""},
+ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24, ""},
+ {"HTTP2Config.PingTimeout", Field, 24, ""},
+ {"HTTP2Config.SendPingTimeout", Field, 24, ""},
+ {"HTTP2Config.WriteByteTimeout", Field, 24, ""},
+ {"Handle", Func, 0, "func(pattern string, handler Handler)"},
+ {"HandleFunc", Func, 0, "func(pattern string, handler func(ResponseWriter, *Request))"},
+ {"Handler", Type, 0, ""},
+ {"HandlerFunc", Type, 0, ""},
+ {"Head", Func, 0, "func(url string) (resp *Response, err error)"},
+ {"Header", Type, 0, ""},
+ {"Hijacker", Type, 0, ""},
+ {"ListenAndServe", Func, 0, "func(addr string, handler Handler) error"},
+ {"ListenAndServeTLS", Func, 0, "func(addr string, certFile string, keyFile string, handler Handler) error"},
+ {"LocalAddrContextKey", Var, 7, ""},
+ {"MaxBytesError", Type, 19, ""},
+ {"MaxBytesError.Limit", Field, 19, ""},
+ {"MaxBytesHandler", Func, 18, "func(h Handler, n int64) Handler"},
+ {"MaxBytesReader", Func, 0, "func(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser"},
+ {"MethodConnect", Const, 6, ""},
+ {"MethodDelete", Const, 6, ""},
+ {"MethodGet", Const, 6, ""},
+ {"MethodHead", Const, 6, ""},
+ {"MethodOptions", Const, 6, ""},
+ {"MethodPatch", Const, 6, ""},
+ {"MethodPost", Const, 6, ""},
+ {"MethodPut", Const, 6, ""},
+ {"MethodTrace", Const, 6, ""},
+ {"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"},
+ {"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"},
+ {"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"},
+ {"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"},
+ {"NewRequestWithContext", Func, 13, "func(ctx context.Context, method string, url string, body io.Reader) (*Request, error)"},
+ {"NewResponseController", Func, 20, "func(rw ResponseWriter) *ResponseController"},
+ {"NewServeMux", Func, 0, "func() *ServeMux"},
+ {"NoBody", Var, 8, ""},
+ {"NotFound", Func, 0, "func(w ResponseWriter, r *Request)"},
+ {"NotFoundHandler", Func, 0, "func() Handler"},
+ {"ParseCookie", Func, 23, "func(line string) ([]*Cookie, error)"},
+ {"ParseHTTPVersion", Func, 0, "func(vers string) (major int, minor int, ok bool)"},
+ {"ParseSetCookie", Func, 23, "func(line string) (*Cookie, error)"},
+ {"ParseTime", Func, 1, "func(text string) (t time.Time, err error)"},
+ {"Post", Func, 0, "func(url string, contentType string, body io.Reader) (resp *Response, err error)"},
+ {"PostForm", Func, 0, "func(url string, data url.Values) (resp *Response, err error)"},
+ {"ProtocolError", Type, 0, ""},
+ {"ProtocolError.ErrorString", Field, 0, ""},
+ {"Protocols", Type, 24, ""},
+ {"ProxyFromEnvironment", Func, 0, "func(req *Request) (*url.URL, error)"},
+ {"ProxyURL", Func, 0, "func(fixedURL *url.URL) func(*Request) (*url.URL, error)"},
+ {"PushOptions", Type, 8, ""},
+ {"PushOptions.Header", Field, 8, ""},
+ {"PushOptions.Method", Field, 8, ""},
+ {"Pusher", Type, 8, ""},
+ {"ReadRequest", Func, 0, "func(b *bufio.Reader) (*Request, error)"},
+ {"ReadResponse", Func, 0, "func(r *bufio.Reader, req *Request) (*Response, error)"},
+ {"Redirect", Func, 0, "func(w ResponseWriter, r *Request, url string, code int)"},
+ {"RedirectHandler", Func, 0, "func(url string, code int) Handler"},
+ {"Request", Type, 0, ""},
+ {"Request.Body", Field, 0, ""},
+ {"Request.Cancel", Field, 5, ""},
+ {"Request.Close", Field, 0, ""},
+ {"Request.ContentLength", Field, 0, ""},
+ {"Request.Form", Field, 0, ""},
+ {"Request.GetBody", Field, 8, ""},
+ {"Request.Header", Field, 0, ""},
+ {"Request.Host", Field, 0, ""},
+ {"Request.Method", Field, 0, ""},
+ {"Request.MultipartForm", Field, 0, ""},
+ {"Request.Pattern", Field, 23, ""},
+ {"Request.PostForm", Field, 1, ""},
+ {"Request.Proto", Field, 0, ""},
+ {"Request.ProtoMajor", Field, 0, ""},
+ {"Request.ProtoMinor", Field, 0, ""},
+ {"Request.RemoteAddr", Field, 0, ""},
+ {"Request.RequestURI", Field, 0, ""},
+ {"Request.Response", Field, 7, ""},
+ {"Request.TLS", Field, 0, ""},
+ {"Request.Trailer", Field, 0, ""},
+ {"Request.TransferEncoding", Field, 0, ""},
+ {"Request.URL", Field, 0, ""},
+ {"Response", Type, 0, ""},
+ {"Response.Body", Field, 0, ""},
+ {"Response.Close", Field, 0, ""},
+ {"Response.ContentLength", Field, 0, ""},
+ {"Response.Header", Field, 0, ""},
+ {"Response.Proto", Field, 0, ""},
+ {"Response.ProtoMajor", Field, 0, ""},
+ {"Response.ProtoMinor", Field, 0, ""},
+ {"Response.Request", Field, 0, ""},
+ {"Response.Status", Field, 0, ""},
+ {"Response.StatusCode", Field, 0, ""},
+ {"Response.TLS", Field, 3, ""},
+ {"Response.Trailer", Field, 0, ""},
+ {"Response.TransferEncoding", Field, 0, ""},
+ {"Response.Uncompressed", Field, 7, ""},
+ {"ResponseController", Type, 20, ""},
+ {"ResponseWriter", Type, 0, ""},
+ {"RoundTripper", Type, 0, ""},
+ {"SameSite", Type, 11, ""},
+ {"SameSiteDefaultMode", Const, 11, ""},
+ {"SameSiteLaxMode", Const, 11, ""},
+ {"SameSiteNoneMode", Const, 13, ""},
+ {"SameSiteStrictMode", Const, 11, ""},
+ {"Serve", Func, 0, "func(l net.Listener, handler Handler) error"},
+ {"ServeContent", Func, 0, "func(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker)"},
+ {"ServeFile", Func, 0, "func(w ResponseWriter, r *Request, name string)"},
+ {"ServeFileFS", Func, 22, "func(w ResponseWriter, r *Request, fsys fs.FS, name string)"},
+ {"ServeMux", Type, 0, ""},
+ {"ServeTLS", Func, 9, "func(l net.Listener, handler Handler, certFile string, keyFile string) error"},
+ {"Server", Type, 0, ""},
+ {"Server.Addr", Field, 0, ""},
+ {"Server.BaseContext", Field, 13, ""},
+ {"Server.ConnContext", Field, 13, ""},
+ {"Server.ConnState", Field, 3, ""},
+ {"Server.DisableGeneralOptionsHandler", Field, 20, ""},
+ {"Server.ErrorLog", Field, 3, ""},
+ {"Server.HTTP2", Field, 24, ""},
+ {"Server.Handler", Field, 0, ""},
+ {"Server.IdleTimeout", Field, 8, ""},
+ {"Server.MaxHeaderBytes", Field, 0, ""},
+ {"Server.Protocols", Field, 24, ""},
+ {"Server.ReadHeaderTimeout", Field, 8, ""},
+ {"Server.ReadTimeout", Field, 0, ""},
+ {"Server.TLSConfig", Field, 0, ""},
+ {"Server.TLSNextProto", Field, 1, ""},
+ {"Server.WriteTimeout", Field, 0, ""},
+ {"ServerContextKey", Var, 7, ""},
+ {"SetCookie", Func, 0, "func(w ResponseWriter, cookie *Cookie)"},
+ {"StateActive", Const, 3, ""},
+ {"StateClosed", Const, 3, ""},
+ {"StateHijacked", Const, 3, ""},
+ {"StateIdle", Const, 3, ""},
+ {"StateNew", Const, 3, ""},
+ {"StatusAccepted", Const, 0, ""},
+ {"StatusAlreadyReported", Const, 7, ""},
+ {"StatusBadGateway", Const, 0, ""},
+ {"StatusBadRequest", Const, 0, ""},
+ {"StatusConflict", Const, 0, ""},
+ {"StatusContinue", Const, 0, ""},
+ {"StatusCreated", Const, 0, ""},
+ {"StatusEarlyHints", Const, 13, ""},
+ {"StatusExpectationFailed", Const, 0, ""},
+ {"StatusFailedDependency", Const, 7, ""},
+ {"StatusForbidden", Const, 0, ""},
+ {"StatusFound", Const, 0, ""},
+ {"StatusGatewayTimeout", Const, 0, ""},
+ {"StatusGone", Const, 0, ""},
+ {"StatusHTTPVersionNotSupported", Const, 0, ""},
+ {"StatusIMUsed", Const, 7, ""},
+ {"StatusInsufficientStorage", Const, 7, ""},
+ {"StatusInternalServerError", Const, 0, ""},
+ {"StatusLengthRequired", Const, 0, ""},
+ {"StatusLocked", Const, 7, ""},
+ {"StatusLoopDetected", Const, 7, ""},
+ {"StatusMethodNotAllowed", Const, 0, ""},
+ {"StatusMisdirectedRequest", Const, 11, ""},
+ {"StatusMovedPermanently", Const, 0, ""},
+ {"StatusMultiStatus", Const, 7, ""},
+ {"StatusMultipleChoices", Const, 0, ""},
+ {"StatusNetworkAuthenticationRequired", Const, 6, ""},
+ {"StatusNoContent", Const, 0, ""},
+ {"StatusNonAuthoritativeInfo", Const, 0, ""},
+ {"StatusNotAcceptable", Const, 0, ""},
+ {"StatusNotExtended", Const, 7, ""},
+ {"StatusNotFound", Const, 0, ""},
+ {"StatusNotImplemented", Const, 0, ""},
+ {"StatusNotModified", Const, 0, ""},
+ {"StatusOK", Const, 0, ""},
+ {"StatusPartialContent", Const, 0, ""},
+ {"StatusPaymentRequired", Const, 0, ""},
+ {"StatusPermanentRedirect", Const, 7, ""},
+ {"StatusPreconditionFailed", Const, 0, ""},
+ {"StatusPreconditionRequired", Const, 6, ""},
+ {"StatusProcessing", Const, 7, ""},
+ {"StatusProxyAuthRequired", Const, 0, ""},
+ {"StatusRequestEntityTooLarge", Const, 0, ""},
+ {"StatusRequestHeaderFieldsTooLarge", Const, 6, ""},
+ {"StatusRequestTimeout", Const, 0, ""},
+ {"StatusRequestURITooLong", Const, 0, ""},
+ {"StatusRequestedRangeNotSatisfiable", Const, 0, ""},
+ {"StatusResetContent", Const, 0, ""},
+ {"StatusSeeOther", Const, 0, ""},
+ {"StatusServiceUnavailable", Const, 0, ""},
+ {"StatusSwitchingProtocols", Const, 0, ""},
+ {"StatusTeapot", Const, 0, ""},
+ {"StatusTemporaryRedirect", Const, 0, ""},
+ {"StatusText", Func, 0, "func(code int) string"},
+ {"StatusTooEarly", Const, 12, ""},
+ {"StatusTooManyRequests", Const, 6, ""},
+ {"StatusUnauthorized", Const, 0, ""},
+ {"StatusUnavailableForLegalReasons", Const, 6, ""},
+ {"StatusUnprocessableEntity", Const, 7, ""},
+ {"StatusUnsupportedMediaType", Const, 0, ""},
+ {"StatusUpgradeRequired", Const, 7, ""},
+ {"StatusUseProxy", Const, 0, ""},
+ {"StatusVariantAlsoNegotiates", Const, 7, ""},
+ {"StripPrefix", Func, 0, "func(prefix string, h Handler) Handler"},
+ {"TimeFormat", Const, 0, ""},
+ {"TimeoutHandler", Func, 0, "func(h Handler, dt time.Duration, msg string) Handler"},
+ {"TrailerPrefix", Const, 8, ""},
+ {"Transport", Type, 0, ""},
+ {"Transport.Dial", Field, 0, ""},
+ {"Transport.DialContext", Field, 7, ""},
+ {"Transport.DialTLS", Field, 4, ""},
+ {"Transport.DialTLSContext", Field, 14, ""},
+ {"Transport.DisableCompression", Field, 0, ""},
+ {"Transport.DisableKeepAlives", Field, 0, ""},
+ {"Transport.ExpectContinueTimeout", Field, 6, ""},
+ {"Transport.ForceAttemptHTTP2", Field, 13, ""},
+ {"Transport.GetProxyConnectHeader", Field, 16, ""},
+ {"Transport.HTTP2", Field, 24, ""},
+ {"Transport.IdleConnTimeout", Field, 7, ""},
+ {"Transport.MaxConnsPerHost", Field, 11, ""},
+ {"Transport.MaxIdleConns", Field, 7, ""},
+ {"Transport.MaxIdleConnsPerHost", Field, 0, ""},
+ {"Transport.MaxResponseHeaderBytes", Field, 7, ""},
+ {"Transport.OnProxyConnectResponse", Field, 20, ""},
+ {"Transport.Protocols", Field, 24, ""},
+ {"Transport.Proxy", Field, 0, ""},
+ {"Transport.ProxyConnectHeader", Field, 8, ""},
+ {"Transport.ReadBufferSize", Field, 13, ""},
+ {"Transport.ResponseHeaderTimeout", Field, 1, ""},
+ {"Transport.TLSClientConfig", Field, 0, ""},
+ {"Transport.TLSHandshakeTimeout", Field, 3, ""},
+ {"Transport.TLSNextProto", Field, 6, ""},
+ {"Transport.WriteBufferSize", Field, 13, ""},
+ },
+ "net/http/cgi": {
+ {"(*Handler).ServeHTTP", Method, 0, ""},
+ {"Handler", Type, 0, ""},
+ {"Handler.Args", Field, 0, ""},
+ {"Handler.Dir", Field, 0, ""},
+ {"Handler.Env", Field, 0, ""},
+ {"Handler.InheritEnv", Field, 0, ""},
+ {"Handler.Logger", Field, 0, ""},
+ {"Handler.Path", Field, 0, ""},
+ {"Handler.PathLocationHandler", Field, 0, ""},
+ {"Handler.Root", Field, 0, ""},
+ {"Handler.Stderr", Field, 7, ""},
+ {"Request", Func, 0, "func() (*http.Request, error)"},
+ {"RequestFromMap", Func, 0, "func(params map[string]string) (*http.Request, error)"},
+ {"Serve", Func, 0, "func(handler http.Handler) error"},
+ },
+ "net/http/cookiejar": {
+ {"(*Jar).Cookies", Method, 1, ""},
+ {"(*Jar).SetCookies", Method, 1, ""},
+ {"Jar", Type, 1, ""},
+ {"New", Func, 1, "func(o *Options) (*Jar, error)"},
+ {"Options", Type, 1, ""},
+ {"Options.PublicSuffixList", Field, 1, ""},
+ {"PublicSuffixList", Type, 1, ""},
+ },
+ "net/http/fcgi": {
+ {"ErrConnClosed", Var, 5, ""},
+ {"ErrRequestAborted", Var, 5, ""},
+ {"ProcessEnv", Func, 9, "func(r *http.Request) map[string]string"},
+ {"Serve", Func, 0, "func(l net.Listener, handler http.Handler) error"},
+ },
+ "net/http/httptest": {
+ {"(*ResponseRecorder).Flush", Method, 0, ""},
+ {"(*ResponseRecorder).Header", Method, 0, ""},
+ {"(*ResponseRecorder).Result", Method, 7, ""},
+ {"(*ResponseRecorder).Write", Method, 0, ""},
+ {"(*ResponseRecorder).WriteHeader", Method, 0, ""},
+ {"(*ResponseRecorder).WriteString", Method, 6, ""},
+ {"(*Server).Certificate", Method, 9, ""},
+ {"(*Server).Client", Method, 9, ""},
+ {"(*Server).Close", Method, 0, ""},
+ {"(*Server).CloseClientConnections", Method, 0, ""},
+ {"(*Server).Start", Method, 0, ""},
+ {"(*Server).StartTLS", Method, 0, ""},
+ {"DefaultRemoteAddr", Const, 0, ""},
+ {"NewRecorder", Func, 0, "func() *ResponseRecorder"},
+ {"NewRequest", Func, 7, "func(method string, target string, body io.Reader) *http.Request"},
+ {"NewRequestWithContext", Func, 23, "func(ctx context.Context, method string, target string, body io.Reader) *http.Request"},
+ {"NewServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"NewTLSServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"NewUnstartedServer", Func, 0, "func(handler http.Handler) *Server"},
+ {"ResponseRecorder", Type, 0, ""},
+ {"ResponseRecorder.Body", Field, 0, ""},
+ {"ResponseRecorder.Code", Field, 0, ""},
+ {"ResponseRecorder.Flushed", Field, 0, ""},
+ {"ResponseRecorder.HeaderMap", Field, 0, ""},
+ {"Server", Type, 0, ""},
+ {"Server.Config", Field, 0, ""},
+ {"Server.EnableHTTP2", Field, 14, ""},
+ {"Server.Listener", Field, 0, ""},
+ {"Server.TLS", Field, 0, ""},
+ {"Server.URL", Field, 0, ""},
+ },
+ "net/http/httptrace": {
+ {"ClientTrace", Type, 7, ""},
+ {"ClientTrace.ConnectDone", Field, 7, ""},
+ {"ClientTrace.ConnectStart", Field, 7, ""},
+ {"ClientTrace.DNSDone", Field, 7, ""},
+ {"ClientTrace.DNSStart", Field, 7, ""},
+ {"ClientTrace.GetConn", Field, 7, ""},
+ {"ClientTrace.Got100Continue", Field, 7, ""},
+ {"ClientTrace.Got1xxResponse", Field, 11, ""},
+ {"ClientTrace.GotConn", Field, 7, ""},
+ {"ClientTrace.GotFirstResponseByte", Field, 7, ""},
+ {"ClientTrace.PutIdleConn", Field, 7, ""},
+ {"ClientTrace.TLSHandshakeDone", Field, 8, ""},
+ {"ClientTrace.TLSHandshakeStart", Field, 8, ""},
+ {"ClientTrace.Wait100Continue", Field, 7, ""},
+ {"ClientTrace.WroteHeaderField", Field, 11, ""},
+ {"ClientTrace.WroteHeaders", Field, 7, ""},
+ {"ClientTrace.WroteRequest", Field, 7, ""},
+ {"ContextClientTrace", Func, 7, "func(ctx context.Context) *ClientTrace"},
+ {"DNSDoneInfo", Type, 7, ""},
+ {"DNSDoneInfo.Addrs", Field, 7, ""},
+ {"DNSDoneInfo.Coalesced", Field, 7, ""},
+ {"DNSDoneInfo.Err", Field, 7, ""},
+ {"DNSStartInfo", Type, 7, ""},
+ {"DNSStartInfo.Host", Field, 7, ""},
+ {"GotConnInfo", Type, 7, ""},
+ {"GotConnInfo.Conn", Field, 7, ""},
+ {"GotConnInfo.IdleTime", Field, 7, ""},
+ {"GotConnInfo.Reused", Field, 7, ""},
+ {"GotConnInfo.WasIdle", Field, 7, ""},
+ {"WithClientTrace", Func, 7, "func(ctx context.Context, trace *ClientTrace) context.Context"},
+ {"WroteRequestInfo", Type, 7, ""},
+ {"WroteRequestInfo.Err", Field, 7, ""},
+ },
+ "net/http/httputil": {
+ {"(*ClientConn).Close", Method, 0, ""},
+ {"(*ClientConn).Do", Method, 0, ""},
+ {"(*ClientConn).Hijack", Method, 0, ""},
+ {"(*ClientConn).Pending", Method, 0, ""},
+ {"(*ClientConn).Read", Method, 0, ""},
+ {"(*ClientConn).Write", Method, 0, ""},
+ {"(*ProxyRequest).SetURL", Method, 20, ""},
+ {"(*ProxyRequest).SetXForwarded", Method, 20, ""},
+ {"(*ReverseProxy).ServeHTTP", Method, 0, ""},
+ {"(*ServerConn).Close", Method, 0, ""},
+ {"(*ServerConn).Hijack", Method, 0, ""},
+ {"(*ServerConn).Pending", Method, 0, ""},
+ {"(*ServerConn).Read", Method, 0, ""},
+ {"(*ServerConn).Write", Method, 0, ""},
+ {"BufferPool", Type, 6, ""},
+ {"ClientConn", Type, 0, ""},
+ {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+ {"DumpRequestOut", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"},
+ {"DumpResponse", Func, 0, "func(resp *http.Response, body bool) ([]byte, error)"},
+ {"ErrClosed", Var, 0, ""},
+ {"ErrLineTooLong", Var, 0, ""},
+ {"ErrPersistEOF", Var, 0, ""},
+ {"ErrPipeline", Var, 0, ""},
+ {"NewChunkedReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewChunkedWriter", Func, 0, "func(w io.Writer) io.WriteCloser"},
+ {"NewClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+ {"NewProxyClientConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ClientConn"},
+ {"NewServerConn", Func, 0, "func(c net.Conn, r *bufio.Reader) *ServerConn"},
+ {"NewSingleHostReverseProxy", Func, 0, "func(target *url.URL) *ReverseProxy"},
+ {"ProxyRequest", Type, 20, ""},
+ {"ProxyRequest.In", Field, 20, ""},
+ {"ProxyRequest.Out", Field, 20, ""},
+ {"ReverseProxy", Type, 0, ""},
+ {"ReverseProxy.BufferPool", Field, 6, ""},
+ {"ReverseProxy.Director", Field, 0, ""},
+ {"ReverseProxy.ErrorHandler", Field, 11, ""},
+ {"ReverseProxy.ErrorLog", Field, 4, ""},
+ {"ReverseProxy.FlushInterval", Field, 0, ""},
+ {"ReverseProxy.ModifyResponse", Field, 8, ""},
+ {"ReverseProxy.Rewrite", Field, 20, ""},
+ {"ReverseProxy.Transport", Field, 0, ""},
+ {"ServerConn", Type, 0, ""},
+ },
+ "net/http/pprof": {
+ {"Cmdline", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Handler", Func, 0, "func(name string) http.Handler"},
+ {"Index", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Profile", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Symbol", Func, 0, "func(w http.ResponseWriter, r *http.Request)"},
+ {"Trace", Func, 5, "func(w http.ResponseWriter, r *http.Request)"},
+ },
+ "net/mail": {
+ {"(*Address).String", Method, 0, ""},
+ {"(*AddressParser).Parse", Method, 5, ""},
+ {"(*AddressParser).ParseList", Method, 5, ""},
+ {"(Header).AddressList", Method, 0, ""},
+ {"(Header).Date", Method, 0, ""},
+ {"(Header).Get", Method, 0, ""},
+ {"Address", Type, 0, ""},
+ {"Address.Address", Field, 0, ""},
+ {"Address.Name", Field, 0, ""},
+ {"AddressParser", Type, 5, ""},
+ {"AddressParser.WordDecoder", Field, 5, ""},
+ {"ErrHeaderNotPresent", Var, 0, ""},
+ {"Header", Type, 0, ""},
+ {"Message", Type, 0, ""},
+ {"Message.Body", Field, 0, ""},
+ {"Message.Header", Field, 0, ""},
+ {"ParseAddress", Func, 1, "func(address string) (*Address, error)"},
+ {"ParseAddressList", Func, 1, "func(list string) ([]*Address, error)"},
+ {"ParseDate", Func, 8, "func(date string) (time.Time, error)"},
+ {"ReadMessage", Func, 0, "func(r io.Reader) (msg *Message, err error)"},
+ },
+ "net/netip": {
+ {"(*Addr).UnmarshalBinary", Method, 18, ""},
+ {"(*Addr).UnmarshalText", Method, 18, ""},
+ {"(*AddrPort).UnmarshalBinary", Method, 18, ""},
+ {"(*AddrPort).UnmarshalText", Method, 18, ""},
+ {"(*Prefix).UnmarshalBinary", Method, 18, ""},
+ {"(*Prefix).UnmarshalText", Method, 18, ""},
+ {"(Addr).AppendBinary", Method, 24, ""},
+ {"(Addr).AppendText", Method, 24, ""},
+ {"(Addr).AppendTo", Method, 18, ""},
+ {"(Addr).As16", Method, 18, ""},
+ {"(Addr).As4", Method, 18, ""},
+ {"(Addr).AsSlice", Method, 18, ""},
+ {"(Addr).BitLen", Method, 18, ""},
+ {"(Addr).Compare", Method, 18, ""},
+ {"(Addr).Is4", Method, 18, ""},
+ {"(Addr).Is4In6", Method, 18, ""},
+ {"(Addr).Is6", Method, 18, ""},
+ {"(Addr).IsGlobalUnicast", Method, 18, ""},
+ {"(Addr).IsInterfaceLocalMulticast", Method, 18, ""},
+ {"(Addr).IsLinkLocalMulticast", Method, 18, ""},
+ {"(Addr).IsLinkLocalUnicast", Method, 18, ""},
+ {"(Addr).IsLoopback", Method, 18, ""},
+ {"(Addr).IsMulticast", Method, 18, ""},
+ {"(Addr).IsPrivate", Method, 18, ""},
+ {"(Addr).IsUnspecified", Method, 18, ""},
+ {"(Addr).IsValid", Method, 18, ""},
+ {"(Addr).Less", Method, 18, ""},
+ {"(Addr).MarshalBinary", Method, 18, ""},
+ {"(Addr).MarshalText", Method, 18, ""},
+ {"(Addr).Next", Method, 18, ""},
+ {"(Addr).Prefix", Method, 18, ""},
+ {"(Addr).Prev", Method, 18, ""},
+ {"(Addr).String", Method, 18, ""},
+ {"(Addr).StringExpanded", Method, 18, ""},
+ {"(Addr).Unmap", Method, 18, ""},
+ {"(Addr).WithZone", Method, 18, ""},
+ {"(Addr).Zone", Method, 18, ""},
+ {"(AddrPort).Addr", Method, 18, ""},
+ {"(AddrPort).AppendBinary", Method, 24, ""},
+ {"(AddrPort).AppendText", Method, 24, ""},
+ {"(AddrPort).AppendTo", Method, 18, ""},
+ {"(AddrPort).Compare", Method, 22, ""},
+ {"(AddrPort).IsValid", Method, 18, ""},
+ {"(AddrPort).MarshalBinary", Method, 18, ""},
+ {"(AddrPort).MarshalText", Method, 18, ""},
+ {"(AddrPort).Port", Method, 18, ""},
+ {"(AddrPort).String", Method, 18, ""},
+ {"(Prefix).Addr", Method, 18, ""},
+ {"(Prefix).AppendBinary", Method, 24, ""},
+ {"(Prefix).AppendText", Method, 24, ""},
+ {"(Prefix).AppendTo", Method, 18, ""},
+ {"(Prefix).Bits", Method, 18, ""},
+ {"(Prefix).Contains", Method, 18, ""},
+ {"(Prefix).IsSingleIP", Method, 18, ""},
+ {"(Prefix).IsValid", Method, 18, ""},
+ {"(Prefix).MarshalBinary", Method, 18, ""},
+ {"(Prefix).MarshalText", Method, 18, ""},
+ {"(Prefix).Masked", Method, 18, ""},
+ {"(Prefix).Overlaps", Method, 18, ""},
+ {"(Prefix).String", Method, 18, ""},
+ {"Addr", Type, 18, ""},
+ {"AddrFrom16", Func, 18, "func(addr [16]byte) Addr"},
+ {"AddrFrom4", Func, 18, "func(addr [4]byte) Addr"},
+ {"AddrFromSlice", Func, 18, "func(slice []byte) (ip Addr, ok bool)"},
+ {"AddrPort", Type, 18, ""},
+ {"AddrPortFrom", Func, 18, "func(ip Addr, port uint16) AddrPort"},
+ {"IPv4Unspecified", Func, 18, "func() Addr"},
+ {"IPv6LinkLocalAllNodes", Func, 18, "func() Addr"},
+ {"IPv6LinkLocalAllRouters", Func, 20, "func() Addr"},
+ {"IPv6Loopback", Func, 20, "func() Addr"},
+ {"IPv6Unspecified", Func, 18, "func() Addr"},
+ {"MustParseAddr", Func, 18, "func(s string) Addr"},
+ {"MustParseAddrPort", Func, 18, "func(s string) AddrPort"},
+ {"MustParsePrefix", Func, 18, "func(s string) Prefix"},
+ {"ParseAddr", Func, 18, "func(s string) (Addr, error)"},
+ {"ParseAddrPort", Func, 18, "func(s string) (AddrPort, error)"},
+ {"ParsePrefix", Func, 18, "func(s string) (Prefix, error)"},
+ {"Prefix", Type, 18, ""},
+ {"PrefixFrom", Func, 18, "func(ip Addr, bits int) Prefix"},
+ },
+ "net/rpc": {
+ {"(*Client).Call", Method, 0, ""},
+ {"(*Client).Close", Method, 0, ""},
+ {"(*Client).Go", Method, 0, ""},
+ {"(*Server).Accept", Method, 0, ""},
+ {"(*Server).HandleHTTP", Method, 0, ""},
+ {"(*Server).Register", Method, 0, ""},
+ {"(*Server).RegisterName", Method, 0, ""},
+ {"(*Server).ServeCodec", Method, 0, ""},
+ {"(*Server).ServeConn", Method, 0, ""},
+ {"(*Server).ServeHTTP", Method, 0, ""},
+ {"(*Server).ServeRequest", Method, 0, ""},
+ {"(ServerError).Error", Method, 0, ""},
+ {"Accept", Func, 0, "func(lis net.Listener)"},
+ {"Call", Type, 0, ""},
+ {"Call.Args", Field, 0, ""},
+ {"Call.Done", Field, 0, ""},
+ {"Call.Error", Field, 0, ""},
+ {"Call.Reply", Field, 0, ""},
+ {"Call.ServiceMethod", Field, 0, ""},
+ {"Client", Type, 0, ""},
+ {"ClientCodec", Type, 0, ""},
+ {"DefaultDebugPath", Const, 0, ""},
+ {"DefaultRPCPath", Const, 0, ""},
+ {"DefaultServer", Var, 0, ""},
+ {"Dial", Func, 0, "func(network string, address string) (*Client, error)"},
+ {"DialHTTP", Func, 0, "func(network string, address string) (*Client, error)"},
+ {"DialHTTPPath", Func, 0, "func(network string, address string, path string) (*Client, error)"},
+ {"ErrShutdown", Var, 0, ""},
+ {"HandleHTTP", Func, 0, "func()"},
+ {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *Client"},
+ {"NewClientWithCodec", Func, 0, "func(codec ClientCodec) *Client"},
+ {"NewServer", Func, 0, "func() *Server"},
+ {"Register", Func, 0, "func(rcvr any) error"},
+ {"RegisterName", Func, 0, "func(name string, rcvr any) error"},
+ {"Request", Type, 0, ""},
+ {"Request.Seq", Field, 0, ""},
+ {"Request.ServiceMethod", Field, 0, ""},
+ {"Response", Type, 0, ""},
+ {"Response.Error", Field, 0, ""},
+ {"Response.Seq", Field, 0, ""},
+ {"Response.ServiceMethod", Field, 0, ""},
+ {"ServeCodec", Func, 0, "func(codec ServerCodec)"},
+ {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+ {"ServeRequest", Func, 0, "func(codec ServerCodec) error"},
+ {"Server", Type, 0, ""},
+ {"ServerCodec", Type, 0, ""},
+ {"ServerError", Type, 0, ""},
+ },
+ "net/rpc/jsonrpc": {
+ {"Dial", Func, 0, "func(network string, address string) (*rpc.Client, error)"},
+ {"NewClient", Func, 0, "func(conn io.ReadWriteCloser) *rpc.Client"},
+ {"NewClientCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ClientCodec"},
+ {"NewServerCodec", Func, 0, "func(conn io.ReadWriteCloser) rpc.ServerCodec"},
+ {"ServeConn", Func, 0, "func(conn io.ReadWriteCloser)"},
+ },
+ "net/smtp": {
+ {"(*Client).Auth", Method, 0, ""},
+ {"(*Client).Close", Method, 2, ""},
+ {"(*Client).Data", Method, 0, ""},
+ {"(*Client).Extension", Method, 0, ""},
+ {"(*Client).Hello", Method, 1, ""},
+ {"(*Client).Mail", Method, 0, ""},
+ {"(*Client).Noop", Method, 10, ""},
+ {"(*Client).Quit", Method, 0, ""},
+ {"(*Client).Rcpt", Method, 0, ""},
+ {"(*Client).Reset", Method, 0, ""},
+ {"(*Client).StartTLS", Method, 0, ""},
+ {"(*Client).TLSConnectionState", Method, 5, ""},
+ {"(*Client).Verify", Method, 0, ""},
+ {"Auth", Type, 0, ""},
+ {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"},
+ {"Client", Type, 0, ""},
+ {"Client.Text", Field, 0, ""},
+ {"Dial", Func, 0, "func(addr string) (*Client, error)"},
+ {"NewClient", Func, 0, "func(conn net.Conn, host string) (*Client, error)"},
+ {"PlainAuth", Func, 0, "func(identity string, username string, password string, host string) Auth"},
+ {"SendMail", Func, 0, "func(addr string, a Auth, from string, to []string, msg []byte) error"},
+ {"ServerInfo", Type, 0, ""},
+ {"ServerInfo.Auth", Field, 0, ""},
+ {"ServerInfo.Name", Field, 0, ""},
+ {"ServerInfo.TLS", Field, 0, ""},
+ },
+ "net/textproto": {
+ {"(*Conn).Close", Method, 0, ""},
+ {"(*Conn).Cmd", Method, 0, ""},
+ {"(*Conn).DotReader", Method, 0, ""},
+ {"(*Conn).DotWriter", Method, 0, ""},
+ {"(*Conn).EndRequest", Method, 0, ""},
+ {"(*Conn).EndResponse", Method, 0, ""},
+ {"(*Conn).Next", Method, 0, ""},
+ {"(*Conn).PrintfLine", Method, 0, ""},
+ {"(*Conn).ReadCodeLine", Method, 0, ""},
+ {"(*Conn).ReadContinuedLine", Method, 0, ""},
+ {"(*Conn).ReadContinuedLineBytes", Method, 0, ""},
+ {"(*Conn).ReadDotBytes", Method, 0, ""},
+ {"(*Conn).ReadDotLines", Method, 0, ""},
+ {"(*Conn).ReadLine", Method, 0, ""},
+ {"(*Conn).ReadLineBytes", Method, 0, ""},
+ {"(*Conn).ReadMIMEHeader", Method, 0, ""},
+ {"(*Conn).ReadResponse", Method, 0, ""},
+ {"(*Conn).StartRequest", Method, 0, ""},
+ {"(*Conn).StartResponse", Method, 0, ""},
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Pipeline).EndRequest", Method, 0, ""},
+ {"(*Pipeline).EndResponse", Method, 0, ""},
+ {"(*Pipeline).Next", Method, 0, ""},
+ {"(*Pipeline).StartRequest", Method, 0, ""},
+ {"(*Pipeline).StartResponse", Method, 0, ""},
+ {"(*Reader).DotReader", Method, 0, ""},
+ {"(*Reader).ReadCodeLine", Method, 0, ""},
+ {"(*Reader).ReadContinuedLine", Method, 0, ""},
+ {"(*Reader).ReadContinuedLineBytes", Method, 0, ""},
+ {"(*Reader).ReadDotBytes", Method, 0, ""},
+ {"(*Reader).ReadDotLines", Method, 0, ""},
+ {"(*Reader).ReadLine", Method, 0, ""},
+ {"(*Reader).ReadLineBytes", Method, 0, ""},
+ {"(*Reader).ReadMIMEHeader", Method, 0, ""},
+ {"(*Reader).ReadResponse", Method, 0, ""},
+ {"(*Writer).DotWriter", Method, 0, ""},
+ {"(*Writer).PrintfLine", Method, 0, ""},
+ {"(MIMEHeader).Add", Method, 0, ""},
+ {"(MIMEHeader).Del", Method, 0, ""},
+ {"(MIMEHeader).Get", Method, 0, ""},
+ {"(MIMEHeader).Set", Method, 0, ""},
+ {"(MIMEHeader).Values", Method, 14, ""},
+ {"(ProtocolError).Error", Method, 0, ""},
+ {"CanonicalMIMEHeaderKey", Func, 0, "func(s string) string"},
+ {"Conn", Type, 0, ""},
+ {"Conn.Pipeline", Field, 0, ""},
+ {"Conn.Reader", Field, 0, ""},
+ {"Conn.Writer", Field, 0, ""},
+ {"Dial", Func, 0, "func(network string, addr string) (*Conn, error)"},
+ {"Error", Type, 0, ""},
+ {"Error.Code", Field, 0, ""},
+ {"Error.Msg", Field, 0, ""},
+ {"MIMEHeader", Type, 0, ""},
+ {"NewConn", Func, 0, "func(conn io.ReadWriteCloser) *Conn"},
+ {"NewReader", Func, 0, "func(r *bufio.Reader) *Reader"},
+ {"NewWriter", Func, 0, "func(w *bufio.Writer) *Writer"},
+ {"Pipeline", Type, 0, ""},
+ {"ProtocolError", Type, 0, ""},
+ {"Reader", Type, 0, ""},
+ {"Reader.R", Field, 0, ""},
+ {"TrimBytes", Func, 1, "func(b []byte) []byte"},
+ {"TrimString", Func, 1, "func(s string) string"},
+ {"Writer", Type, 0, ""},
+ {"Writer.W", Field, 0, ""},
+ },
+ "net/url": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Error).Temporary", Method, 6, ""},
+ {"(*Error).Timeout", Method, 6, ""},
+ {"(*Error).Unwrap", Method, 13, ""},
+ {"(*URL).AppendBinary", Method, 24, ""},
+ {"(*URL).EscapedFragment", Method, 15, ""},
+ {"(*URL).EscapedPath", Method, 5, ""},
+ {"(*URL).Hostname", Method, 8, ""},
+ {"(*URL).IsAbs", Method, 0, ""},
+ {"(*URL).JoinPath", Method, 19, ""},
+ {"(*URL).MarshalBinary", Method, 8, ""},
+ {"(*URL).Parse", Method, 0, ""},
+ {"(*URL).Port", Method, 8, ""},
+ {"(*URL).Query", Method, 0, ""},
+ {"(*URL).Redacted", Method, 15, ""},
+ {"(*URL).RequestURI", Method, 0, ""},
+ {"(*URL).ResolveReference", Method, 0, ""},
+ {"(*URL).String", Method, 0, ""},
+ {"(*URL).UnmarshalBinary", Method, 8, ""},
+ {"(*Userinfo).Password", Method, 0, ""},
+ {"(*Userinfo).String", Method, 0, ""},
+ {"(*Userinfo).Username", Method, 0, ""},
+ {"(EscapeError).Error", Method, 0, ""},
+ {"(InvalidHostError).Error", Method, 6, ""},
+ {"(Values).Add", Method, 0, ""},
+ {"(Values).Del", Method, 0, ""},
+ {"(Values).Encode", Method, 0, ""},
+ {"(Values).Get", Method, 0, ""},
+ {"(Values).Has", Method, 17, ""},
+ {"(Values).Set", Method, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Err", Field, 0, ""},
+ {"Error.Op", Field, 0, ""},
+ {"Error.URL", Field, 0, ""},
+ {"EscapeError", Type, 0, ""},
+ {"InvalidHostError", Type, 6, ""},
+ {"JoinPath", Func, 19, "func(base string, elem ...string) (result string, err error)"},
+ {"Parse", Func, 0, "func(rawURL string) (*URL, error)"},
+ {"ParseQuery", Func, 0, "func(query string) (Values, error)"},
+ {"ParseRequestURI", Func, 0, "func(rawURL string) (*URL, error)"},
+ {"PathEscape", Func, 8, "func(s string) string"},
+ {"PathUnescape", Func, 8, "func(s string) (string, error)"},
+ {"QueryEscape", Func, 0, "func(s string) string"},
+ {"QueryUnescape", Func, 0, "func(s string) (string, error)"},
+ {"URL", Type, 0, ""},
+ {"URL.ForceQuery", Field, 7, ""},
+ {"URL.Fragment", Field, 0, ""},
+ {"URL.Host", Field, 0, ""},
+ {"URL.OmitHost", Field, 19, ""},
+ {"URL.Opaque", Field, 0, ""},
+ {"URL.Path", Field, 0, ""},
+ {"URL.RawFragment", Field, 15, ""},
+ {"URL.RawPath", Field, 5, ""},
+ {"URL.RawQuery", Field, 0, ""},
+ {"URL.Scheme", Field, 0, ""},
+ {"URL.User", Field, 0, ""},
+ {"User", Func, 0, "func(username string) *Userinfo"},
+ {"UserPassword", Func, 0, "func(username string, password string) *Userinfo"},
+ {"Userinfo", Type, 0, ""},
+ {"Values", Type, 0, ""},
+ },
+ "os": {
+ {"(*File).Chdir", Method, 0, ""},
+ {"(*File).Chmod", Method, 0, ""},
+ {"(*File).Chown", Method, 0, ""},
+ {"(*File).Close", Method, 0, ""},
+ {"(*File).Fd", Method, 0, ""},
+ {"(*File).Name", Method, 0, ""},
+ {"(*File).Read", Method, 0, ""},
+ {"(*File).ReadAt", Method, 0, ""},
+ {"(*File).ReadDir", Method, 16, ""},
+ {"(*File).ReadFrom", Method, 15, ""},
+ {"(*File).Readdir", Method, 0, ""},
+ {"(*File).Readdirnames", Method, 0, ""},
+ {"(*File).Seek", Method, 0, ""},
+ {"(*File).SetDeadline", Method, 10, ""},
+ {"(*File).SetReadDeadline", Method, 10, ""},
+ {"(*File).SetWriteDeadline", Method, 10, ""},
+ {"(*File).Stat", Method, 0, ""},
+ {"(*File).Sync", Method, 0, ""},
+ {"(*File).SyscallConn", Method, 12, ""},
+ {"(*File).Truncate", Method, 0, ""},
+ {"(*File).Write", Method, 0, ""},
+ {"(*File).WriteAt", Method, 0, ""},
+ {"(*File).WriteString", Method, 0, ""},
+ {"(*File).WriteTo", Method, 22, ""},
+ {"(*LinkError).Error", Method, 0, ""},
+ {"(*LinkError).Unwrap", Method, 13, ""},
+ {"(*PathError).Error", Method, 0, ""},
+ {"(*PathError).Timeout", Method, 10, ""},
+ {"(*PathError).Unwrap", Method, 13, ""},
+ {"(*Process).Kill", Method, 0, ""},
+ {"(*Process).Release", Method, 0, ""},
+ {"(*Process).Signal", Method, 0, ""},
+ {"(*Process).Wait", Method, 0, ""},
+ {"(*ProcessState).ExitCode", Method, 12, ""},
+ {"(*ProcessState).Exited", Method, 0, ""},
+ {"(*ProcessState).Pid", Method, 0, ""},
+ {"(*ProcessState).String", Method, 0, ""},
+ {"(*ProcessState).Success", Method, 0, ""},
+ {"(*ProcessState).Sys", Method, 0, ""},
+ {"(*ProcessState).SysUsage", Method, 0, ""},
+ {"(*ProcessState).SystemTime", Method, 0, ""},
+ {"(*ProcessState).UserTime", Method, 0, ""},
+ {"(*Root).Chmod", Method, 25, ""},
+ {"(*Root).Chown", Method, 25, ""},
+ {"(*Root).Chtimes", Method, 25, ""},
+ {"(*Root).Close", Method, 24, ""},
+ {"(*Root).Create", Method, 24, ""},
+ {"(*Root).FS", Method, 24, ""},
+ {"(*Root).Lchown", Method, 25, ""},
+ {"(*Root).Link", Method, 25, ""},
+ {"(*Root).Lstat", Method, 24, ""},
+ {"(*Root).Mkdir", Method, 24, ""},
+ {"(*Root).MkdirAll", Method, 25, ""},
+ {"(*Root).Name", Method, 24, ""},
+ {"(*Root).Open", Method, 24, ""},
+ {"(*Root).OpenFile", Method, 24, ""},
+ {"(*Root).OpenRoot", Method, 24, ""},
+ {"(*Root).ReadFile", Method, 25, ""},
+ {"(*Root).Readlink", Method, 25, ""},
+ {"(*Root).Remove", Method, 24, ""},
+ {"(*Root).RemoveAll", Method, 25, ""},
+ {"(*Root).Rename", Method, 25, ""},
+ {"(*Root).Stat", Method, 24, ""},
+ {"(*Root).Symlink", Method, 25, ""},
+ {"(*Root).WriteFile", Method, 25, ""},
+ {"(*SyscallError).Error", Method, 0, ""},
+ {"(*SyscallError).Timeout", Method, 10, ""},
+ {"(*SyscallError).Unwrap", Method, 13, ""},
+ {"(FileMode).IsDir", Method, 0, ""},
+ {"(FileMode).IsRegular", Method, 1, ""},
+ {"(FileMode).Perm", Method, 0, ""},
+ {"(FileMode).String", Method, 0, ""},
+ {"Args", Var, 0, ""},
+ {"Chdir", Func, 0, "func(dir string) error"},
+ {"Chmod", Func, 0, "func(name string, mode FileMode) error"},
+ {"Chown", Func, 0, "func(name string, uid int, gid int) error"},
+ {"Chtimes", Func, 0, "func(name string, atime time.Time, mtime time.Time) error"},
+ {"Clearenv", Func, 0, "func()"},
+ {"CopyFS", Func, 23, "func(dir string, fsys fs.FS) error"},
+ {"Create", Func, 0, "func(name string) (*File, error)"},
+ {"CreateTemp", Func, 16, "func(dir string, pattern string) (*File, error)"},
+ {"DevNull", Const, 0, ""},
+ {"DirEntry", Type, 16, ""},
+ {"DirFS", Func, 16, "func(dir string) fs.FS"},
+ {"Environ", Func, 0, "func() []string"},
+ {"ErrClosed", Var, 8, ""},
+ {"ErrDeadlineExceeded", Var, 15, ""},
+ {"ErrExist", Var, 0, ""},
+ {"ErrInvalid", Var, 0, ""},
+ {"ErrNoDeadline", Var, 10, ""},
+ {"ErrNotExist", Var, 0, ""},
+ {"ErrPermission", Var, 0, ""},
+ {"ErrProcessDone", Var, 16, ""},
+ {"Executable", Func, 8, "func() (string, error)"},
+ {"Exit", Func, 0, "func(code int)"},
+ {"Expand", Func, 0, "func(s string, mapping func(string) string) string"},
+ {"ExpandEnv", Func, 0, "func(s string) string"},
+ {"File", Type, 0, ""},
+ {"FileInfo", Type, 0, ""},
+ {"FileMode", Type, 0, ""},
+ {"FindProcess", Func, 0, "func(pid int) (*Process, error)"},
+ {"Getegid", Func, 0, "func() int"},
+ {"Getenv", Func, 0, "func(key string) string"},
+ {"Geteuid", Func, 0, "func() int"},
+ {"Getgid", Func, 0, "func() int"},
+ {"Getgroups", Func, 0, "func() ([]int, error)"},
+ {"Getpagesize", Func, 0, "func() int"},
+ {"Getpid", Func, 0, "func() int"},
+ {"Getppid", Func, 0, "func() int"},
+ {"Getuid", Func, 0, "func() int"},
+ {"Getwd", Func, 0, "func() (dir string, err error)"},
+ {"Hostname", Func, 0, "func() (name string, err error)"},
+ {"Interrupt", Var, 0, ""},
+ {"IsExist", Func, 0, "func(err error) bool"},
+ {"IsNotExist", Func, 0, "func(err error) bool"},
+ {"IsPathSeparator", Func, 0, "func(c uint8) bool"},
+ {"IsPermission", Func, 0, "func(err error) bool"},
+ {"IsTimeout", Func, 10, "func(err error) bool"},
+ {"Kill", Var, 0, ""},
+ {"Lchown", Func, 0, "func(name string, uid int, gid int) error"},
+ {"Link", Func, 0, "func(oldname string, newname string) error"},
+ {"LinkError", Type, 0, ""},
+ {"LinkError.Err", Field, 0, ""},
+ {"LinkError.New", Field, 0, ""},
+ {"LinkError.Old", Field, 0, ""},
+ {"LinkError.Op", Field, 0, ""},
+ {"LookupEnv", Func, 5, "func(key string) (string, bool)"},
+ {"Lstat", Func, 0, "func(name string) (FileInfo, error)"},
+ {"Mkdir", Func, 0, "func(name string, perm FileMode) error"},
+ {"MkdirAll", Func, 0, "func(path string, perm FileMode) error"},
+ {"MkdirTemp", Func, 16, "func(dir string, pattern string) (string, error)"},
+ {"ModeAppend", Const, 0, ""},
+ {"ModeCharDevice", Const, 0, ""},
+ {"ModeDevice", Const, 0, ""},
+ {"ModeDir", Const, 0, ""},
+ {"ModeExclusive", Const, 0, ""},
+ {"ModeIrregular", Const, 11, ""},
+ {"ModeNamedPipe", Const, 0, ""},
+ {"ModePerm", Const, 0, ""},
+ {"ModeSetgid", Const, 0, ""},
+ {"ModeSetuid", Const, 0, ""},
+ {"ModeSocket", Const, 0, ""},
+ {"ModeSticky", Const, 0, ""},
+ {"ModeSymlink", Const, 0, ""},
+ {"ModeTemporary", Const, 0, ""},
+ {"ModeType", Const, 0, ""},
+ {"NewFile", Func, 0, "func(fd uintptr, name string) *File"},
+ {"NewSyscallError", Func, 0, "func(syscall string, err error) error"},
+ {"O_APPEND", Const, 0, ""},
+ {"O_CREATE", Const, 0, ""},
+ {"O_EXCL", Const, 0, ""},
+ {"O_RDONLY", Const, 0, ""},
+ {"O_RDWR", Const, 0, ""},
+ {"O_SYNC", Const, 0, ""},
+ {"O_TRUNC", Const, 0, ""},
+ {"O_WRONLY", Const, 0, ""},
+ {"Open", Func, 0, "func(name string) (*File, error)"},
+ {"OpenFile", Func, 0, "func(name string, flag int, perm FileMode) (*File, error)"},
+ {"OpenInRoot", Func, 24, "func(dir string, name string) (*File, error)"},
+ {"OpenRoot", Func, 24, "func(name string) (*Root, error)"},
+ {"PathError", Type, 0, ""},
+ {"PathError.Err", Field, 0, ""},
+ {"PathError.Op", Field, 0, ""},
+ {"PathError.Path", Field, 0, ""},
+ {"PathListSeparator", Const, 0, ""},
+ {"PathSeparator", Const, 0, ""},
+ {"Pipe", Func, 0, "func() (r *File, w *File, err error)"},
+ {"ProcAttr", Type, 0, ""},
+ {"ProcAttr.Dir", Field, 0, ""},
+ {"ProcAttr.Env", Field, 0, ""},
+ {"ProcAttr.Files", Field, 0, ""},
+ {"ProcAttr.Sys", Field, 0, ""},
+ {"Process", Type, 0, ""},
+ {"Process.Pid", Field, 0, ""},
+ {"ProcessState", Type, 0, ""},
+ {"ReadDir", Func, 16, "func(name string) ([]DirEntry, error)"},
+ {"ReadFile", Func, 16, "func(name string) ([]byte, error)"},
+ {"Readlink", Func, 0, "func(name string) (string, error)"},
+ {"Remove", Func, 0, "func(name string) error"},
+ {"RemoveAll", Func, 0, "func(path string) error"},
+ {"Rename", Func, 0, "func(oldpath string, newpath string) error"},
+ {"Root", Type, 24, ""},
+ {"SEEK_CUR", Const, 0, ""},
+ {"SEEK_END", Const, 0, ""},
+ {"SEEK_SET", Const, 0, ""},
+ {"SameFile", Func, 0, "func(fi1 FileInfo, fi2 FileInfo) bool"},
+ {"Setenv", Func, 0, "func(key string, value string) error"},
+ {"Signal", Type, 0, ""},
+ {"StartProcess", Func, 0, "func(name string, argv []string, attr *ProcAttr) (*Process, error)"},
+ {"Stat", Func, 0, "func(name string) (FileInfo, error)"},
+ {"Stderr", Var, 0, ""},
+ {"Stdin", Var, 0, ""},
+ {"Stdout", Var, 0, ""},
+ {"Symlink", Func, 0, "func(oldname string, newname string) error"},
+ {"SyscallError", Type, 0, ""},
+ {"SyscallError.Err", Field, 0, ""},
+ {"SyscallError.Syscall", Field, 0, ""},
+ {"TempDir", Func, 0, "func() string"},
+ {"Truncate", Func, 0, "func(name string, size int64) error"},
+ {"Unsetenv", Func, 4, "func(key string) error"},
+ {"UserCacheDir", Func, 11, "func() (string, error)"},
+ {"UserConfigDir", Func, 13, "func() (string, error)"},
+ {"UserHomeDir", Func, 12, "func() (string, error)"},
+ {"WriteFile", Func, 16, "func(name string, data []byte, perm FileMode) error"},
+ },
+ "os/exec": {
+ {"(*Cmd).CombinedOutput", Method, 0, ""},
+ {"(*Cmd).Environ", Method, 19, ""},
+ {"(*Cmd).Output", Method, 0, ""},
+ {"(*Cmd).Run", Method, 0, ""},
+ {"(*Cmd).Start", Method, 0, ""},
+ {"(*Cmd).StderrPipe", Method, 0, ""},
+ {"(*Cmd).StdinPipe", Method, 0, ""},
+ {"(*Cmd).StdoutPipe", Method, 0, ""},
+ {"(*Cmd).String", Method, 13, ""},
+ {"(*Cmd).Wait", Method, 0, ""},
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Error).Unwrap", Method, 13, ""},
+ {"(*ExitError).Error", Method, 0, ""},
+ {"(ExitError).ExitCode", Method, 12, ""},
+ {"(ExitError).Exited", Method, 0, ""},
+ {"(ExitError).Pid", Method, 0, ""},
+ {"(ExitError).String", Method, 0, ""},
+ {"(ExitError).Success", Method, 0, ""},
+ {"(ExitError).Sys", Method, 0, ""},
+ {"(ExitError).SysUsage", Method, 0, ""},
+ {"(ExitError).SystemTime", Method, 0, ""},
+ {"(ExitError).UserTime", Method, 0, ""},
+ {"Cmd", Type, 0, ""},
+ {"Cmd.Args", Field, 0, ""},
+ {"Cmd.Cancel", Field, 20, ""},
+ {"Cmd.Dir", Field, 0, ""},
+ {"Cmd.Env", Field, 0, ""},
+ {"Cmd.Err", Field, 19, ""},
+ {"Cmd.ExtraFiles", Field, 0, ""},
+ {"Cmd.Path", Field, 0, ""},
+ {"Cmd.Process", Field, 0, ""},
+ {"Cmd.ProcessState", Field, 0, ""},
+ {"Cmd.Stderr", Field, 0, ""},
+ {"Cmd.Stdin", Field, 0, ""},
+ {"Cmd.Stdout", Field, 0, ""},
+ {"Cmd.SysProcAttr", Field, 0, ""},
+ {"Cmd.WaitDelay", Field, 20, ""},
+ {"Command", Func, 0, "func(name string, arg ...string) *Cmd"},
+ {"CommandContext", Func, 7, "func(ctx context.Context, name string, arg ...string) *Cmd"},
+ {"ErrDot", Var, 19, ""},
+ {"ErrNotFound", Var, 0, ""},
+ {"ErrWaitDelay", Var, 20, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Err", Field, 0, ""},
+ {"Error.Name", Field, 0, ""},
+ {"ExitError", Type, 0, ""},
+ {"ExitError.ProcessState", Field, 0, ""},
+ {"ExitError.Stderr", Field, 6, ""},
+ {"LookPath", Func, 0, "func(file string) (string, error)"},
+ },
+ "os/signal": {
+ {"Ignore", Func, 5, "func(sig ...os.Signal)"},
+ {"Ignored", Func, 11, "func(sig os.Signal) bool"},
+ {"Notify", Func, 0, "func(c chan<- os.Signal, sig ...os.Signal)"},
+ {"NotifyContext", Func, 16, "func(parent context.Context, signals ...os.Signal) (ctx context.Context, stop context.CancelFunc)"},
+ {"Reset", Func, 5, "func(sig ...os.Signal)"},
+ {"Stop", Func, 1, "func(c chan<- os.Signal)"},
+ },
+ "os/user": {
+ {"(*User).GroupIds", Method, 7, ""},
+ {"(UnknownGroupError).Error", Method, 7, ""},
+ {"(UnknownGroupIdError).Error", Method, 7, ""},
+ {"(UnknownUserError).Error", Method, 0, ""},
+ {"(UnknownUserIdError).Error", Method, 0, ""},
+ {"Current", Func, 0, "func() (*User, error)"},
+ {"Group", Type, 7, ""},
+ {"Group.Gid", Field, 7, ""},
+ {"Group.Name", Field, 7, ""},
+ {"Lookup", Func, 0, "func(username string) (*User, error)"},
+ {"LookupGroup", Func, 7, "func(name string) (*Group, error)"},
+ {"LookupGroupId", Func, 7, "func(gid string) (*Group, error)"},
+ {"LookupId", Func, 0, "func(uid string) (*User, error)"},
+ {"UnknownGroupError", Type, 7, ""},
+ {"UnknownGroupIdError", Type, 7, ""},
+ {"UnknownUserError", Type, 0, ""},
+ {"UnknownUserIdError", Type, 0, ""},
+ {"User", Type, 0, ""},
+ {"User.Gid", Field, 0, ""},
+ {"User.HomeDir", Field, 0, ""},
+ {"User.Name", Field, 0, ""},
+ {"User.Uid", Field, 0, ""},
+ {"User.Username", Field, 0, ""},
+ },
+ "path": {
+ {"Base", Func, 0, "func(path string) string"},
+ {"Clean", Func, 0, "func(path string) string"},
+ {"Dir", Func, 0, "func(path string) string"},
+ {"ErrBadPattern", Var, 0, ""},
+ {"Ext", Func, 0, "func(path string) string"},
+ {"IsAbs", Func, 0, "func(path string) bool"},
+ {"Join", Func, 0, "func(elem ...string) string"},
+ {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+ {"Split", Func, 0, "func(path string) (dir string, file string)"},
+ },
+ "path/filepath": {
+ {"Abs", Func, 0, "func(path string) (string, error)"},
+ {"Base", Func, 0, "func(path string) string"},
+ {"Clean", Func, 0, "func(path string) string"},
+ {"Dir", Func, 0, "func(path string) string"},
+ {"ErrBadPattern", Var, 0, ""},
+ {"EvalSymlinks", Func, 0, "func(path string) (string, error)"},
+ {"Ext", Func, 0, "func(path string) string"},
+ {"FromSlash", Func, 0, "func(path string) string"},
+ {"Glob", Func, 0, "func(pattern string) (matches []string, err error)"},
+ {"HasPrefix", Func, 0, "func(p string, prefix string) bool"},
+ {"IsAbs", Func, 0, "func(path string) bool"},
+ {"IsLocal", Func, 20, "func(path string) bool"},
+ {"Join", Func, 0, "func(elem ...string) string"},
+ {"ListSeparator", Const, 0, ""},
+ {"Localize", Func, 23, "func(path string) (string, error)"},
+ {"Match", Func, 0, "func(pattern string, name string) (matched bool, err error)"},
+ {"Rel", Func, 0, "func(basepath string, targpath string) (string, error)"},
+ {"Separator", Const, 0, ""},
+ {"SkipAll", Var, 20, ""},
+ {"SkipDir", Var, 0, ""},
+ {"Split", Func, 0, "func(path string) (dir string, file string)"},
+ {"SplitList", Func, 0, "func(path string) []string"},
+ {"ToSlash", Func, 0, "func(path string) string"},
+ {"VolumeName", Func, 0, "func(path string) string"},
+ {"Walk", Func, 0, "func(root string, fn WalkFunc) error"},
+ {"WalkDir", Func, 16, "func(root string, fn fs.WalkDirFunc) error"},
+ {"WalkFunc", Type, 0, ""},
+ },
+ "plugin": {
+ {"(*Plugin).Lookup", Method, 8, ""},
+ {"Open", Func, 8, "func(path string) (*Plugin, error)"},
+ {"Plugin", Type, 8, ""},
+ {"Symbol", Type, 8, ""},
+ },
+ "reflect": {
+ {"(*MapIter).Key", Method, 12, ""},
+ {"(*MapIter).Next", Method, 12, ""},
+ {"(*MapIter).Reset", Method, 18, ""},
+ {"(*MapIter).Value", Method, 12, ""},
+ {"(*ValueError).Error", Method, 0, ""},
+ {"(ChanDir).String", Method, 0, ""},
+ {"(Kind).String", Method, 0, ""},
+ {"(Method).IsExported", Method, 17, ""},
+ {"(StructField).IsExported", Method, 17, ""},
+ {"(StructTag).Get", Method, 0, ""},
+ {"(StructTag).Lookup", Method, 7, ""},
+ {"(Value).Addr", Method, 0, ""},
+ {"(Value).Bool", Method, 0, ""},
+ {"(Value).Bytes", Method, 0, ""},
+ {"(Value).Call", Method, 0, ""},
+ {"(Value).CallSlice", Method, 0, ""},
+ {"(Value).CanAddr", Method, 0, ""},
+ {"(Value).CanComplex", Method, 18, ""},
+ {"(Value).CanConvert", Method, 17, ""},
+ {"(Value).CanFloat", Method, 18, ""},
+ {"(Value).CanInt", Method, 18, ""},
+ {"(Value).CanInterface", Method, 0, ""},
+ {"(Value).CanSet", Method, 0, ""},
+ {"(Value).CanUint", Method, 18, ""},
+ {"(Value).Cap", Method, 0, ""},
+ {"(Value).Clear", Method, 21, ""},
+ {"(Value).Close", Method, 0, ""},
+ {"(Value).Comparable", Method, 20, ""},
+ {"(Value).Complex", Method, 0, ""},
+ {"(Value).Convert", Method, 1, ""},
+ {"(Value).Elem", Method, 0, ""},
+ {"(Value).Equal", Method, 20, ""},
+ {"(Value).Field", Method, 0, ""},
+ {"(Value).FieldByIndex", Method, 0, ""},
+ {"(Value).FieldByIndexErr", Method, 18, ""},
+ {"(Value).FieldByName", Method, 0, ""},
+ {"(Value).FieldByNameFunc", Method, 0, ""},
+ {"(Value).Float", Method, 0, ""},
+ {"(Value).Grow", Method, 20, ""},
+ {"(Value).Index", Method, 0, ""},
+ {"(Value).Int", Method, 0, ""},
+ {"(Value).Interface", Method, 0, ""},
+ {"(Value).InterfaceData", Method, 0, ""},
+ {"(Value).IsNil", Method, 0, ""},
+ {"(Value).IsValid", Method, 0, ""},
+ {"(Value).IsZero", Method, 13, ""},
+ {"(Value).Kind", Method, 0, ""},
+ {"(Value).Len", Method, 0, ""},
+ {"(Value).MapIndex", Method, 0, ""},
+ {"(Value).MapKeys", Method, 0, ""},
+ {"(Value).MapRange", Method, 12, ""},
+ {"(Value).Method", Method, 0, ""},
+ {"(Value).MethodByName", Method, 0, ""},
+ {"(Value).NumField", Method, 0, ""},
+ {"(Value).NumMethod", Method, 0, ""},
+ {"(Value).OverflowComplex", Method, 0, ""},
+ {"(Value).OverflowFloat", Method, 0, ""},
+ {"(Value).OverflowInt", Method, 0, ""},
+ {"(Value).OverflowUint", Method, 0, ""},
+ {"(Value).Pointer", Method, 0, ""},
+ {"(Value).Recv", Method, 0, ""},
+ {"(Value).Send", Method, 0, ""},
+ {"(Value).Seq", Method, 23, ""},
+ {"(Value).Seq2", Method, 23, ""},
+ {"(Value).Set", Method, 0, ""},
+ {"(Value).SetBool", Method, 0, ""},
+ {"(Value).SetBytes", Method, 0, ""},
+ {"(Value).SetCap", Method, 2, ""},
+ {"(Value).SetComplex", Method, 0, ""},
+ {"(Value).SetFloat", Method, 0, ""},
+ {"(Value).SetInt", Method, 0, ""},
+ {"(Value).SetIterKey", Method, 18, ""},
+ {"(Value).SetIterValue", Method, 18, ""},
+ {"(Value).SetLen", Method, 0, ""},
+ {"(Value).SetMapIndex", Method, 0, ""},
+ {"(Value).SetPointer", Method, 0, ""},
+ {"(Value).SetString", Method, 0, ""},
+ {"(Value).SetUint", Method, 0, ""},
+ {"(Value).SetZero", Method, 20, ""},
+ {"(Value).Slice", Method, 0, ""},
+ {"(Value).Slice3", Method, 2, ""},
+ {"(Value).String", Method, 0, ""},
+ {"(Value).TryRecv", Method, 0, ""},
+ {"(Value).TrySend", Method, 0, ""},
+ {"(Value).Type", Method, 0, ""},
+ {"(Value).Uint", Method, 0, ""},
+ {"(Value).UnsafeAddr", Method, 0, ""},
+ {"(Value).UnsafePointer", Method, 18, ""},
+ {"Append", Func, 0, "func(s Value, x ...Value) Value"},
+ {"AppendSlice", Func, 0, "func(s Value, t Value) Value"},
+ {"Array", Const, 0, ""},
+ {"ArrayOf", Func, 5, "func(length int, elem Type) Type"},
+ {"Bool", Const, 0, ""},
+ {"BothDir", Const, 0, ""},
+ {"Chan", Const, 0, ""},
+ {"ChanDir", Type, 0, ""},
+ {"ChanOf", Func, 1, "func(dir ChanDir, t Type) Type"},
+ {"Complex128", Const, 0, ""},
+ {"Complex64", Const, 0, ""},
+ {"Copy", Func, 0, "func(dst Value, src Value) int"},
+ {"DeepEqual", Func, 0, "func(x any, y any) bool"},
+ {"Float32", Const, 0, ""},
+ {"Float64", Const, 0, ""},
+ {"Func", Const, 0, ""},
+ {"FuncOf", Func, 5, "func(in []Type, out []Type, variadic bool) Type"},
+ {"Indirect", Func, 0, "func(v Value) Value"},
+ {"Int", Const, 0, ""},
+ {"Int16", Const, 0, ""},
+ {"Int32", Const, 0, ""},
+ {"Int64", Const, 0, ""},
+ {"Int8", Const, 0, ""},
+ {"Interface", Const, 0, ""},
+ {"Invalid", Const, 0, ""},
+ {"Kind", Type, 0, ""},
+ {"MakeChan", Func, 0, "func(typ Type, buffer int) Value"},
+ {"MakeFunc", Func, 1, "func(typ Type, fn func(args []Value) (results []Value)) Value"},
+ {"MakeMap", Func, 0, "func(typ Type) Value"},
+ {"MakeMapWithSize", Func, 9, "func(typ Type, n int) Value"},
+ {"MakeSlice", Func, 0, "func(typ Type, len int, cap int) Value"},
+ {"Map", Const, 0, ""},
+ {"MapIter", Type, 12, ""},
+ {"MapOf", Func, 1, "func(key Type, elem Type) Type"},
+ {"Method", Type, 0, ""},
+ {"Method.Func", Field, 0, ""},
+ {"Method.Index", Field, 0, ""},
+ {"Method.Name", Field, 0, ""},
+ {"Method.PkgPath", Field, 0, ""},
+ {"Method.Type", Field, 0, ""},
+ {"New", Func, 0, "func(typ Type) Value"},
+ {"NewAt", Func, 0, "func(typ Type, p unsafe.Pointer) Value"},
+ {"Pointer", Const, 18, ""},
+ {"PointerTo", Func, 18, "func(t Type) Type"},
+ {"Ptr", Const, 0, ""},
+ {"PtrTo", Func, 0, "func(t Type) Type"},
+ {"RecvDir", Const, 0, ""},
+ {"Select", Func, 1, "func(cases []SelectCase) (chosen int, recv Value, recvOK bool)"},
+ {"SelectCase", Type, 1, ""},
+ {"SelectCase.Chan", Field, 1, ""},
+ {"SelectCase.Dir", Field, 1, ""},
+ {"SelectCase.Send", Field, 1, ""},
+ {"SelectDefault", Const, 1, ""},
+ {"SelectDir", Type, 1, ""},
+ {"SelectRecv", Const, 1, ""},
+ {"SelectSend", Const, 1, ""},
+ {"SendDir", Const, 0, ""},
+ {"Slice", Const, 0, ""},
+ {"SliceAt", Func, 23, "func(typ Type, p unsafe.Pointer, n int) Value"},
+ {"SliceHeader", Type, 0, ""},
+ {"SliceHeader.Cap", Field, 0, ""},
+ {"SliceHeader.Data", Field, 0, ""},
+ {"SliceHeader.Len", Field, 0, ""},
+ {"SliceOf", Func, 1, "func(t Type) Type"},
+ {"String", Const, 0, ""},
+ {"StringHeader", Type, 0, ""},
+ {"StringHeader.Data", Field, 0, ""},
+ {"StringHeader.Len", Field, 0, ""},
+ {"Struct", Const, 0, ""},
+ {"StructField", Type, 0, ""},
+ {"StructField.Anonymous", Field, 0, ""},
+ {"StructField.Index", Field, 0, ""},
+ {"StructField.Name", Field, 0, ""},
+ {"StructField.Offset", Field, 0, ""},
+ {"StructField.PkgPath", Field, 0, ""},
+ {"StructField.Tag", Field, 0, ""},
+ {"StructField.Type", Field, 0, ""},
+ {"StructOf", Func, 7, "func(fields []StructField) Type"},
+ {"StructTag", Type, 0, ""},
+ {"Swapper", Func, 8, "func(slice any) func(i int, j int)"},
+ {"Type", Type, 0, ""},
+ {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"},
+ {"TypeFor", Func, 22, "func[T any]() Type"},
+ {"TypeOf", Func, 0, "func(i any) Type"},
+ {"Uint", Const, 0, ""},
+ {"Uint16", Const, 0, ""},
+ {"Uint32", Const, 0, ""},
+ {"Uint64", Const, 0, ""},
+ {"Uint8", Const, 0, ""},
+ {"Uintptr", Const, 0, ""},
+ {"UnsafePointer", Const, 0, ""},
+ {"Value", Type, 0, ""},
+ {"ValueError", Type, 0, ""},
+ {"ValueError.Kind", Field, 0, ""},
+ {"ValueError.Method", Field, 0, ""},
+ {"ValueOf", Func, 0, "func(i any) Value"},
+ {"VisibleFields", Func, 17, "func(t Type) []StructField"},
+ {"Zero", Func, 0, "func(typ Type) Value"},
+ },
+ "regexp": {
+ {"(*Regexp).AppendText", Method, 24, ""},
+ {"(*Regexp).Copy", Method, 6, ""},
+ {"(*Regexp).Expand", Method, 0, ""},
+ {"(*Regexp).ExpandString", Method, 0, ""},
+ {"(*Regexp).Find", Method, 0, ""},
+ {"(*Regexp).FindAll", Method, 0, ""},
+ {"(*Regexp).FindAllIndex", Method, 0, ""},
+ {"(*Regexp).FindAllString", Method, 0, ""},
+ {"(*Regexp).FindAllStringIndex", Method, 0, ""},
+ {"(*Regexp).FindAllStringSubmatch", Method, 0, ""},
+ {"(*Regexp).FindAllStringSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindAllSubmatch", Method, 0, ""},
+ {"(*Regexp).FindAllSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindIndex", Method, 0, ""},
+ {"(*Regexp).FindReaderIndex", Method, 0, ""},
+ {"(*Regexp).FindReaderSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindString", Method, 0, ""},
+ {"(*Regexp).FindStringIndex", Method, 0, ""},
+ {"(*Regexp).FindStringSubmatch", Method, 0, ""},
+ {"(*Regexp).FindStringSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).FindSubmatch", Method, 0, ""},
+ {"(*Regexp).FindSubmatchIndex", Method, 0, ""},
+ {"(*Regexp).LiteralPrefix", Method, 0, ""},
+ {"(*Regexp).Longest", Method, 1, ""},
+ {"(*Regexp).MarshalText", Method, 21, ""},
+ {"(*Regexp).Match", Method, 0, ""},
+ {"(*Regexp).MatchReader", Method, 0, ""},
+ {"(*Regexp).MatchString", Method, 0, ""},
+ {"(*Regexp).NumSubexp", Method, 0, ""},
+ {"(*Regexp).ReplaceAll", Method, 0, ""},
+ {"(*Regexp).ReplaceAllFunc", Method, 0, ""},
+ {"(*Regexp).ReplaceAllLiteral", Method, 0, ""},
+ {"(*Regexp).ReplaceAllLiteralString", Method, 0, ""},
+ {"(*Regexp).ReplaceAllString", Method, 0, ""},
+ {"(*Regexp).ReplaceAllStringFunc", Method, 0, ""},
+ {"(*Regexp).Split", Method, 1, ""},
+ {"(*Regexp).String", Method, 0, ""},
+ {"(*Regexp).SubexpIndex", Method, 15, ""},
+ {"(*Regexp).SubexpNames", Method, 0, ""},
+ {"(*Regexp).UnmarshalText", Method, 21, ""},
+ {"Compile", Func, 0, "func(expr string) (*Regexp, error)"},
+ {"CompilePOSIX", Func, 0, "func(expr string) (*Regexp, error)"},
+ {"Match", Func, 0, "func(pattern string, b []byte) (matched bool, err error)"},
+ {"MatchReader", Func, 0, "func(pattern string, r io.RuneReader) (matched bool, err error)"},
+ {"MatchString", Func, 0, "func(pattern string, s string) (matched bool, err error)"},
+ {"MustCompile", Func, 0, "func(str string) *Regexp"},
+ {"MustCompilePOSIX", Func, 0, "func(str string) *Regexp"},
+ {"QuoteMeta", Func, 0, "func(s string) string"},
+ {"Regexp", Type, 0, ""},
+ },
+ "regexp/syntax": {
+ {"(*Error).Error", Method, 0, ""},
+ {"(*Inst).MatchEmptyWidth", Method, 0, ""},
+ {"(*Inst).MatchRune", Method, 0, ""},
+ {"(*Inst).MatchRunePos", Method, 3, ""},
+ {"(*Inst).String", Method, 0, ""},
+ {"(*Prog).Prefix", Method, 0, ""},
+ {"(*Prog).StartCond", Method, 0, ""},
+ {"(*Prog).String", Method, 0, ""},
+ {"(*Regexp).CapNames", Method, 0, ""},
+ {"(*Regexp).Equal", Method, 0, ""},
+ {"(*Regexp).MaxCap", Method, 0, ""},
+ {"(*Regexp).Simplify", Method, 0, ""},
+ {"(*Regexp).String", Method, 0, ""},
+ {"(ErrorCode).String", Method, 0, ""},
+ {"(InstOp).String", Method, 3, ""},
+ {"(Op).String", Method, 11, ""},
+ {"ClassNL", Const, 0, ""},
+ {"Compile", Func, 0, "func(re *Regexp) (*Prog, error)"},
+ {"DotNL", Const, 0, ""},
+ {"EmptyBeginLine", Const, 0, ""},
+ {"EmptyBeginText", Const, 0, ""},
+ {"EmptyEndLine", Const, 0, ""},
+ {"EmptyEndText", Const, 0, ""},
+ {"EmptyNoWordBoundary", Const, 0, ""},
+ {"EmptyOp", Type, 0, ""},
+ {"EmptyOpContext", Func, 0, "func(r1 rune, r2 rune) EmptyOp"},
+ {"EmptyWordBoundary", Const, 0, ""},
+ {"ErrInternalError", Const, 0, ""},
+ {"ErrInvalidCharClass", Const, 0, ""},
+ {"ErrInvalidCharRange", Const, 0, ""},
+ {"ErrInvalidEscape", Const, 0, ""},
+ {"ErrInvalidNamedCapture", Const, 0, ""},
+ {"ErrInvalidPerlOp", Const, 0, ""},
+ {"ErrInvalidRepeatOp", Const, 0, ""},
+ {"ErrInvalidRepeatSize", Const, 0, ""},
+ {"ErrInvalidUTF8", Const, 0, ""},
+ {"ErrLarge", Const, 20, ""},
+ {"ErrMissingBracket", Const, 0, ""},
+ {"ErrMissingParen", Const, 0, ""},
+ {"ErrMissingRepeatArgument", Const, 0, ""},
+ {"ErrNestingDepth", Const, 19, ""},
+ {"ErrTrailingBackslash", Const, 0, ""},
+ {"ErrUnexpectedParen", Const, 1, ""},
+ {"Error", Type, 0, ""},
+ {"Error.Code", Field, 0, ""},
+ {"Error.Expr", Field, 0, ""},
+ {"ErrorCode", Type, 0, ""},
+ {"Flags", Type, 0, ""},
+ {"FoldCase", Const, 0, ""},
+ {"Inst", Type, 0, ""},
+ {"Inst.Arg", Field, 0, ""},
+ {"Inst.Op", Field, 0, ""},
+ {"Inst.Out", Field, 0, ""},
+ {"Inst.Rune", Field, 0, ""},
+ {"InstAlt", Const, 0, ""},
+ {"InstAltMatch", Const, 0, ""},
+ {"InstCapture", Const, 0, ""},
+ {"InstEmptyWidth", Const, 0, ""},
+ {"InstFail", Const, 0, ""},
+ {"InstMatch", Const, 0, ""},
+ {"InstNop", Const, 0, ""},
+ {"InstOp", Type, 0, ""},
+ {"InstRune", Const, 0, ""},
+ {"InstRune1", Const, 0, ""},
+ {"InstRuneAny", Const, 0, ""},
+ {"InstRuneAnyNotNL", Const, 0, ""},
+ {"IsWordChar", Func, 0, "func(r rune) bool"},
+ {"Literal", Const, 0, ""},
+ {"MatchNL", Const, 0, ""},
+ {"NonGreedy", Const, 0, ""},
+ {"OneLine", Const, 0, ""},
+ {"Op", Type, 0, ""},
+ {"OpAlternate", Const, 0, ""},
+ {"OpAnyChar", Const, 0, ""},
+ {"OpAnyCharNotNL", Const, 0, ""},
+ {"OpBeginLine", Const, 0, ""},
+ {"OpBeginText", Const, 0, ""},
+ {"OpCapture", Const, 0, ""},
+ {"OpCharClass", Const, 0, ""},
+ {"OpConcat", Const, 0, ""},
+ {"OpEmptyMatch", Const, 0, ""},
+ {"OpEndLine", Const, 0, ""},
+ {"OpEndText", Const, 0, ""},
+ {"OpLiteral", Const, 0, ""},
+ {"OpNoMatch", Const, 0, ""},
+ {"OpNoWordBoundary", Const, 0, ""},
+ {"OpPlus", Const, 0, ""},
+ {"OpQuest", Const, 0, ""},
+ {"OpRepeat", Const, 0, ""},
+ {"OpStar", Const, 0, ""},
+ {"OpWordBoundary", Const, 0, ""},
+ {"POSIX", Const, 0, ""},
+ {"Parse", Func, 0, "func(s string, flags Flags) (*Regexp, error)"},
+ {"Perl", Const, 0, ""},
+ {"PerlX", Const, 0, ""},
+ {"Prog", Type, 0, ""},
+ {"Prog.Inst", Field, 0, ""},
+ {"Prog.NumCap", Field, 0, ""},
+ {"Prog.Start", Field, 0, ""},
+ {"Regexp", Type, 0, ""},
+ {"Regexp.Cap", Field, 0, ""},
+ {"Regexp.Flags", Field, 0, ""},
+ {"Regexp.Max", Field, 0, ""},
+ {"Regexp.Min", Field, 0, ""},
+ {"Regexp.Name", Field, 0, ""},
+ {"Regexp.Op", Field, 0, ""},
+ {"Regexp.Rune", Field, 0, ""},
+ {"Regexp.Rune0", Field, 0, ""},
+ {"Regexp.Sub", Field, 0, ""},
+ {"Regexp.Sub0", Field, 0, ""},
+ {"Simple", Const, 0, ""},
+ {"UnicodeGroups", Const, 0, ""},
+ {"WasDollar", Const, 0, ""},
+ },
+ "runtime": {
+ {"(*BlockProfileRecord).Stack", Method, 1, ""},
+ {"(*Frames).Next", Method, 7, ""},
+ {"(*Func).Entry", Method, 0, ""},
+ {"(*Func).FileLine", Method, 0, ""},
+ {"(*Func).Name", Method, 0, ""},
+ {"(*MemProfileRecord).InUseBytes", Method, 0, ""},
+ {"(*MemProfileRecord).InUseObjects", Method, 0, ""},
+ {"(*MemProfileRecord).Stack", Method, 0, ""},
+ {"(*PanicNilError).Error", Method, 21, ""},
+ {"(*PanicNilError).RuntimeError", Method, 21, ""},
+ {"(*Pinner).Pin", Method, 21, ""},
+ {"(*Pinner).Unpin", Method, 21, ""},
+ {"(*StackRecord).Stack", Method, 0, ""},
+ {"(*TypeAssertionError).Error", Method, 0, ""},
+ {"(*TypeAssertionError).RuntimeError", Method, 0, ""},
+ {"(Cleanup).Stop", Method, 24, ""},
+ {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"},
+ {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"},
+ {"BlockProfileRecord", Type, 1, ""},
+ {"BlockProfileRecord.Count", Field, 1, ""},
+ {"BlockProfileRecord.Cycles", Field, 1, ""},
+ {"BlockProfileRecord.StackRecord", Field, 1, ""},
+ {"Breakpoint", Func, 0, "func()"},
+ {"CPUProfile", Func, 0, "func() []byte"},
+ {"Caller", Func, 0, "func(skip int) (pc uintptr, file string, line int, ok bool)"},
+ {"Callers", Func, 0, "func(skip int, pc []uintptr) int"},
+ {"CallersFrames", Func, 7, "func(callers []uintptr) *Frames"},
+ {"Cleanup", Type, 24, ""},
+ {"Compiler", Const, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Frame", Type, 7, ""},
+ {"Frame.Entry", Field, 7, ""},
+ {"Frame.File", Field, 7, ""},
+ {"Frame.Func", Field, 7, ""},
+ {"Frame.Function", Field, 7, ""},
+ {"Frame.Line", Field, 7, ""},
+ {"Frame.PC", Field, 7, ""},
+ {"Frames", Type, 7, ""},
+ {"Func", Type, 0, ""},
+ {"FuncForPC", Func, 0, "func(pc uintptr) *Func"},
+ {"GC", Func, 0, "func()"},
+ {"GOARCH", Const, 0, ""},
+ {"GOMAXPROCS", Func, 0, "func(n int) int"},
+ {"GOOS", Const, 0, ""},
+ {"GOROOT", Func, 0, "func() string"},
+ {"Goexit", Func, 0, "func()"},
+ {"GoroutineProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+ {"Gosched", Func, 0, "func()"},
+ {"KeepAlive", Func, 7, "func(x any)"},
+ {"LockOSThread", Func, 0, "func()"},
+ {"MemProfile", Func, 0, "func(p []MemProfileRecord, inuseZero bool) (n int, ok bool)"},
+ {"MemProfileRate", Var, 0, ""},
+ {"MemProfileRecord", Type, 0, ""},
+ {"MemProfileRecord.AllocBytes", Field, 0, ""},
+ {"MemProfileRecord.AllocObjects", Field, 0, ""},
+ {"MemProfileRecord.FreeBytes", Field, 0, ""},
+ {"MemProfileRecord.FreeObjects", Field, 0, ""},
+ {"MemProfileRecord.Stack0", Field, 0, ""},
+ {"MemStats", Type, 0, ""},
+ {"MemStats.Alloc", Field, 0, ""},
+ {"MemStats.BuckHashSys", Field, 0, ""},
+ {"MemStats.BySize", Field, 0, ""},
+ {"MemStats.DebugGC", Field, 0, ""},
+ {"MemStats.EnableGC", Field, 0, ""},
+ {"MemStats.Frees", Field, 0, ""},
+ {"MemStats.GCCPUFraction", Field, 5, ""},
+ {"MemStats.GCSys", Field, 2, ""},
+ {"MemStats.HeapAlloc", Field, 0, ""},
+ {"MemStats.HeapIdle", Field, 0, ""},
+ {"MemStats.HeapInuse", Field, 0, ""},
+ {"MemStats.HeapObjects", Field, 0, ""},
+ {"MemStats.HeapReleased", Field, 0, ""},
+ {"MemStats.HeapSys", Field, 0, ""},
+ {"MemStats.LastGC", Field, 0, ""},
+ {"MemStats.Lookups", Field, 0, ""},
+ {"MemStats.MCacheInuse", Field, 0, ""},
+ {"MemStats.MCacheSys", Field, 0, ""},
+ {"MemStats.MSpanInuse", Field, 0, ""},
+ {"MemStats.MSpanSys", Field, 0, ""},
+ {"MemStats.Mallocs", Field, 0, ""},
+ {"MemStats.NextGC", Field, 0, ""},
+ {"MemStats.NumForcedGC", Field, 8, ""},
+ {"MemStats.NumGC", Field, 0, ""},
+ {"MemStats.OtherSys", Field, 2, ""},
+ {"MemStats.PauseEnd", Field, 4, ""},
+ {"MemStats.PauseNs", Field, 0, ""},
+ {"MemStats.PauseTotalNs", Field, 0, ""},
+ {"MemStats.StackInuse", Field, 0, ""},
+ {"MemStats.StackSys", Field, 0, ""},
+ {"MemStats.Sys", Field, 0, ""},
+ {"MemStats.TotalAlloc", Field, 0, ""},
+ {"MutexProfile", Func, 8, "func(p []BlockProfileRecord) (n int, ok bool)"},
+ {"NumCPU", Func, 0, "func() int"},
+ {"NumCgoCall", Func, 0, "func() int64"},
+ {"NumGoroutine", Func, 0, "func() int"},
+ {"PanicNilError", Type, 21, ""},
+ {"Pinner", Type, 21, ""},
+ {"ReadMemStats", Func, 0, "func(m *MemStats)"},
+ {"ReadTrace", Func, 5, "func() []byte"},
+ {"SetBlockProfileRate", Func, 1, "func(rate int)"},
+ {"SetCPUProfileRate", Func, 0, "func(hz int)"},
+ {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"},
+ {"SetDefaultGOMAXPROCS", Func, 25, "func()"},
+ {"SetFinalizer", Func, 0, "func(obj any, finalizer any)"},
+ {"SetMutexProfileFraction", Func, 8, "func(rate int) int"},
+ {"Stack", Func, 0, "func(buf []byte, all bool) int"},
+ {"StackRecord", Type, 0, ""},
+ {"StackRecord.Stack0", Field, 0, ""},
+ {"StartTrace", Func, 5, "func() error"},
+ {"StopTrace", Func, 5, "func()"},
+ {"ThreadCreateProfile", Func, 0, "func(p []StackRecord) (n int, ok bool)"},
+ {"TypeAssertionError", Type, 0, ""},
+ {"UnlockOSThread", Func, 0, "func()"},
+ {"Version", Func, 0, "func() string"},
+ },
+ "runtime/cgo": {
+ {"(Handle).Delete", Method, 17, ""},
+ {"(Handle).Value", Method, 17, ""},
+ {"Handle", Type, 17, ""},
+ {"Incomplete", Type, 20, ""},
+ {"NewHandle", Func, 17, ""},
+ },
+ "runtime/coverage": {
+ {"ClearCounters", Func, 20, "func() error"},
+ {"WriteCounters", Func, 20, "func(w io.Writer) error"},
+ {"WriteCountersDir", Func, 20, "func(dir string) error"},
+ {"WriteMeta", Func, 20, "func(w io.Writer) error"},
+ {"WriteMetaDir", Func, 20, "func(dir string) error"},
+ },
+ "runtime/debug": {
+ {"(*BuildInfo).String", Method, 18, ""},
+ {"BuildInfo", Type, 12, ""},
+ {"BuildInfo.Deps", Field, 12, ""},
+ {"BuildInfo.GoVersion", Field, 18, ""},
+ {"BuildInfo.Main", Field, 12, ""},
+ {"BuildInfo.Path", Field, 12, ""},
+ {"BuildInfo.Settings", Field, 18, ""},
+ {"BuildSetting", Type, 18, ""},
+ {"BuildSetting.Key", Field, 18, ""},
+ {"BuildSetting.Value", Field, 18, ""},
+ {"CrashOptions", Type, 23, ""},
+ {"FreeOSMemory", Func, 1, "func()"},
+ {"GCStats", Type, 1, ""},
+ {"GCStats.LastGC", Field, 1, ""},
+ {"GCStats.NumGC", Field, 1, ""},
+ {"GCStats.Pause", Field, 1, ""},
+ {"GCStats.PauseEnd", Field, 4, ""},
+ {"GCStats.PauseQuantiles", Field, 1, ""},
+ {"GCStats.PauseTotal", Field, 1, ""},
+ {"Module", Type, 12, ""},
+ {"Module.Path", Field, 12, ""},
+ {"Module.Replace", Field, 12, ""},
+ {"Module.Sum", Field, 12, ""},
+ {"Module.Version", Field, 12, ""},
+ {"ParseBuildInfo", Func, 18, "func(data string) (bi *BuildInfo, err error)"},
+ {"PrintStack", Func, 0, "func()"},
+ {"ReadBuildInfo", Func, 12, "func() (info *BuildInfo, ok bool)"},
+ {"ReadGCStats", Func, 1, "func(stats *GCStats)"},
+ {"SetCrashOutput", Func, 23, "func(f *os.File, opts CrashOptions) error"},
+ {"SetGCPercent", Func, 1, "func(percent int) int"},
+ {"SetMaxStack", Func, 2, "func(bytes int) int"},
+ {"SetMaxThreads", Func, 2, "func(threads int) int"},
+ {"SetMemoryLimit", Func, 19, "func(limit int64) int64"},
+ {"SetPanicOnFault", Func, 3, "func(enabled bool) bool"},
+ {"SetTraceback", Func, 6, "func(level string)"},
+ {"Stack", Func, 0, "func() []byte"},
+ {"WriteHeapDump", Func, 3, "func(fd uintptr)"},
+ },
+ "runtime/metrics": {
+ {"(Value).Float64", Method, 16, ""},
+ {"(Value).Float64Histogram", Method, 16, ""},
+ {"(Value).Kind", Method, 16, ""},
+ {"(Value).Uint64", Method, 16, ""},
+ {"All", Func, 16, "func() []Description"},
+ {"Description", Type, 16, ""},
+ {"Description.Cumulative", Field, 16, ""},
+ {"Description.Description", Field, 16, ""},
+ {"Description.Kind", Field, 16, ""},
+ {"Description.Name", Field, 16, ""},
+ {"Float64Histogram", Type, 16, ""},
+ {"Float64Histogram.Buckets", Field, 16, ""},
+ {"Float64Histogram.Counts", Field, 16, ""},
+ {"KindBad", Const, 16, ""},
+ {"KindFloat64", Const, 16, ""},
+ {"KindFloat64Histogram", Const, 16, ""},
+ {"KindUint64", Const, 16, ""},
+ {"Read", Func, 16, "func(m []Sample)"},
+ {"Sample", Type, 16, ""},
+ {"Sample.Name", Field, 16, ""},
+ {"Sample.Value", Field, 16, ""},
+ {"Value", Type, 16, ""},
+ {"ValueKind", Type, 16, ""},
+ },
+ "runtime/pprof": {
+ {"(*Profile).Add", Method, 0, ""},
+ {"(*Profile).Count", Method, 0, ""},
+ {"(*Profile).Name", Method, 0, ""},
+ {"(*Profile).Remove", Method, 0, ""},
+ {"(*Profile).WriteTo", Method, 0, ""},
+ {"Do", Func, 9, "func(ctx context.Context, labels LabelSet, f func(context.Context))"},
+ {"ForLabels", Func, 9, "func(ctx context.Context, f func(key string, value string) bool)"},
+ {"Label", Func, 9, "func(ctx context.Context, key string) (string, bool)"},
+ {"LabelSet", Type, 9, ""},
+ {"Labels", Func, 9, "func(args ...string) LabelSet"},
+ {"Lookup", Func, 0, "func(name string) *Profile"},
+ {"NewProfile", Func, 0, "func(name string) *Profile"},
+ {"Profile", Type, 0, ""},
+ {"Profiles", Func, 0, "func() []*Profile"},
+ {"SetGoroutineLabels", Func, 9, "func(ctx context.Context)"},
+ {"StartCPUProfile", Func, 0, "func(w io.Writer) error"},
+ {"StopCPUProfile", Func, 0, "func()"},
+ {"WithLabels", Func, 9, "func(ctx context.Context, labels LabelSet) context.Context"},
+ {"WriteHeapProfile", Func, 0, "func(w io.Writer) error"},
+ },
+ "runtime/trace": {
+ {"(*FlightRecorder).Enabled", Method, 25, ""},
+ {"(*FlightRecorder).Start", Method, 25, ""},
+ {"(*FlightRecorder).Stop", Method, 25, ""},
+ {"(*FlightRecorder).WriteTo", Method, 25, ""},
+ {"(*Region).End", Method, 11, ""},
+ {"(*Task).End", Method, 11, ""},
+ {"FlightRecorder", Type, 25, ""},
+ {"FlightRecorderConfig", Type, 25, ""},
+ {"FlightRecorderConfig.MaxBytes", Field, 25, ""},
+ {"FlightRecorderConfig.MinAge", Field, 25, ""},
+ {"IsEnabled", Func, 11, "func() bool"},
+ {"Log", Func, 11, "func(ctx context.Context, category string, message string)"},
+ {"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"},
+ {"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"},
+ {"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"},
+ {"Region", Type, 11, ""},
+ {"Start", Func, 5, "func(w io.Writer) error"},
+ {"StartRegion", Func, 11, "func(ctx context.Context, regionType string) *Region"},
+ {"Stop", Func, 5, "func()"},
+ {"Task", Type, 11, ""},
+ {"WithRegion", Func, 11, "func(ctx context.Context, regionType string, fn func())"},
+ },
+ "slices": {
+ {"All", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+ {"AppendSeq", Func, 23, "func[Slice ~[]E, E any](s Slice, seq iter.Seq[E]) Slice"},
+ {"Backward", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq2[int, E]"},
+ {"BinarySearch", Func, 21, "func[S ~[]E, E cmp.Ordered](x S, target E) (int, bool)"},
+ {"BinarySearchFunc", Func, 21, "func[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool)"},
+ {"Chunk", Func, 23, "func[Slice ~[]E, E any](s Slice, n int) iter.Seq[Slice]"},
+ {"Clip", Func, 21, "func[S ~[]E, E any](s S) S"},
+ {"Clone", Func, 21, "func[S ~[]E, E any](s S) S"},
+ {"Collect", Func, 23, "func[E any](seq iter.Seq[E]) []E"},
+ {"Compact", Func, 21, "func[S ~[]E, E comparable](s S) S"},
+ {"CompactFunc", Func, 21, "func[S ~[]E, E any](s S, eq func(E, E) bool) S"},
+ {"Compare", Func, 21, "func[S ~[]E, E cmp.Ordered](s1 S, s2 S) int"},
+ {"CompareFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int"},
+ {"Concat", Func, 22, "func[S ~[]E, E any](slices ...S) S"},
+ {"Contains", Func, 21, "func[S ~[]E, E comparable](s S, v E) bool"},
+ {"ContainsFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) bool"},
+ {"Delete", Func, 21, "func[S ~[]E, E any](s S, i int, j int) S"},
+ {"DeleteFunc", Func, 21, "func[S ~[]E, E any](s S, del func(E) bool) S"},
+ {"Equal", Func, 21, "func[S ~[]E, E comparable](s1 S, s2 S) bool"},
+ {"EqualFunc", Func, 21, "func[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool"},
+ {"Grow", Func, 21, "func[S ~[]E, E any](s S, n int) S"},
+ {"Index", Func, 21, "func[S ~[]E, E comparable](s S, v E) int"},
+ {"IndexFunc", Func, 21, "func[S ~[]E, E any](s S, f func(E) bool) int"},
+ {"Insert", Func, 21, "func[S ~[]E, E any](s S, i int, v ...E) S"},
+ {"IsSorted", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) bool"},
+ {"IsSortedFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) bool"},
+ {"Max", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+ {"MaxFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+ {"Min", Func, 21, "func[S ~[]E, E cmp.Ordered](x S) E"},
+ {"MinFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int) E"},
+ {"Repeat", Func, 23, "func[S ~[]E, E any](x S, count int) S"},
+ {"Replace", Func, 21, "func[S ~[]E, E any](s S, i int, j int, v ...E) S"},
+ {"Reverse", Func, 21, "func[S ~[]E, E any](s S)"},
+ {"Sort", Func, 21, "func[S ~[]E, E cmp.Ordered](x S)"},
+ {"SortFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+ {"SortStableFunc", Func, 21, "func[S ~[]E, E any](x S, cmp func(a E, b E) int)"},
+ {"Sorted", Func, 23, "func[E cmp.Ordered](seq iter.Seq[E]) []E"},
+ {"SortedFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+ {"SortedStableFunc", Func, 23, "func[E any](seq iter.Seq[E], cmp func(E, E) int) []E"},
+ {"Values", Func, 23, "func[Slice ~[]E, E any](s Slice) iter.Seq[E]"},
+ },
+ "sort": {
+ {"(Float64Slice).Len", Method, 0, ""},
+ {"(Float64Slice).Less", Method, 0, ""},
+ {"(Float64Slice).Search", Method, 0, ""},
+ {"(Float64Slice).Sort", Method, 0, ""},
+ {"(Float64Slice).Swap", Method, 0, ""},
+ {"(IntSlice).Len", Method, 0, ""},
+ {"(IntSlice).Less", Method, 0, ""},
+ {"(IntSlice).Search", Method, 0, ""},
+ {"(IntSlice).Sort", Method, 0, ""},
+ {"(IntSlice).Swap", Method, 0, ""},
+ {"(StringSlice).Len", Method, 0, ""},
+ {"(StringSlice).Less", Method, 0, ""},
+ {"(StringSlice).Search", Method, 0, ""},
+ {"(StringSlice).Sort", Method, 0, ""},
+ {"(StringSlice).Swap", Method, 0, ""},
+ {"Find", Func, 19, "func(n int, cmp func(int) int) (i int, found bool)"},
+ {"Float64Slice", Type, 0, ""},
+ {"Float64s", Func, 0, "func(x []float64)"},
+ {"Float64sAreSorted", Func, 0, "func(x []float64) bool"},
+ {"IntSlice", Type, 0, ""},
+ {"Interface", Type, 0, ""},
+ {"Ints", Func, 0, "func(x []int)"},
+ {"IntsAreSorted", Func, 0, "func(x []int) bool"},
+ {"IsSorted", Func, 0, "func(data Interface) bool"},
+ {"Reverse", Func, 1, "func(data Interface) Interface"},
+ {"Search", Func, 0, "func(n int, f func(int) bool) int"},
+ {"SearchFloat64s", Func, 0, "func(a []float64, x float64) int"},
+ {"SearchInts", Func, 0, "func(a []int, x int) int"},
+ {"SearchStrings", Func, 0, "func(a []string, x string) int"},
+ {"Slice", Func, 8, "func(x any, less func(i int, j int) bool)"},
+ {"SliceIsSorted", Func, 8, "func(x any, less func(i int, j int) bool) bool"},
+ {"SliceStable", Func, 8, "func(x any, less func(i int, j int) bool)"},
+ {"Sort", Func, 0, "func(data Interface)"},
+ {"Stable", Func, 2, "func(data Interface)"},
+ {"StringSlice", Type, 0, ""},
+ {"Strings", Func, 0, "func(x []string)"},
+ {"StringsAreSorted", Func, 0, "func(x []string) bool"},
+ },
+ "strconv": {
+ {"(*NumError).Error", Method, 0, ""},
+ {"(*NumError).Unwrap", Method, 14, ""},
+ {"AppendBool", Func, 0, "func(dst []byte, b bool) []byte"},
+ {"AppendFloat", Func, 0, "func(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte"},
+ {"AppendInt", Func, 0, "func(dst []byte, i int64, base int) []byte"},
+ {"AppendQuote", Func, 0, "func(dst []byte, s string) []byte"},
+ {"AppendQuoteRune", Func, 0, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteRuneToASCII", Func, 0, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteRuneToGraphic", Func, 6, "func(dst []byte, r rune) []byte"},
+ {"AppendQuoteToASCII", Func, 0, "func(dst []byte, s string) []byte"},
+ {"AppendQuoteToGraphic", Func, 6, "func(dst []byte, s string) []byte"},
+ {"AppendUint", Func, 0, "func(dst []byte, i uint64, base int) []byte"},
+ {"Atoi", Func, 0, "func(s string) (int, error)"},
+ {"CanBackquote", Func, 0, "func(s string) bool"},
+ {"ErrRange", Var, 0, ""},
+ {"ErrSyntax", Var, 0, ""},
+ {"FormatBool", Func, 0, "func(b bool) string"},
+ {"FormatComplex", Func, 15, "func(c complex128, fmt byte, prec int, bitSize int) string"},
+ {"FormatFloat", Func, 0, "func(f float64, fmt byte, prec int, bitSize int) string"},
+ {"FormatInt", Func, 0, "func(i int64, base int) string"},
+ {"FormatUint", Func, 0, "func(i uint64, base int) string"},
+ {"IntSize", Const, 0, ""},
+ {"IsGraphic", Func, 6, "func(r rune) bool"},
+ {"IsPrint", Func, 0, "func(r rune) bool"},
+ {"Itoa", Func, 0, "func(i int) string"},
+ {"NumError", Type, 0, ""},
+ {"NumError.Err", Field, 0, ""},
+ {"NumError.Func", Field, 0, ""},
+ {"NumError.Num", Field, 0, ""},
+ {"ParseBool", Func, 0, "func(str string) (bool, error)"},
+ {"ParseComplex", Func, 15, "func(s string, bitSize int) (complex128, error)"},
+ {"ParseFloat", Func, 0, "func(s string, bitSize int) (float64, error)"},
+ {"ParseInt", Func, 0, "func(s string, base int, bitSize int) (i int64, err error)"},
+ {"ParseUint", Func, 0, "func(s string, base int, bitSize int) (uint64, error)"},
+ {"Quote", Func, 0, "func(s string) string"},
+ {"QuoteRune", Func, 0, "func(r rune) string"},
+ {"QuoteRuneToASCII", Func, 0, "func(r rune) string"},
+ {"QuoteRuneToGraphic", Func, 6, "func(r rune) string"},
+ {"QuoteToASCII", Func, 0, "func(s string) string"},
+ {"QuoteToGraphic", Func, 6, "func(s string) string"},
+ {"QuotedPrefix", Func, 17, "func(s string) (string, error)"},
+ {"Unquote", Func, 0, "func(s string) (string, error)"},
+ {"UnquoteChar", Func, 0, "func(s string, quote byte) (value rune, multibyte bool, tail string, err error)"},
+ },
+ "strings": {
+ {"(*Builder).Cap", Method, 12, ""},
+ {"(*Builder).Grow", Method, 10, ""},
+ {"(*Builder).Len", Method, 10, ""},
+ {"(*Builder).Reset", Method, 10, ""},
+ {"(*Builder).String", Method, 10, ""},
+ {"(*Builder).Write", Method, 10, ""},
+ {"(*Builder).WriteByte", Method, 10, ""},
+ {"(*Builder).WriteRune", Method, 10, ""},
+ {"(*Builder).WriteString", Method, 10, ""},
+ {"(*Reader).Len", Method, 0, ""},
+ {"(*Reader).Read", Method, 0, ""},
+ {"(*Reader).ReadAt", Method, 0, ""},
+ {"(*Reader).ReadByte", Method, 0, ""},
+ {"(*Reader).ReadRune", Method, 0, ""},
+ {"(*Reader).Reset", Method, 7, ""},
+ {"(*Reader).Seek", Method, 0, ""},
+ {"(*Reader).Size", Method, 5, ""},
+ {"(*Reader).UnreadByte", Method, 0, ""},
+ {"(*Reader).UnreadRune", Method, 0, ""},
+ {"(*Reader).WriteTo", Method, 1, ""},
+ {"(*Replacer).Replace", Method, 0, ""},
+ {"(*Replacer).WriteString", Method, 0, ""},
+ {"Builder", Type, 10, ""},
+ {"Clone", Func, 18, "func(s string) string"},
+ {"Compare", Func, 5, "func(a string, b string) int"},
+ {"Contains", Func, 0, "func(s string, substr string) bool"},
+ {"ContainsAny", Func, 0, "func(s string, chars string) bool"},
+ {"ContainsFunc", Func, 21, "func(s string, f func(rune) bool) bool"},
+ {"ContainsRune", Func, 0, "func(s string, r rune) bool"},
+ {"Count", Func, 0, "func(s string, substr string) int"},
+ {"Cut", Func, 18, "func(s string, sep string) (before string, after string, found bool)"},
+ {"CutPrefix", Func, 20, "func(s string, prefix string) (after string, found bool)"},
+ {"CutSuffix", Func, 20, "func(s string, suffix string) (before string, found bool)"},
+ {"EqualFold", Func, 0, "func(s string, t string) bool"},
+ {"Fields", Func, 0, "func(s string) []string"},
+ {"FieldsFunc", Func, 0, "func(s string, f func(rune) bool) []string"},
+ {"FieldsFuncSeq", Func, 24, "func(s string, f func(rune) bool) iter.Seq[string]"},
+ {"FieldsSeq", Func, 24, "func(s string) iter.Seq[string]"},
+ {"HasPrefix", Func, 0, "func(s string, prefix string) bool"},
+ {"HasSuffix", Func, 0, "func(s string, suffix string) bool"},
+ {"Index", Func, 0, "func(s string, substr string) int"},
+ {"IndexAny", Func, 0, "func(s string, chars string) int"},
+ {"IndexByte", Func, 2, "func(s string, c byte) int"},
+ {"IndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+ {"IndexRune", Func, 0, "func(s string, r rune) int"},
+ {"Join", Func, 0, "func(elems []string, sep string) string"},
+ {"LastIndex", Func, 0, "func(s string, substr string) int"},
+ {"LastIndexAny", Func, 0, "func(s string, chars string) int"},
+ {"LastIndexByte", Func, 5, "func(s string, c byte) int"},
+ {"LastIndexFunc", Func, 0, "func(s string, f func(rune) bool) int"},
+ {"Lines", Func, 24, "func(s string) iter.Seq[string]"},
+ {"Map", Func, 0, "func(mapping func(rune) rune, s string) string"},
+ {"NewReader", Func, 0, "func(s string) *Reader"},
+ {"NewReplacer", Func, 0, "func(oldnew ...string) *Replacer"},
+ {"Reader", Type, 0, ""},
+ {"Repeat", Func, 0, "func(s string, count int) string"},
+ {"Replace", Func, 0, "func(s string, old string, new string, n int) string"},
+ {"ReplaceAll", Func, 12, "func(s string, old string, new string) string"},
+ {"Replacer", Type, 0, ""},
+ {"Split", Func, 0, "func(s string, sep string) []string"},
+ {"SplitAfter", Func, 0, "func(s string, sep string) []string"},
+ {"SplitAfterN", Func, 0, "func(s string, sep string, n int) []string"},
+ {"SplitAfterSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+ {"SplitN", Func, 0, "func(s string, sep string, n int) []string"},
+ {"SplitSeq", Func, 24, "func(s string, sep string) iter.Seq[string]"},
+ {"Title", Func, 0, "func(s string) string"},
+ {"ToLower", Func, 0, "func(s string) string"},
+ {"ToLowerSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToTitle", Func, 0, "func(s string) string"},
+ {"ToTitleSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToUpper", Func, 0, "func(s string) string"},
+ {"ToUpperSpecial", Func, 0, "func(c unicode.SpecialCase, s string) string"},
+ {"ToValidUTF8", Func, 13, "func(s string, replacement string) string"},
+ {"Trim", Func, 0, "func(s string, cutset string) string"},
+ {"TrimFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimLeft", Func, 0, "func(s string, cutset string) string"},
+ {"TrimLeftFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimPrefix", Func, 1, "func(s string, prefix string) string"},
+ {"TrimRight", Func, 0, "func(s string, cutset string) string"},
+ {"TrimRightFunc", Func, 0, "func(s string, f func(rune) bool) string"},
+ {"TrimSpace", Func, 0, "func(s string) string"},
+ {"TrimSuffix", Func, 1, "func(s string, suffix string) string"},
+ },
+ "structs": {
+ {"HostLayout", Type, 23, ""},
+ },
+ "sync": {
+ {"(*Cond).Broadcast", Method, 0, ""},
+ {"(*Cond).Signal", Method, 0, ""},
+ {"(*Cond).Wait", Method, 0, ""},
+ {"(*Map).Clear", Method, 23, ""},
+ {"(*Map).CompareAndDelete", Method, 20, ""},
+ {"(*Map).CompareAndSwap", Method, 20, ""},
+ {"(*Map).Delete", Method, 9, ""},
+ {"(*Map).Load", Method, 9, ""},
+ {"(*Map).LoadAndDelete", Method, 15, ""},
+ {"(*Map).LoadOrStore", Method, 9, ""},
+ {"(*Map).Range", Method, 9, ""},
+ {"(*Map).Store", Method, 9, ""},
+ {"(*Map).Swap", Method, 20, ""},
+ {"(*Mutex).Lock", Method, 0, ""},
+ {"(*Mutex).TryLock", Method, 18, ""},
+ {"(*Mutex).Unlock", Method, 0, ""},
+ {"(*Once).Do", Method, 0, ""},
+ {"(*Pool).Get", Method, 3, ""},
+ {"(*Pool).Put", Method, 3, ""},
+ {"(*RWMutex).Lock", Method, 0, ""},
+ {"(*RWMutex).RLock", Method, 0, ""},
+ {"(*RWMutex).RLocker", Method, 0, ""},
+ {"(*RWMutex).RUnlock", Method, 0, ""},
+ {"(*RWMutex).TryLock", Method, 18, ""},
+ {"(*RWMutex).TryRLock", Method, 18, ""},
+ {"(*RWMutex).Unlock", Method, 0, ""},
+ {"(*WaitGroup).Add", Method, 0, ""},
+ {"(*WaitGroup).Done", Method, 0, ""},
+ {"(*WaitGroup).Go", Method, 25, ""},
+ {"(*WaitGroup).Wait", Method, 0, ""},
+ {"Cond", Type, 0, ""},
+ {"Cond.L", Field, 0, ""},
+ {"Locker", Type, 0, ""},
+ {"Map", Type, 9, ""},
+ {"Mutex", Type, 0, ""},
+ {"NewCond", Func, 0, "func(l Locker) *Cond"},
+ {"Once", Type, 0, ""},
+ {"OnceFunc", Func, 21, "func(f func()) func()"},
+ {"OnceValue", Func, 21, "func[T any](f func() T) func() T"},
+ {"OnceValues", Func, 21, "func[T1, T2 any](f func() (T1, T2)) func() (T1, T2)"},
+ {"Pool", Type, 3, ""},
+ {"Pool.New", Field, 3, ""},
+ {"RWMutex", Type, 0, ""},
+ {"WaitGroup", Type, 0, ""},
+ },
+ "sync/atomic": {
+ {"(*Bool).CompareAndSwap", Method, 19, ""},
+ {"(*Bool).Load", Method, 19, ""},
+ {"(*Bool).Store", Method, 19, ""},
+ {"(*Bool).Swap", Method, 19, ""},
+ {"(*Int32).Add", Method, 19, ""},
+ {"(*Int32).And", Method, 23, ""},
+ {"(*Int32).CompareAndSwap", Method, 19, ""},
+ {"(*Int32).Load", Method, 19, ""},
+ {"(*Int32).Or", Method, 23, ""},
+ {"(*Int32).Store", Method, 19, ""},
+ {"(*Int32).Swap", Method, 19, ""},
+ {"(*Int64).Add", Method, 19, ""},
+ {"(*Int64).And", Method, 23, ""},
+ {"(*Int64).CompareAndSwap", Method, 19, ""},
+ {"(*Int64).Load", Method, 19, ""},
+ {"(*Int64).Or", Method, 23, ""},
+ {"(*Int64).Store", Method, 19, ""},
+ {"(*Int64).Swap", Method, 19, ""},
+ {"(*Pointer).CompareAndSwap", Method, 19, ""},
+ {"(*Pointer).Load", Method, 19, ""},
+ {"(*Pointer).Store", Method, 19, ""},
+ {"(*Pointer).Swap", Method, 19, ""},
+ {"(*Uint32).Add", Method, 19, ""},
+ {"(*Uint32).And", Method, 23, ""},
+ {"(*Uint32).CompareAndSwap", Method, 19, ""},
+ {"(*Uint32).Load", Method, 19, ""},
+ {"(*Uint32).Or", Method, 23, ""},
+ {"(*Uint32).Store", Method, 19, ""},
+ {"(*Uint32).Swap", Method, 19, ""},
+ {"(*Uint64).Add", Method, 19, ""},
+ {"(*Uint64).And", Method, 23, ""},
+ {"(*Uint64).CompareAndSwap", Method, 19, ""},
+ {"(*Uint64).Load", Method, 19, ""},
+ {"(*Uint64).Or", Method, 23, ""},
+ {"(*Uint64).Store", Method, 19, ""},
+ {"(*Uint64).Swap", Method, 19, ""},
+ {"(*Uintptr).Add", Method, 19, ""},
+ {"(*Uintptr).And", Method, 23, ""},
+ {"(*Uintptr).CompareAndSwap", Method, 19, ""},
+ {"(*Uintptr).Load", Method, 19, ""},
+ {"(*Uintptr).Or", Method, 23, ""},
+ {"(*Uintptr).Store", Method, 19, ""},
+ {"(*Uintptr).Swap", Method, 19, ""},
+ {"(*Value).CompareAndSwap", Method, 17, ""},
+ {"(*Value).Load", Method, 4, ""},
+ {"(*Value).Store", Method, 4, ""},
+ {"(*Value).Swap", Method, 17, ""},
+ {"AddInt32", Func, 0, "func(addr *int32, delta int32) (new int32)"},
+ {"AddInt64", Func, 0, "func(addr *int64, delta int64) (new int64)"},
+ {"AddUint32", Func, 0, "func(addr *uint32, delta uint32) (new uint32)"},
+ {"AddUint64", Func, 0, "func(addr *uint64, delta uint64) (new uint64)"},
+ {"AddUintptr", Func, 0, "func(addr *uintptr, delta uintptr) (new uintptr)"},
+ {"AndInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+ {"AndInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+ {"AndUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+ {"AndUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+ {"AndUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+ {"Bool", Type, 19, ""},
+ {"CompareAndSwapInt32", Func, 0, "func(addr *int32, old int32, new int32) (swapped bool)"},
+ {"CompareAndSwapInt64", Func, 0, "func(addr *int64, old int64, new int64) (swapped bool)"},
+ {"CompareAndSwapPointer", Func, 0, "func(addr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) (swapped bool)"},
+ {"CompareAndSwapUint32", Func, 0, "func(addr *uint32, old uint32, new uint32) (swapped bool)"},
+ {"CompareAndSwapUint64", Func, 0, "func(addr *uint64, old uint64, new uint64) (swapped bool)"},
+ {"CompareAndSwapUintptr", Func, 0, "func(addr *uintptr, old uintptr, new uintptr) (swapped bool)"},
+ {"Int32", Type, 19, ""},
+ {"Int64", Type, 19, ""},
+ {"LoadInt32", Func, 0, "func(addr *int32) (val int32)"},
+ {"LoadInt64", Func, 0, "func(addr *int64) (val int64)"},
+ {"LoadPointer", Func, 0, "func(addr *unsafe.Pointer) (val unsafe.Pointer)"},
+ {"LoadUint32", Func, 0, "func(addr *uint32) (val uint32)"},
+ {"LoadUint64", Func, 0, "func(addr *uint64) (val uint64)"},
+ {"LoadUintptr", Func, 0, "func(addr *uintptr) (val uintptr)"},
+ {"OrInt32", Func, 23, "func(addr *int32, mask int32) (old int32)"},
+ {"OrInt64", Func, 23, "func(addr *int64, mask int64) (old int64)"},
+ {"OrUint32", Func, 23, "func(addr *uint32, mask uint32) (old uint32)"},
+ {"OrUint64", Func, 23, "func(addr *uint64, mask uint64) (old uint64)"},
+ {"OrUintptr", Func, 23, "func(addr *uintptr, mask uintptr) (old uintptr)"},
+ {"Pointer", Type, 19, ""},
+ {"StoreInt32", Func, 0, "func(addr *int32, val int32)"},
+ {"StoreInt64", Func, 0, "func(addr *int64, val int64)"},
+ {"StorePointer", Func, 0, "func(addr *unsafe.Pointer, val unsafe.Pointer)"},
+ {"StoreUint32", Func, 0, "func(addr *uint32, val uint32)"},
+ {"StoreUint64", Func, 0, "func(addr *uint64, val uint64)"},
+ {"StoreUintptr", Func, 0, "func(addr *uintptr, val uintptr)"},
+ {"SwapInt32", Func, 2, "func(addr *int32, new int32) (old int32)"},
+ {"SwapInt64", Func, 2, "func(addr *int64, new int64) (old int64)"},
+ {"SwapPointer", Func, 2, "func(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)"},
+ {"SwapUint32", Func, 2, "func(addr *uint32, new uint32) (old uint32)"},
+ {"SwapUint64", Func, 2, "func(addr *uint64, new uint64) (old uint64)"},
+ {"SwapUintptr", Func, 2, "func(addr *uintptr, new uintptr) (old uintptr)"},
+ {"Uint32", Type, 19, ""},
+ {"Uint64", Type, 19, ""},
+ {"Uintptr", Type, 19, ""},
+ {"Value", Type, 4, ""},
+ },
+ "syscall": {
+ {"(*Cmsghdr).SetLen", Method, 0, ""},
+ {"(*DLL).FindProc", Method, 0, ""},
+ {"(*DLL).MustFindProc", Method, 0, ""},
+ {"(*DLL).Release", Method, 0, ""},
+ {"(*DLLError).Error", Method, 0, ""},
+ {"(*DLLError).Unwrap", Method, 16, ""},
+ {"(*Filetime).Nanoseconds", Method, 0, ""},
+ {"(*Iovec).SetLen", Method, 0, ""},
+ {"(*LazyDLL).Handle", Method, 0, ""},
+ {"(*LazyDLL).Load", Method, 0, ""},
+ {"(*LazyDLL).NewProc", Method, 0, ""},
+ {"(*LazyProc).Addr", Method, 0, ""},
+ {"(*LazyProc).Call", Method, 0, ""},
+ {"(*LazyProc).Find", Method, 0, ""},
+ {"(*Msghdr).SetControllen", Method, 0, ""},
+ {"(*Proc).Addr", Method, 0, ""},
+ {"(*Proc).Call", Method, 0, ""},
+ {"(*PtraceRegs).PC", Method, 0, ""},
+ {"(*PtraceRegs).SetPC", Method, 0, ""},
+ {"(*RawSockaddrAny).Sockaddr", Method, 0, ""},
+ {"(*SID).Copy", Method, 0, ""},
+ {"(*SID).Len", Method, 0, ""},
+ {"(*SID).LookupAccount", Method, 0, ""},
+ {"(*SID).String", Method, 0, ""},
+ {"(*Timespec).Nano", Method, 0, ""},
+ {"(*Timespec).Unix", Method, 0, ""},
+ {"(*Timeval).Nano", Method, 0, ""},
+ {"(*Timeval).Nanoseconds", Method, 0, ""},
+ {"(*Timeval).Unix", Method, 0, ""},
+ {"(Errno).Error", Method, 0, ""},
+ {"(Errno).Is", Method, 13, ""},
+ {"(Errno).Temporary", Method, 0, ""},
+ {"(Errno).Timeout", Method, 0, ""},
+ {"(Signal).Signal", Method, 0, ""},
+ {"(Signal).String", Method, 0, ""},
+ {"(Token).Close", Method, 0, ""},
+ {"(Token).GetTokenPrimaryGroup", Method, 0, ""},
+ {"(Token).GetTokenUser", Method, 0, ""},
+ {"(Token).GetUserProfileDirectory", Method, 0, ""},
+ {"(WaitStatus).Continued", Method, 0, ""},
+ {"(WaitStatus).CoreDump", Method, 0, ""},
+ {"(WaitStatus).ExitStatus", Method, 0, ""},
+ {"(WaitStatus).Exited", Method, 0, ""},
+ {"(WaitStatus).Signal", Method, 0, ""},
+ {"(WaitStatus).Signaled", Method, 0, ""},
+ {"(WaitStatus).StopSignal", Method, 0, ""},
+ {"(WaitStatus).Stopped", Method, 0, ""},
+ {"(WaitStatus).TrapCause", Method, 0, ""},
+ {"AF_ALG", Const, 0, ""},
+ {"AF_APPLETALK", Const, 0, ""},
+ {"AF_ARP", Const, 0, ""},
+ {"AF_ASH", Const, 0, ""},
+ {"AF_ATM", Const, 0, ""},
+ {"AF_ATMPVC", Const, 0, ""},
+ {"AF_ATMSVC", Const, 0, ""},
+ {"AF_AX25", Const, 0, ""},
+ {"AF_BLUETOOTH", Const, 0, ""},
+ {"AF_BRIDGE", Const, 0, ""},
+ {"AF_CAIF", Const, 0, ""},
+ {"AF_CAN", Const, 0, ""},
+ {"AF_CCITT", Const, 0, ""},
+ {"AF_CHAOS", Const, 0, ""},
+ {"AF_CNT", Const, 0, ""},
+ {"AF_COIP", Const, 0, ""},
+ {"AF_DATAKIT", Const, 0, ""},
+ {"AF_DECnet", Const, 0, ""},
+ {"AF_DLI", Const, 0, ""},
+ {"AF_E164", Const, 0, ""},
+ {"AF_ECMA", Const, 0, ""},
+ {"AF_ECONET", Const, 0, ""},
+ {"AF_ENCAP", Const, 1, ""},
+ {"AF_FILE", Const, 0, ""},
+ {"AF_HYLINK", Const, 0, ""},
+ {"AF_IEEE80211", Const, 0, ""},
+ {"AF_IEEE802154", Const, 0, ""},
+ {"AF_IMPLINK", Const, 0, ""},
+ {"AF_INET", Const, 0, ""},
+ {"AF_INET6", Const, 0, ""},
+ {"AF_INET6_SDP", Const, 3, ""},
+ {"AF_INET_SDP", Const, 3, ""},
+ {"AF_IPX", Const, 0, ""},
+ {"AF_IRDA", Const, 0, ""},
+ {"AF_ISDN", Const, 0, ""},
+ {"AF_ISO", Const, 0, ""},
+ {"AF_IUCV", Const, 0, ""},
+ {"AF_KEY", Const, 0, ""},
+ {"AF_LAT", Const, 0, ""},
+ {"AF_LINK", Const, 0, ""},
+ {"AF_LLC", Const, 0, ""},
+ {"AF_LOCAL", Const, 0, ""},
+ {"AF_MAX", Const, 0, ""},
+ {"AF_MPLS", Const, 1, ""},
+ {"AF_NATM", Const, 0, ""},
+ {"AF_NDRV", Const, 0, ""},
+ {"AF_NETBEUI", Const, 0, ""},
+ {"AF_NETBIOS", Const, 0, ""},
+ {"AF_NETGRAPH", Const, 0, ""},
+ {"AF_NETLINK", Const, 0, ""},
+ {"AF_NETROM", Const, 0, ""},
+ {"AF_NS", Const, 0, ""},
+ {"AF_OROUTE", Const, 1, ""},
+ {"AF_OSI", Const, 0, ""},
+ {"AF_PACKET", Const, 0, ""},
+ {"AF_PHONET", Const, 0, ""},
+ {"AF_PPP", Const, 0, ""},
+ {"AF_PPPOX", Const, 0, ""},
+ {"AF_PUP", Const, 0, ""},
+ {"AF_RDS", Const, 0, ""},
+ {"AF_RESERVED_36", Const, 0, ""},
+ {"AF_ROSE", Const, 0, ""},
+ {"AF_ROUTE", Const, 0, ""},
+ {"AF_RXRPC", Const, 0, ""},
+ {"AF_SCLUSTER", Const, 0, ""},
+ {"AF_SECURITY", Const, 0, ""},
+ {"AF_SIP", Const, 0, ""},
+ {"AF_SLOW", Const, 0, ""},
+ {"AF_SNA", Const, 0, ""},
+ {"AF_SYSTEM", Const, 0, ""},
+ {"AF_TIPC", Const, 0, ""},
+ {"AF_UNIX", Const, 0, ""},
+ {"AF_UNSPEC", Const, 0, ""},
+ {"AF_UTUN", Const, 16, ""},
+ {"AF_VENDOR00", Const, 0, ""},
+ {"AF_VENDOR01", Const, 0, ""},
+ {"AF_VENDOR02", Const, 0, ""},
+ {"AF_VENDOR03", Const, 0, ""},
+ {"AF_VENDOR04", Const, 0, ""},
+ {"AF_VENDOR05", Const, 0, ""},
+ {"AF_VENDOR06", Const, 0, ""},
+ {"AF_VENDOR07", Const, 0, ""},
+ {"AF_VENDOR08", Const, 0, ""},
+ {"AF_VENDOR09", Const, 0, ""},
+ {"AF_VENDOR10", Const, 0, ""},
+ {"AF_VENDOR11", Const, 0, ""},
+ {"AF_VENDOR12", Const, 0, ""},
+ {"AF_VENDOR13", Const, 0, ""},
+ {"AF_VENDOR14", Const, 0, ""},
+ {"AF_VENDOR15", Const, 0, ""},
+ {"AF_VENDOR16", Const, 0, ""},
+ {"AF_VENDOR17", Const, 0, ""},
+ {"AF_VENDOR18", Const, 0, ""},
+ {"AF_VENDOR19", Const, 0, ""},
+ {"AF_VENDOR20", Const, 0, ""},
+ {"AF_VENDOR21", Const, 0, ""},
+ {"AF_VENDOR22", Const, 0, ""},
+ {"AF_VENDOR23", Const, 0, ""},
+ {"AF_VENDOR24", Const, 0, ""},
+ {"AF_VENDOR25", Const, 0, ""},
+ {"AF_VENDOR26", Const, 0, ""},
+ {"AF_VENDOR27", Const, 0, ""},
+ {"AF_VENDOR28", Const, 0, ""},
+ {"AF_VENDOR29", Const, 0, ""},
+ {"AF_VENDOR30", Const, 0, ""},
+ {"AF_VENDOR31", Const, 0, ""},
+ {"AF_VENDOR32", Const, 0, ""},
+ {"AF_VENDOR33", Const, 0, ""},
+ {"AF_VENDOR34", Const, 0, ""},
+ {"AF_VENDOR35", Const, 0, ""},
+ {"AF_VENDOR36", Const, 0, ""},
+ {"AF_VENDOR37", Const, 0, ""},
+ {"AF_VENDOR38", Const, 0, ""},
+ {"AF_VENDOR39", Const, 0, ""},
+ {"AF_VENDOR40", Const, 0, ""},
+ {"AF_VENDOR41", Const, 0, ""},
+ {"AF_VENDOR42", Const, 0, ""},
+ {"AF_VENDOR43", Const, 0, ""},
+ {"AF_VENDOR44", Const, 0, ""},
+ {"AF_VENDOR45", Const, 0, ""},
+ {"AF_VENDOR46", Const, 0, ""},
+ {"AF_VENDOR47", Const, 0, ""},
+ {"AF_WANPIPE", Const, 0, ""},
+ {"AF_X25", Const, 0, ""},
+ {"AI_CANONNAME", Const, 1, ""},
+ {"AI_NUMERICHOST", Const, 1, ""},
+ {"AI_PASSIVE", Const, 1, ""},
+ {"APPLICATION_ERROR", Const, 0, ""},
+ {"ARPHRD_ADAPT", Const, 0, ""},
+ {"ARPHRD_APPLETLK", Const, 0, ""},
+ {"ARPHRD_ARCNET", Const, 0, ""},
+ {"ARPHRD_ASH", Const, 0, ""},
+ {"ARPHRD_ATM", Const, 0, ""},
+ {"ARPHRD_AX25", Const, 0, ""},
+ {"ARPHRD_BIF", Const, 0, ""},
+ {"ARPHRD_CHAOS", Const, 0, ""},
+ {"ARPHRD_CISCO", Const, 0, ""},
+ {"ARPHRD_CSLIP", Const, 0, ""},
+ {"ARPHRD_CSLIP6", Const, 0, ""},
+ {"ARPHRD_DDCMP", Const, 0, ""},
+ {"ARPHRD_DLCI", Const, 0, ""},
+ {"ARPHRD_ECONET", Const, 0, ""},
+ {"ARPHRD_EETHER", Const, 0, ""},
+ {"ARPHRD_ETHER", Const, 0, ""},
+ {"ARPHRD_EUI64", Const, 0, ""},
+ {"ARPHRD_FCAL", Const, 0, ""},
+ {"ARPHRD_FCFABRIC", Const, 0, ""},
+ {"ARPHRD_FCPL", Const, 0, ""},
+ {"ARPHRD_FCPP", Const, 0, ""},
+ {"ARPHRD_FDDI", Const, 0, ""},
+ {"ARPHRD_FRAD", Const, 0, ""},
+ {"ARPHRD_FRELAY", Const, 1, ""},
+ {"ARPHRD_HDLC", Const, 0, ""},
+ {"ARPHRD_HIPPI", Const, 0, ""},
+ {"ARPHRD_HWX25", Const, 0, ""},
+ {"ARPHRD_IEEE1394", Const, 0, ""},
+ {"ARPHRD_IEEE802", Const, 0, ""},
+ {"ARPHRD_IEEE80211", Const, 0, ""},
+ {"ARPHRD_IEEE80211_PRISM", Const, 0, ""},
+ {"ARPHRD_IEEE80211_RADIOTAP", Const, 0, ""},
+ {"ARPHRD_IEEE802154", Const, 0, ""},
+ {"ARPHRD_IEEE802154_PHY", Const, 0, ""},
+ {"ARPHRD_IEEE802_TR", Const, 0, ""},
+ {"ARPHRD_INFINIBAND", Const, 0, ""},
+ {"ARPHRD_IPDDP", Const, 0, ""},
+ {"ARPHRD_IPGRE", Const, 0, ""},
+ {"ARPHRD_IRDA", Const, 0, ""},
+ {"ARPHRD_LAPB", Const, 0, ""},
+ {"ARPHRD_LOCALTLK", Const, 0, ""},
+ {"ARPHRD_LOOPBACK", Const, 0, ""},
+ {"ARPHRD_METRICOM", Const, 0, ""},
+ {"ARPHRD_NETROM", Const, 0, ""},
+ {"ARPHRD_NONE", Const, 0, ""},
+ {"ARPHRD_PIMREG", Const, 0, ""},
+ {"ARPHRD_PPP", Const, 0, ""},
+ {"ARPHRD_PRONET", Const, 0, ""},
+ {"ARPHRD_RAWHDLC", Const, 0, ""},
+ {"ARPHRD_ROSE", Const, 0, ""},
+ {"ARPHRD_RSRVD", Const, 0, ""},
+ {"ARPHRD_SIT", Const, 0, ""},
+ {"ARPHRD_SKIP", Const, 0, ""},
+ {"ARPHRD_SLIP", Const, 0, ""},
+ {"ARPHRD_SLIP6", Const, 0, ""},
+ {"ARPHRD_STRIP", Const, 1, ""},
+ {"ARPHRD_TUNNEL", Const, 0, ""},
+ {"ARPHRD_TUNNEL6", Const, 0, ""},
+ {"ARPHRD_VOID", Const, 0, ""},
+ {"ARPHRD_X25", Const, 0, ""},
+ {"AUTHTYPE_CLIENT", Const, 0, ""},
+ {"AUTHTYPE_SERVER", Const, 0, ""},
+ {"Accept", Func, 0, "func(fd int) (nfd int, sa Sockaddr, err error)"},
+ {"Accept4", Func, 1, "func(fd int, flags int) (nfd int, sa Sockaddr, err error)"},
+ {"AcceptEx", Func, 0, ""},
+ {"Access", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Acct", Func, 0, "func(path string) (err error)"},
+ {"AddrinfoW", Type, 1, ""},
+ {"AddrinfoW.Addr", Field, 1, ""},
+ {"AddrinfoW.Addrlen", Field, 1, ""},
+ {"AddrinfoW.Canonname", Field, 1, ""},
+ {"AddrinfoW.Family", Field, 1, ""},
+ {"AddrinfoW.Flags", Field, 1, ""},
+ {"AddrinfoW.Next", Field, 1, ""},
+ {"AddrinfoW.Protocol", Field, 1, ""},
+ {"AddrinfoW.Socktype", Field, 1, ""},
+ {"Adjtime", Func, 0, ""},
+ {"Adjtimex", Func, 0, "func(buf *Timex) (state int, err error)"},
+ {"AllThreadsSyscall", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"AllThreadsSyscall6", Func, 16, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"AttachLsf", Func, 0, "func(fd int, i []SockFilter) error"},
+ {"B0", Const, 0, ""},
+ {"B1000000", Const, 0, ""},
+ {"B110", Const, 0, ""},
+ {"B115200", Const, 0, ""},
+ {"B1152000", Const, 0, ""},
+ {"B1200", Const, 0, ""},
+ {"B134", Const, 0, ""},
+ {"B14400", Const, 1, ""},
+ {"B150", Const, 0, ""},
+ {"B1500000", Const, 0, ""},
+ {"B1800", Const, 0, ""},
+ {"B19200", Const, 0, ""},
+ {"B200", Const, 0, ""},
+ {"B2000000", Const, 0, ""},
+ {"B230400", Const, 0, ""},
+ {"B2400", Const, 0, ""},
+ {"B2500000", Const, 0, ""},
+ {"B28800", Const, 1, ""},
+ {"B300", Const, 0, ""},
+ {"B3000000", Const, 0, ""},
+ {"B3500000", Const, 0, ""},
+ {"B38400", Const, 0, ""},
+ {"B4000000", Const, 0, ""},
+ {"B460800", Const, 0, ""},
+ {"B4800", Const, 0, ""},
+ {"B50", Const, 0, ""},
+ {"B500000", Const, 0, ""},
+ {"B57600", Const, 0, ""},
+ {"B576000", Const, 0, ""},
+ {"B600", Const, 0, ""},
+ {"B7200", Const, 1, ""},
+ {"B75", Const, 0, ""},
+ {"B76800", Const, 1, ""},
+ {"B921600", Const, 0, ""},
+ {"B9600", Const, 0, ""},
+ {"BASE_PROTOCOL", Const, 2, ""},
+ {"BIOCFEEDBACK", Const, 0, ""},
+ {"BIOCFLUSH", Const, 0, ""},
+ {"BIOCGBLEN", Const, 0, ""},
+ {"BIOCGDIRECTION", Const, 0, ""},
+ {"BIOCGDIRFILT", Const, 1, ""},
+ {"BIOCGDLT", Const, 0, ""},
+ {"BIOCGDLTLIST", Const, 0, ""},
+ {"BIOCGETBUFMODE", Const, 0, ""},
+ {"BIOCGETIF", Const, 0, ""},
+ {"BIOCGETZMAX", Const, 0, ""},
+ {"BIOCGFEEDBACK", Const, 1, ""},
+ {"BIOCGFILDROP", Const, 1, ""},
+ {"BIOCGHDRCMPLT", Const, 0, ""},
+ {"BIOCGRSIG", Const, 0, ""},
+ {"BIOCGRTIMEOUT", Const, 0, ""},
+ {"BIOCGSEESENT", Const, 0, ""},
+ {"BIOCGSTATS", Const, 0, ""},
+ {"BIOCGSTATSOLD", Const, 1, ""},
+ {"BIOCGTSTAMP", Const, 1, ""},
+ {"BIOCIMMEDIATE", Const, 0, ""},
+ {"BIOCLOCK", Const, 0, ""},
+ {"BIOCPROMISC", Const, 0, ""},
+ {"BIOCROTZBUF", Const, 0, ""},
+ {"BIOCSBLEN", Const, 0, ""},
+ {"BIOCSDIRECTION", Const, 0, ""},
+ {"BIOCSDIRFILT", Const, 1, ""},
+ {"BIOCSDLT", Const, 0, ""},
+ {"BIOCSETBUFMODE", Const, 0, ""},
+ {"BIOCSETF", Const, 0, ""},
+ {"BIOCSETFNR", Const, 0, ""},
+ {"BIOCSETIF", Const, 0, ""},
+ {"BIOCSETWF", Const, 0, ""},
+ {"BIOCSETZBUF", Const, 0, ""},
+ {"BIOCSFEEDBACK", Const, 1, ""},
+ {"BIOCSFILDROP", Const, 1, ""},
+ {"BIOCSHDRCMPLT", Const, 0, ""},
+ {"BIOCSRSIG", Const, 0, ""},
+ {"BIOCSRTIMEOUT", Const, 0, ""},
+ {"BIOCSSEESENT", Const, 0, ""},
+ {"BIOCSTCPF", Const, 1, ""},
+ {"BIOCSTSTAMP", Const, 1, ""},
+ {"BIOCSUDPF", Const, 1, ""},
+ {"BIOCVERSION", Const, 0, ""},
+ {"BPF_A", Const, 0, ""},
+ {"BPF_ABS", Const, 0, ""},
+ {"BPF_ADD", Const, 0, ""},
+ {"BPF_ALIGNMENT", Const, 0, ""},
+ {"BPF_ALIGNMENT32", Const, 1, ""},
+ {"BPF_ALU", Const, 0, ""},
+ {"BPF_AND", Const, 0, ""},
+ {"BPF_B", Const, 0, ""},
+ {"BPF_BUFMODE_BUFFER", Const, 0, ""},
+ {"BPF_BUFMODE_ZBUF", Const, 0, ""},
+ {"BPF_DFLTBUFSIZE", Const, 1, ""},
+ {"BPF_DIRECTION_IN", Const, 1, ""},
+ {"BPF_DIRECTION_OUT", Const, 1, ""},
+ {"BPF_DIV", Const, 0, ""},
+ {"BPF_H", Const, 0, ""},
+ {"BPF_IMM", Const, 0, ""},
+ {"BPF_IND", Const, 0, ""},
+ {"BPF_JA", Const, 0, ""},
+ {"BPF_JEQ", Const, 0, ""},
+ {"BPF_JGE", Const, 0, ""},
+ {"BPF_JGT", Const, 0, ""},
+ {"BPF_JMP", Const, 0, ""},
+ {"BPF_JSET", Const, 0, ""},
+ {"BPF_K", Const, 0, ""},
+ {"BPF_LD", Const, 0, ""},
+ {"BPF_LDX", Const, 0, ""},
+ {"BPF_LEN", Const, 0, ""},
+ {"BPF_LSH", Const, 0, ""},
+ {"BPF_MAJOR_VERSION", Const, 0, ""},
+ {"BPF_MAXBUFSIZE", Const, 0, ""},
+ {"BPF_MAXINSNS", Const, 0, ""},
+ {"BPF_MEM", Const, 0, ""},
+ {"BPF_MEMWORDS", Const, 0, ""},
+ {"BPF_MINBUFSIZE", Const, 0, ""},
+ {"BPF_MINOR_VERSION", Const, 0, ""},
+ {"BPF_MISC", Const, 0, ""},
+ {"BPF_MSH", Const, 0, ""},
+ {"BPF_MUL", Const, 0, ""},
+ {"BPF_NEG", Const, 0, ""},
+ {"BPF_OR", Const, 0, ""},
+ {"BPF_RELEASE", Const, 0, ""},
+ {"BPF_RET", Const, 0, ""},
+ {"BPF_RSH", Const, 0, ""},
+ {"BPF_ST", Const, 0, ""},
+ {"BPF_STX", Const, 0, ""},
+ {"BPF_SUB", Const, 0, ""},
+ {"BPF_TAX", Const, 0, ""},
+ {"BPF_TXA", Const, 0, ""},
+ {"BPF_T_BINTIME", Const, 1, ""},
+ {"BPF_T_BINTIME_FAST", Const, 1, ""},
+ {"BPF_T_BINTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_FAST", Const, 1, ""},
+ {"BPF_T_FLAG_MASK", Const, 1, ""},
+ {"BPF_T_FORMAT_MASK", Const, 1, ""},
+ {"BPF_T_MICROTIME", Const, 1, ""},
+ {"BPF_T_MICROTIME_FAST", Const, 1, ""},
+ {"BPF_T_MICROTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_MONOTONIC", Const, 1, ""},
+ {"BPF_T_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_NANOTIME", Const, 1, ""},
+ {"BPF_T_NANOTIME_FAST", Const, 1, ""},
+ {"BPF_T_NANOTIME_MONOTONIC", Const, 1, ""},
+ {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1, ""},
+ {"BPF_T_NONE", Const, 1, ""},
+ {"BPF_T_NORMAL", Const, 1, ""},
+ {"BPF_W", Const, 0, ""},
+ {"BPF_X", Const, 0, ""},
+ {"BRKINT", Const, 0, ""},
+ {"Bind", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+ {"BindToDevice", Func, 0, "func(fd int, device string) (err error)"},
+ {"BpfBuflen", Func, 0, ""},
+ {"BpfDatalink", Func, 0, ""},
+ {"BpfHdr", Type, 0, ""},
+ {"BpfHdr.Caplen", Field, 0, ""},
+ {"BpfHdr.Datalen", Field, 0, ""},
+ {"BpfHdr.Hdrlen", Field, 0, ""},
+ {"BpfHdr.Pad_cgo_0", Field, 0, ""},
+ {"BpfHdr.Tstamp", Field, 0, ""},
+ {"BpfHeadercmpl", Func, 0, ""},
+ {"BpfInsn", Type, 0, ""},
+ {"BpfInsn.Code", Field, 0, ""},
+ {"BpfInsn.Jf", Field, 0, ""},
+ {"BpfInsn.Jt", Field, 0, ""},
+ {"BpfInsn.K", Field, 0, ""},
+ {"BpfInterface", Func, 0, ""},
+ {"BpfJump", Func, 0, ""},
+ {"BpfProgram", Type, 0, ""},
+ {"BpfProgram.Insns", Field, 0, ""},
+ {"BpfProgram.Len", Field, 0, ""},
+ {"BpfProgram.Pad_cgo_0", Field, 0, ""},
+ {"BpfStat", Type, 0, ""},
+ {"BpfStat.Capt", Field, 2, ""},
+ {"BpfStat.Drop", Field, 0, ""},
+ {"BpfStat.Padding", Field, 2, ""},
+ {"BpfStat.Recv", Field, 0, ""},
+ {"BpfStats", Func, 0, ""},
+ {"BpfStmt", Func, 0, ""},
+ {"BpfTimeout", Func, 0, ""},
+ {"BpfTimeval", Type, 2, ""},
+ {"BpfTimeval.Sec", Field, 2, ""},
+ {"BpfTimeval.Usec", Field, 2, ""},
+ {"BpfVersion", Type, 0, ""},
+ {"BpfVersion.Major", Field, 0, ""},
+ {"BpfVersion.Minor", Field, 0, ""},
+ {"BpfZbuf", Type, 0, ""},
+ {"BpfZbuf.Bufa", Field, 0, ""},
+ {"BpfZbuf.Bufb", Field, 0, ""},
+ {"BpfZbuf.Buflen", Field, 0, ""},
+ {"BpfZbufHeader", Type, 0, ""},
+ {"BpfZbufHeader.Kernel_gen", Field, 0, ""},
+ {"BpfZbufHeader.Kernel_len", Field, 0, ""},
+ {"BpfZbufHeader.User_gen", Field, 0, ""},
+ {"BpfZbufHeader.X_bzh_pad", Field, 0, ""},
+ {"ByHandleFileInformation", Type, 0, ""},
+ {"ByHandleFileInformation.CreationTime", Field, 0, ""},
+ {"ByHandleFileInformation.FileAttributes", Field, 0, ""},
+ {"ByHandleFileInformation.FileIndexHigh", Field, 0, ""},
+ {"ByHandleFileInformation.FileIndexLow", Field, 0, ""},
+ {"ByHandleFileInformation.FileSizeHigh", Field, 0, ""},
+ {"ByHandleFileInformation.FileSizeLow", Field, 0, ""},
+ {"ByHandleFileInformation.LastAccessTime", Field, 0, ""},
+ {"ByHandleFileInformation.LastWriteTime", Field, 0, ""},
+ {"ByHandleFileInformation.NumberOfLinks", Field, 0, ""},
+ {"ByHandleFileInformation.VolumeSerialNumber", Field, 0, ""},
+ {"BytePtrFromString", Func, 1, "func(s string) (*byte, error)"},
+ {"ByteSliceFromString", Func, 1, "func(s string) ([]byte, error)"},
+ {"CCR0_FLUSH", Const, 1, ""},
+ {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_BASE", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_EV", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0, ""},
+ {"CERT_CHAIN_POLICY_SSL", Const, 0, ""},
+ {"CERT_E_CN_NO_MATCH", Const, 0, ""},
+ {"CERT_E_EXPIRED", Const, 0, ""},
+ {"CERT_E_PURPOSE", Const, 0, ""},
+ {"CERT_E_ROLE", Const, 0, ""},
+ {"CERT_E_UNTRUSTEDROOT", Const, 0, ""},
+ {"CERT_STORE_ADD_ALWAYS", Const, 0, ""},
+ {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0, ""},
+ {"CERT_STORE_PROV_MEMORY", Const, 0, ""},
+ {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0, ""},
+ {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0, ""},
+ {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_INVALID_EXTENSION", Const, 0, ""},
+ {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0, ""},
+ {"CERT_TRUST_IS_CYCLIC", Const, 0, ""},
+ {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0, ""},
+ {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0, ""},
+ {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0, ""},
+ {"CERT_TRUST_IS_REVOKED", Const, 0, ""},
+ {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0, ""},
+ {"CERT_TRUST_NO_ERROR", Const, 0, ""},
+ {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0, ""},
+ {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0, ""},
+ {"CFLUSH", Const, 1, ""},
+ {"CLOCAL", Const, 0, ""},
+ {"CLONE_CHILD_CLEARTID", Const, 2, ""},
+ {"CLONE_CHILD_SETTID", Const, 2, ""},
+ {"CLONE_CLEAR_SIGHAND", Const, 20, ""},
+ {"CLONE_CSIGNAL", Const, 3, ""},
+ {"CLONE_DETACHED", Const, 2, ""},
+ {"CLONE_FILES", Const, 2, ""},
+ {"CLONE_FS", Const, 2, ""},
+ {"CLONE_INTO_CGROUP", Const, 20, ""},
+ {"CLONE_IO", Const, 2, ""},
+ {"CLONE_NEWCGROUP", Const, 20, ""},
+ {"CLONE_NEWIPC", Const, 2, ""},
+ {"CLONE_NEWNET", Const, 2, ""},
+ {"CLONE_NEWNS", Const, 2, ""},
+ {"CLONE_NEWPID", Const, 2, ""},
+ {"CLONE_NEWTIME", Const, 20, ""},
+ {"CLONE_NEWUSER", Const, 2, ""},
+ {"CLONE_NEWUTS", Const, 2, ""},
+ {"CLONE_PARENT", Const, 2, ""},
+ {"CLONE_PARENT_SETTID", Const, 2, ""},
+ {"CLONE_PID", Const, 3, ""},
+ {"CLONE_PIDFD", Const, 20, ""},
+ {"CLONE_PTRACE", Const, 2, ""},
+ {"CLONE_SETTLS", Const, 2, ""},
+ {"CLONE_SIGHAND", Const, 2, ""},
+ {"CLONE_SYSVSEM", Const, 2, ""},
+ {"CLONE_THREAD", Const, 2, ""},
+ {"CLONE_UNTRACED", Const, 2, ""},
+ {"CLONE_VFORK", Const, 2, ""},
+ {"CLONE_VM", Const, 2, ""},
+ {"CPUID_CFLUSH", Const, 1, ""},
+ {"CREAD", Const, 0, ""},
+ {"CREATE_ALWAYS", Const, 0, ""},
+ {"CREATE_NEW", Const, 0, ""},
+ {"CREATE_NEW_PROCESS_GROUP", Const, 1, ""},
+ {"CREATE_UNICODE_ENVIRONMENT", Const, 0, ""},
+ {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0, ""},
+ {"CRYPT_DELETEKEYSET", Const, 0, ""},
+ {"CRYPT_MACHINE_KEYSET", Const, 0, ""},
+ {"CRYPT_NEWKEYSET", Const, 0, ""},
+ {"CRYPT_SILENT", Const, 0, ""},
+ {"CRYPT_VERIFYCONTEXT", Const, 0, ""},
+ {"CS5", Const, 0, ""},
+ {"CS6", Const, 0, ""},
+ {"CS7", Const, 0, ""},
+ {"CS8", Const, 0, ""},
+ {"CSIZE", Const, 0, ""},
+ {"CSTART", Const, 1, ""},
+ {"CSTATUS", Const, 1, ""},
+ {"CSTOP", Const, 1, ""},
+ {"CSTOPB", Const, 0, ""},
+ {"CSUSP", Const, 1, ""},
+ {"CTL_MAXNAME", Const, 0, ""},
+ {"CTL_NET", Const, 0, ""},
+ {"CTL_QUERY", Const, 1, ""},
+ {"CTRL_BREAK_EVENT", Const, 1, ""},
+ {"CTRL_CLOSE_EVENT", Const, 14, ""},
+ {"CTRL_C_EVENT", Const, 1, ""},
+ {"CTRL_LOGOFF_EVENT", Const, 14, ""},
+ {"CTRL_SHUTDOWN_EVENT", Const, 14, ""},
+ {"CancelIo", Func, 0, ""},
+ {"CancelIoEx", Func, 1, ""},
+ {"CertAddCertificateContextToStore", Func, 0, ""},
+ {"CertChainContext", Type, 0, ""},
+ {"CertChainContext.ChainCount", Field, 0, ""},
+ {"CertChainContext.Chains", Field, 0, ""},
+ {"CertChainContext.HasRevocationFreshnessTime", Field, 0, ""},
+ {"CertChainContext.LowerQualityChainCount", Field, 0, ""},
+ {"CertChainContext.LowerQualityChains", Field, 0, ""},
+ {"CertChainContext.RevocationFreshnessTime", Field, 0, ""},
+ {"CertChainContext.Size", Field, 0, ""},
+ {"CertChainContext.TrustStatus", Field, 0, ""},
+ {"CertChainElement", Type, 0, ""},
+ {"CertChainElement.ApplicationUsage", Field, 0, ""},
+ {"CertChainElement.CertContext", Field, 0, ""},
+ {"CertChainElement.ExtendedErrorInfo", Field, 0, ""},
+ {"CertChainElement.IssuanceUsage", Field, 0, ""},
+ {"CertChainElement.RevocationInfo", Field, 0, ""},
+ {"CertChainElement.Size", Field, 0, ""},
+ {"CertChainElement.TrustStatus", Field, 0, ""},
+ {"CertChainPara", Type, 0, ""},
+ {"CertChainPara.CacheResync", Field, 0, ""},
+ {"CertChainPara.CheckRevocationFreshnessTime", Field, 0, ""},
+ {"CertChainPara.RequestedUsage", Field, 0, ""},
+ {"CertChainPara.RequstedIssuancePolicy", Field, 0, ""},
+ {"CertChainPara.RevocationFreshnessTime", Field, 0, ""},
+ {"CertChainPara.Size", Field, 0, ""},
+ {"CertChainPara.URLRetrievalTimeout", Field, 0, ""},
+ {"CertChainPolicyPara", Type, 0, ""},
+ {"CertChainPolicyPara.ExtraPolicyPara", Field, 0, ""},
+ {"CertChainPolicyPara.Flags", Field, 0, ""},
+ {"CertChainPolicyPara.Size", Field, 0, ""},
+ {"CertChainPolicyStatus", Type, 0, ""},
+ {"CertChainPolicyStatus.ChainIndex", Field, 0, ""},
+ {"CertChainPolicyStatus.ElementIndex", Field, 0, ""},
+ {"CertChainPolicyStatus.Error", Field, 0, ""},
+ {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0, ""},
+ {"CertChainPolicyStatus.Size", Field, 0, ""},
+ {"CertCloseStore", Func, 0, ""},
+ {"CertContext", Type, 0, ""},
+ {"CertContext.CertInfo", Field, 0, ""},
+ {"CertContext.EncodedCert", Field, 0, ""},
+ {"CertContext.EncodingType", Field, 0, ""},
+ {"CertContext.Length", Field, 0, ""},
+ {"CertContext.Store", Field, 0, ""},
+ {"CertCreateCertificateContext", Func, 0, ""},
+ {"CertEnhKeyUsage", Type, 0, ""},
+ {"CertEnhKeyUsage.Length", Field, 0, ""},
+ {"CertEnhKeyUsage.UsageIdentifiers", Field, 0, ""},
+ {"CertEnumCertificatesInStore", Func, 0, ""},
+ {"CertFreeCertificateChain", Func, 0, ""},
+ {"CertFreeCertificateContext", Func, 0, ""},
+ {"CertGetCertificateChain", Func, 0, ""},
+ {"CertInfo", Type, 11, ""},
+ {"CertOpenStore", Func, 0, ""},
+ {"CertOpenSystemStore", Func, 0, ""},
+ {"CertRevocationCrlInfo", Type, 11, ""},
+ {"CertRevocationInfo", Type, 0, ""},
+ {"CertRevocationInfo.CrlInfo", Field, 0, ""},
+ {"CertRevocationInfo.FreshnessTime", Field, 0, ""},
+ {"CertRevocationInfo.HasFreshnessTime", Field, 0, ""},
+ {"CertRevocationInfo.OidSpecificInfo", Field, 0, ""},
+ {"CertRevocationInfo.RevocationOid", Field, 0, ""},
+ {"CertRevocationInfo.RevocationResult", Field, 0, ""},
+ {"CertRevocationInfo.Size", Field, 0, ""},
+ {"CertSimpleChain", Type, 0, ""},
+ {"CertSimpleChain.Elements", Field, 0, ""},
+ {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0, ""},
+ {"CertSimpleChain.NumElements", Field, 0, ""},
+ {"CertSimpleChain.RevocationFreshnessTime", Field, 0, ""},
+ {"CertSimpleChain.Size", Field, 0, ""},
+ {"CertSimpleChain.TrustListInfo", Field, 0, ""},
+ {"CertSimpleChain.TrustStatus", Field, 0, ""},
+ {"CertTrustListInfo", Type, 11, ""},
+ {"CertTrustStatus", Type, 0, ""},
+ {"CertTrustStatus.ErrorStatus", Field, 0, ""},
+ {"CertTrustStatus.InfoStatus", Field, 0, ""},
+ {"CertUsageMatch", Type, 0, ""},
+ {"CertUsageMatch.Type", Field, 0, ""},
+ {"CertUsageMatch.Usage", Field, 0, ""},
+ {"CertVerifyCertificateChainPolicy", Func, 0, ""},
+ {"Chdir", Func, 0, "func(path string) (err error)"},
+ {"CheckBpfVersion", Func, 0, ""},
+ {"Chflags", Func, 0, ""},
+ {"Chmod", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Chown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+ {"Chroot", Func, 0, "func(path string) (err error)"},
+ {"Clearenv", Func, 0, "func()"},
+ {"Close", Func, 0, "func(fd int) (err error)"},
+ {"CloseHandle", Func, 0, ""},
+ {"CloseOnExec", Func, 0, "func(fd int)"},
+ {"Closesocket", Func, 0, ""},
+ {"CmsgLen", Func, 0, "func(datalen int) int"},
+ {"CmsgSpace", Func, 0, "func(datalen int) int"},
+ {"Cmsghdr", Type, 0, ""},
+ {"Cmsghdr.Len", Field, 0, ""},
+ {"Cmsghdr.Level", Field, 0, ""},
+ {"Cmsghdr.Type", Field, 0, ""},
+ {"Cmsghdr.X__cmsg_data", Field, 0, ""},
+ {"CommandLineToArgv", Func, 0, ""},
+ {"ComputerName", Func, 0, ""},
+ {"Conn", Type, 9, ""},
+ {"Connect", Func, 0, "func(fd int, sa Sockaddr) (err error)"},
+ {"ConnectEx", Func, 1, ""},
+ {"ConvertSidToStringSid", Func, 0, ""},
+ {"ConvertStringSidToSid", Func, 0, ""},
+ {"CopySid", Func, 0, ""},
+ {"Creat", Func, 0, "func(path string, mode uint32) (fd int, err error)"},
+ {"CreateDirectory", Func, 0, ""},
+ {"CreateFile", Func, 0, ""},
+ {"CreateFileMapping", Func, 0, ""},
+ {"CreateHardLink", Func, 4, ""},
+ {"CreateIoCompletionPort", Func, 0, ""},
+ {"CreatePipe", Func, 0, ""},
+ {"CreateProcess", Func, 0, ""},
+ {"CreateProcessAsUser", Func, 10, ""},
+ {"CreateSymbolicLink", Func, 4, ""},
+ {"CreateToolhelp32Snapshot", Func, 4, ""},
+ {"Credential", Type, 0, ""},
+ {"Credential.Gid", Field, 0, ""},
+ {"Credential.Groups", Field, 0, ""},
+ {"Credential.NoSetGroups", Field, 9, ""},
+ {"Credential.Uid", Field, 0, ""},
+ {"CryptAcquireContext", Func, 0, ""},
+ {"CryptGenRandom", Func, 0, ""},
+ {"CryptReleaseContext", Func, 0, ""},
+ {"DIOCBSFLUSH", Const, 1, ""},
+ {"DIOCOSFPFLUSH", Const, 1, ""},
+ {"DLL", Type, 0, ""},
+ {"DLL.Handle", Field, 0, ""},
+ {"DLL.Name", Field, 0, ""},
+ {"DLLError", Type, 0, ""},
+ {"DLLError.Err", Field, 0, ""},
+ {"DLLError.Msg", Field, 0, ""},
+ {"DLLError.ObjName", Field, 0, ""},
+ {"DLT_A429", Const, 0, ""},
+ {"DLT_A653_ICM", Const, 0, ""},
+ {"DLT_AIRONET_HEADER", Const, 0, ""},
+ {"DLT_AOS", Const, 1, ""},
+ {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0, ""},
+ {"DLT_ARCNET", Const, 0, ""},
+ {"DLT_ARCNET_LINUX", Const, 0, ""},
+ {"DLT_ATM_CLIP", Const, 0, ""},
+ {"DLT_ATM_RFC1483", Const, 0, ""},
+ {"DLT_AURORA", Const, 0, ""},
+ {"DLT_AX25", Const, 0, ""},
+ {"DLT_AX25_KISS", Const, 0, ""},
+ {"DLT_BACNET_MS_TP", Const, 0, ""},
+ {"DLT_BLUETOOTH_HCI_H4", Const, 0, ""},
+ {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0, ""},
+ {"DLT_CAN20B", Const, 0, ""},
+ {"DLT_CAN_SOCKETCAN", Const, 1, ""},
+ {"DLT_CHAOS", Const, 0, ""},
+ {"DLT_CHDLC", Const, 0, ""},
+ {"DLT_CISCO_IOS", Const, 0, ""},
+ {"DLT_C_HDLC", Const, 0, ""},
+ {"DLT_C_HDLC_WITH_DIR", Const, 0, ""},
+ {"DLT_DBUS", Const, 1, ""},
+ {"DLT_DECT", Const, 1, ""},
+ {"DLT_DOCSIS", Const, 0, ""},
+ {"DLT_DVB_CI", Const, 1, ""},
+ {"DLT_ECONET", Const, 0, ""},
+ {"DLT_EN10MB", Const, 0, ""},
+ {"DLT_EN3MB", Const, 0, ""},
+ {"DLT_ENC", Const, 0, ""},
+ {"DLT_ERF", Const, 0, ""},
+ {"DLT_ERF_ETH", Const, 0, ""},
+ {"DLT_ERF_POS", Const, 0, ""},
+ {"DLT_FC_2", Const, 1, ""},
+ {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1, ""},
+ {"DLT_FDDI", Const, 0, ""},
+ {"DLT_FLEXRAY", Const, 0, ""},
+ {"DLT_FRELAY", Const, 0, ""},
+ {"DLT_FRELAY_WITH_DIR", Const, 0, ""},
+ {"DLT_GCOM_SERIAL", Const, 0, ""},
+ {"DLT_GCOM_T1E1", Const, 0, ""},
+ {"DLT_GPF_F", Const, 0, ""},
+ {"DLT_GPF_T", Const, 0, ""},
+ {"DLT_GPRS_LLC", Const, 0, ""},
+ {"DLT_GSMTAP_ABIS", Const, 1, ""},
+ {"DLT_GSMTAP_UM", Const, 1, ""},
+ {"DLT_HDLC", Const, 1, ""},
+ {"DLT_HHDLC", Const, 0, ""},
+ {"DLT_HIPPI", Const, 1, ""},
+ {"DLT_IBM_SN", Const, 0, ""},
+ {"DLT_IBM_SP", Const, 0, ""},
+ {"DLT_IEEE802", Const, 0, ""},
+ {"DLT_IEEE802_11", Const, 0, ""},
+ {"DLT_IEEE802_11_RADIO", Const, 0, ""},
+ {"DLT_IEEE802_11_RADIO_AVS", Const, 0, ""},
+ {"DLT_IEEE802_15_4", Const, 0, ""},
+ {"DLT_IEEE802_15_4_LINUX", Const, 0, ""},
+ {"DLT_IEEE802_15_4_NOFCS", Const, 1, ""},
+ {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0, ""},
+ {"DLT_IEEE802_16_MAC_CPS", Const, 0, ""},
+ {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0, ""},
+ {"DLT_IPFILTER", Const, 0, ""},
+ {"DLT_IPMB", Const, 0, ""},
+ {"DLT_IPMB_LINUX", Const, 0, ""},
+ {"DLT_IPNET", Const, 1, ""},
+ {"DLT_IPOIB", Const, 1, ""},
+ {"DLT_IPV4", Const, 1, ""},
+ {"DLT_IPV6", Const, 1, ""},
+ {"DLT_IP_OVER_FC", Const, 0, ""},
+ {"DLT_JUNIPER_ATM1", Const, 0, ""},
+ {"DLT_JUNIPER_ATM2", Const, 0, ""},
+ {"DLT_JUNIPER_ATM_CEMIC", Const, 1, ""},
+ {"DLT_JUNIPER_CHDLC", Const, 0, ""},
+ {"DLT_JUNIPER_ES", Const, 0, ""},
+ {"DLT_JUNIPER_ETHER", Const, 0, ""},
+ {"DLT_JUNIPER_FIBRECHANNEL", Const, 1, ""},
+ {"DLT_JUNIPER_FRELAY", Const, 0, ""},
+ {"DLT_JUNIPER_GGSN", Const, 0, ""},
+ {"DLT_JUNIPER_ISM", Const, 0, ""},
+ {"DLT_JUNIPER_MFR", Const, 0, ""},
+ {"DLT_JUNIPER_MLFR", Const, 0, ""},
+ {"DLT_JUNIPER_MLPPP", Const, 0, ""},
+ {"DLT_JUNIPER_MONITOR", Const, 0, ""},
+ {"DLT_JUNIPER_PIC_PEER", Const, 0, ""},
+ {"DLT_JUNIPER_PPP", Const, 0, ""},
+ {"DLT_JUNIPER_PPPOE", Const, 0, ""},
+ {"DLT_JUNIPER_PPPOE_ATM", Const, 0, ""},
+ {"DLT_JUNIPER_SERVICES", Const, 0, ""},
+ {"DLT_JUNIPER_SRX_E2E", Const, 1, ""},
+ {"DLT_JUNIPER_ST", Const, 0, ""},
+ {"DLT_JUNIPER_VP", Const, 0, ""},
+ {"DLT_JUNIPER_VS", Const, 1, ""},
+ {"DLT_LAPB_WITH_DIR", Const, 0, ""},
+ {"DLT_LAPD", Const, 0, ""},
+ {"DLT_LIN", Const, 0, ""},
+ {"DLT_LINUX_EVDEV", Const, 1, ""},
+ {"DLT_LINUX_IRDA", Const, 0, ""},
+ {"DLT_LINUX_LAPD", Const, 0, ""},
+ {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0, ""},
+ {"DLT_LINUX_SLL", Const, 0, ""},
+ {"DLT_LOOP", Const, 0, ""},
+ {"DLT_LTALK", Const, 0, ""},
+ {"DLT_MATCHING_MAX", Const, 1, ""},
+ {"DLT_MATCHING_MIN", Const, 1, ""},
+ {"DLT_MFR", Const, 0, ""},
+ {"DLT_MOST", Const, 0, ""},
+ {"DLT_MPEG_2_TS", Const, 1, ""},
+ {"DLT_MPLS", Const, 1, ""},
+ {"DLT_MTP2", Const, 0, ""},
+ {"DLT_MTP2_WITH_PHDR", Const, 0, ""},
+ {"DLT_MTP3", Const, 0, ""},
+ {"DLT_MUX27010", Const, 1, ""},
+ {"DLT_NETANALYZER", Const, 1, ""},
+ {"DLT_NETANALYZER_TRANSPARENT", Const, 1, ""},
+ {"DLT_NFC_LLCP", Const, 1, ""},
+ {"DLT_NFLOG", Const, 1, ""},
+ {"DLT_NG40", Const, 1, ""},
+ {"DLT_NULL", Const, 0, ""},
+ {"DLT_PCI_EXP", Const, 0, ""},
+ {"DLT_PFLOG", Const, 0, ""},
+ {"DLT_PFSYNC", Const, 0, ""},
+ {"DLT_PPI", Const, 0, ""},
+ {"DLT_PPP", Const, 0, ""},
+ {"DLT_PPP_BSDOS", Const, 0, ""},
+ {"DLT_PPP_ETHER", Const, 0, ""},
+ {"DLT_PPP_PPPD", Const, 0, ""},
+ {"DLT_PPP_SERIAL", Const, 0, ""},
+ {"DLT_PPP_WITH_DIR", Const, 0, ""},
+ {"DLT_PPP_WITH_DIRECTION", Const, 0, ""},
+ {"DLT_PRISM_HEADER", Const, 0, ""},
+ {"DLT_PRONET", Const, 0, ""},
+ {"DLT_RAIF1", Const, 0, ""},
+ {"DLT_RAW", Const, 0, ""},
+ {"DLT_RAWAF_MASK", Const, 1, ""},
+ {"DLT_RIO", Const, 0, ""},
+ {"DLT_SCCP", Const, 0, ""},
+ {"DLT_SITA", Const, 0, ""},
+ {"DLT_SLIP", Const, 0, ""},
+ {"DLT_SLIP_BSDOS", Const, 0, ""},
+ {"DLT_STANAG_5066_D_PDU", Const, 1, ""},
+ {"DLT_SUNATM", Const, 0, ""},
+ {"DLT_SYMANTEC_FIREWALL", Const, 0, ""},
+ {"DLT_TZSP", Const, 0, ""},
+ {"DLT_USB", Const, 0, ""},
+ {"DLT_USB_LINUX", Const, 0, ""},
+ {"DLT_USB_LINUX_MMAPPED", Const, 1, ""},
+ {"DLT_USER0", Const, 0, ""},
+ {"DLT_USER1", Const, 0, ""},
+ {"DLT_USER10", Const, 0, ""},
+ {"DLT_USER11", Const, 0, ""},
+ {"DLT_USER12", Const, 0, ""},
+ {"DLT_USER13", Const, 0, ""},
+ {"DLT_USER14", Const, 0, ""},
+ {"DLT_USER15", Const, 0, ""},
+ {"DLT_USER2", Const, 0, ""},
+ {"DLT_USER3", Const, 0, ""},
+ {"DLT_USER4", Const, 0, ""},
+ {"DLT_USER5", Const, 0, ""},
+ {"DLT_USER6", Const, 0, ""},
+ {"DLT_USER7", Const, 0, ""},
+ {"DLT_USER8", Const, 0, ""},
+ {"DLT_USER9", Const, 0, ""},
+ {"DLT_WIHART", Const, 1, ""},
+ {"DLT_X2E_SERIAL", Const, 0, ""},
+ {"DLT_X2E_XORAYA", Const, 0, ""},
+ {"DNSMXData", Type, 0, ""},
+ {"DNSMXData.NameExchange", Field, 0, ""},
+ {"DNSMXData.Pad", Field, 0, ""},
+ {"DNSMXData.Preference", Field, 0, ""},
+ {"DNSPTRData", Type, 0, ""},
+ {"DNSPTRData.Host", Field, 0, ""},
+ {"DNSRecord", Type, 0, ""},
+ {"DNSRecord.Data", Field, 0, ""},
+ {"DNSRecord.Dw", Field, 0, ""},
+ {"DNSRecord.Length", Field, 0, ""},
+ {"DNSRecord.Name", Field, 0, ""},
+ {"DNSRecord.Next", Field, 0, ""},
+ {"DNSRecord.Reserved", Field, 0, ""},
+ {"DNSRecord.Ttl", Field, 0, ""},
+ {"DNSRecord.Type", Field, 0, ""},
+ {"DNSSRVData", Type, 0, ""},
+ {"DNSSRVData.Pad", Field, 0, ""},
+ {"DNSSRVData.Port", Field, 0, ""},
+ {"DNSSRVData.Priority", Field, 0, ""},
+ {"DNSSRVData.Target", Field, 0, ""},
+ {"DNSSRVData.Weight", Field, 0, ""},
+ {"DNSTXTData", Type, 0, ""},
+ {"DNSTXTData.StringArray", Field, 0, ""},
+ {"DNSTXTData.StringCount", Field, 0, ""},
+ {"DNS_INFO_NO_RECORDS", Const, 4, ""},
+ {"DNS_TYPE_A", Const, 0, ""},
+ {"DNS_TYPE_A6", Const, 0, ""},
+ {"DNS_TYPE_AAAA", Const, 0, ""},
+ {"DNS_TYPE_ADDRS", Const, 0, ""},
+ {"DNS_TYPE_AFSDB", Const, 0, ""},
+ {"DNS_TYPE_ALL", Const, 0, ""},
+ {"DNS_TYPE_ANY", Const, 0, ""},
+ {"DNS_TYPE_ATMA", Const, 0, ""},
+ {"DNS_TYPE_AXFR", Const, 0, ""},
+ {"DNS_TYPE_CERT", Const, 0, ""},
+ {"DNS_TYPE_CNAME", Const, 0, ""},
+ {"DNS_TYPE_DHCID", Const, 0, ""},
+ {"DNS_TYPE_DNAME", Const, 0, ""},
+ {"DNS_TYPE_DNSKEY", Const, 0, ""},
+ {"DNS_TYPE_DS", Const, 0, ""},
+ {"DNS_TYPE_EID", Const, 0, ""},
+ {"DNS_TYPE_GID", Const, 0, ""},
+ {"DNS_TYPE_GPOS", Const, 0, ""},
+ {"DNS_TYPE_HINFO", Const, 0, ""},
+ {"DNS_TYPE_ISDN", Const, 0, ""},
+ {"DNS_TYPE_IXFR", Const, 0, ""},
+ {"DNS_TYPE_KEY", Const, 0, ""},
+ {"DNS_TYPE_KX", Const, 0, ""},
+ {"DNS_TYPE_LOC", Const, 0, ""},
+ {"DNS_TYPE_MAILA", Const, 0, ""},
+ {"DNS_TYPE_MAILB", Const, 0, ""},
+ {"DNS_TYPE_MB", Const, 0, ""},
+ {"DNS_TYPE_MD", Const, 0, ""},
+ {"DNS_TYPE_MF", Const, 0, ""},
+ {"DNS_TYPE_MG", Const, 0, ""},
+ {"DNS_TYPE_MINFO", Const, 0, ""},
+ {"DNS_TYPE_MR", Const, 0, ""},
+ {"DNS_TYPE_MX", Const, 0, ""},
+ {"DNS_TYPE_NAPTR", Const, 0, ""},
+ {"DNS_TYPE_NBSTAT", Const, 0, ""},
+ {"DNS_TYPE_NIMLOC", Const, 0, ""},
+ {"DNS_TYPE_NS", Const, 0, ""},
+ {"DNS_TYPE_NSAP", Const, 0, ""},
+ {"DNS_TYPE_NSAPPTR", Const, 0, ""},
+ {"DNS_TYPE_NSEC", Const, 0, ""},
+ {"DNS_TYPE_NULL", Const, 0, ""},
+ {"DNS_TYPE_NXT", Const, 0, ""},
+ {"DNS_TYPE_OPT", Const, 0, ""},
+ {"DNS_TYPE_PTR", Const, 0, ""},
+ {"DNS_TYPE_PX", Const, 0, ""},
+ {"DNS_TYPE_RP", Const, 0, ""},
+ {"DNS_TYPE_RRSIG", Const, 0, ""},
+ {"DNS_TYPE_RT", Const, 0, ""},
+ {"DNS_TYPE_SIG", Const, 0, ""},
+ {"DNS_TYPE_SINK", Const, 0, ""},
+ {"DNS_TYPE_SOA", Const, 0, ""},
+ {"DNS_TYPE_SRV", Const, 0, ""},
+ {"DNS_TYPE_TEXT", Const, 0, ""},
+ {"DNS_TYPE_TKEY", Const, 0, ""},
+ {"DNS_TYPE_TSIG", Const, 0, ""},
+ {"DNS_TYPE_UID", Const, 0, ""},
+ {"DNS_TYPE_UINFO", Const, 0, ""},
+ {"DNS_TYPE_UNSPEC", Const, 0, ""},
+ {"DNS_TYPE_WINS", Const, 0, ""},
+ {"DNS_TYPE_WINSR", Const, 0, ""},
+ {"DNS_TYPE_WKS", Const, 0, ""},
+ {"DNS_TYPE_X25", Const, 0, ""},
+ {"DT_BLK", Const, 0, ""},
+ {"DT_CHR", Const, 0, ""},
+ {"DT_DIR", Const, 0, ""},
+ {"DT_FIFO", Const, 0, ""},
+ {"DT_LNK", Const, 0, ""},
+ {"DT_REG", Const, 0, ""},
+ {"DT_SOCK", Const, 0, ""},
+ {"DT_UNKNOWN", Const, 0, ""},
+ {"DT_WHT", Const, 0, ""},
+ {"DUPLICATE_CLOSE_SOURCE", Const, 0, ""},
+ {"DUPLICATE_SAME_ACCESS", Const, 0, ""},
+ {"DeleteFile", Func, 0, ""},
+ {"DetachLsf", Func, 0, "func(fd int) error"},
+ {"DeviceIoControl", Func, 4, ""},
+ {"Dirent", Type, 0, ""},
+ {"Dirent.Fileno", Field, 0, ""},
+ {"Dirent.Ino", Field, 0, ""},
+ {"Dirent.Name", Field, 0, ""},
+ {"Dirent.Namlen", Field, 0, ""},
+ {"Dirent.Off", Field, 0, ""},
+ {"Dirent.Pad0", Field, 12, ""},
+ {"Dirent.Pad1", Field, 12, ""},
+ {"Dirent.Pad_cgo_0", Field, 0, ""},
+ {"Dirent.Reclen", Field, 0, ""},
+ {"Dirent.Seekoff", Field, 0, ""},
+ {"Dirent.Type", Field, 0, ""},
+ {"Dirent.X__d_padding", Field, 3, ""},
+ {"DnsNameCompare", Func, 4, ""},
+ {"DnsQuery", Func, 0, ""},
+ {"DnsRecordListFree", Func, 0, ""},
+ {"DnsSectionAdditional", Const, 4, ""},
+ {"DnsSectionAnswer", Const, 4, ""},
+ {"DnsSectionAuthority", Const, 4, ""},
+ {"DnsSectionQuestion", Const, 4, ""},
+ {"Dup", Func, 0, "func(oldfd int) (fd int, err error)"},
+ {"Dup2", Func, 0, "func(oldfd int, newfd int) (err error)"},
+ {"Dup3", Func, 2, "func(oldfd int, newfd int, flags int) (err error)"},
+ {"DuplicateHandle", Func, 0, ""},
+ {"E2BIG", Const, 0, ""},
+ {"EACCES", Const, 0, ""},
+ {"EADDRINUSE", Const, 0, ""},
+ {"EADDRNOTAVAIL", Const, 0, ""},
+ {"EADV", Const, 0, ""},
+ {"EAFNOSUPPORT", Const, 0, ""},
+ {"EAGAIN", Const, 0, ""},
+ {"EALREADY", Const, 0, ""},
+ {"EAUTH", Const, 0, ""},
+ {"EBADARCH", Const, 0, ""},
+ {"EBADE", Const, 0, ""},
+ {"EBADEXEC", Const, 0, ""},
+ {"EBADF", Const, 0, ""},
+ {"EBADFD", Const, 0, ""},
+ {"EBADMACHO", Const, 0, ""},
+ {"EBADMSG", Const, 0, ""},
+ {"EBADR", Const, 0, ""},
+ {"EBADRPC", Const, 0, ""},
+ {"EBADRQC", Const, 0, ""},
+ {"EBADSLT", Const, 0, ""},
+ {"EBFONT", Const, 0, ""},
+ {"EBUSY", Const, 0, ""},
+ {"ECANCELED", Const, 0, ""},
+ {"ECAPMODE", Const, 1, ""},
+ {"ECHILD", Const, 0, ""},
+ {"ECHO", Const, 0, ""},
+ {"ECHOCTL", Const, 0, ""},
+ {"ECHOE", Const, 0, ""},
+ {"ECHOK", Const, 0, ""},
+ {"ECHOKE", Const, 0, ""},
+ {"ECHONL", Const, 0, ""},
+ {"ECHOPRT", Const, 0, ""},
+ {"ECHRNG", Const, 0, ""},
+ {"ECOMM", Const, 0, ""},
+ {"ECONNABORTED", Const, 0, ""},
+ {"ECONNREFUSED", Const, 0, ""},
+ {"ECONNRESET", Const, 0, ""},
+ {"EDEADLK", Const, 0, ""},
+ {"EDEADLOCK", Const, 0, ""},
+ {"EDESTADDRREQ", Const, 0, ""},
+ {"EDEVERR", Const, 0, ""},
+ {"EDOM", Const, 0, ""},
+ {"EDOOFUS", Const, 0, ""},
+ {"EDOTDOT", Const, 0, ""},
+ {"EDQUOT", Const, 0, ""},
+ {"EEXIST", Const, 0, ""},
+ {"EFAULT", Const, 0, ""},
+ {"EFBIG", Const, 0, ""},
+ {"EFER_LMA", Const, 1, ""},
+ {"EFER_LME", Const, 1, ""},
+ {"EFER_NXE", Const, 1, ""},
+ {"EFER_SCE", Const, 1, ""},
+ {"EFTYPE", Const, 0, ""},
+ {"EHOSTDOWN", Const, 0, ""},
+ {"EHOSTUNREACH", Const, 0, ""},
+ {"EHWPOISON", Const, 0, ""},
+ {"EIDRM", Const, 0, ""},
+ {"EILSEQ", Const, 0, ""},
+ {"EINPROGRESS", Const, 0, ""},
+ {"EINTR", Const, 0, ""},
+ {"EINVAL", Const, 0, ""},
+ {"EIO", Const, 0, ""},
+ {"EIPSEC", Const, 1, ""},
+ {"EISCONN", Const, 0, ""},
+ {"EISDIR", Const, 0, ""},
+ {"EISNAM", Const, 0, ""},
+ {"EKEYEXPIRED", Const, 0, ""},
+ {"EKEYREJECTED", Const, 0, ""},
+ {"EKEYREVOKED", Const, 0, ""},
+ {"EL2HLT", Const, 0, ""},
+ {"EL2NSYNC", Const, 0, ""},
+ {"EL3HLT", Const, 0, ""},
+ {"EL3RST", Const, 0, ""},
+ {"ELAST", Const, 0, ""},
+ {"ELF_NGREG", Const, 0, ""},
+ {"ELF_PRARGSZ", Const, 0, ""},
+ {"ELIBACC", Const, 0, ""},
+ {"ELIBBAD", Const, 0, ""},
+ {"ELIBEXEC", Const, 0, ""},
+ {"ELIBMAX", Const, 0, ""},
+ {"ELIBSCN", Const, 0, ""},
+ {"ELNRNG", Const, 0, ""},
+ {"ELOOP", Const, 0, ""},
+ {"EMEDIUMTYPE", Const, 0, ""},
+ {"EMFILE", Const, 0, ""},
+ {"EMLINK", Const, 0, ""},
+ {"EMSGSIZE", Const, 0, ""},
+ {"EMT_TAGOVF", Const, 1, ""},
+ {"EMULTIHOP", Const, 0, ""},
+ {"EMUL_ENABLED", Const, 1, ""},
+ {"EMUL_LINUX", Const, 1, ""},
+ {"EMUL_LINUX32", Const, 1, ""},
+ {"EMUL_MAXID", Const, 1, ""},
+ {"EMUL_NATIVE", Const, 1, ""},
+ {"ENAMETOOLONG", Const, 0, ""},
+ {"ENAVAIL", Const, 0, ""},
+ {"ENDRUNDISC", Const, 1, ""},
+ {"ENEEDAUTH", Const, 0, ""},
+ {"ENETDOWN", Const, 0, ""},
+ {"ENETRESET", Const, 0, ""},
+ {"ENETUNREACH", Const, 0, ""},
+ {"ENFILE", Const, 0, ""},
+ {"ENOANO", Const, 0, ""},
+ {"ENOATTR", Const, 0, ""},
+ {"ENOBUFS", Const, 0, ""},
+ {"ENOCSI", Const, 0, ""},
+ {"ENODATA", Const, 0, ""},
+ {"ENODEV", Const, 0, ""},
+ {"ENOENT", Const, 0, ""},
+ {"ENOEXEC", Const, 0, ""},
+ {"ENOKEY", Const, 0, ""},
+ {"ENOLCK", Const, 0, ""},
+ {"ENOLINK", Const, 0, ""},
+ {"ENOMEDIUM", Const, 0, ""},
+ {"ENOMEM", Const, 0, ""},
+ {"ENOMSG", Const, 0, ""},
+ {"ENONET", Const, 0, ""},
+ {"ENOPKG", Const, 0, ""},
+ {"ENOPOLICY", Const, 0, ""},
+ {"ENOPROTOOPT", Const, 0, ""},
+ {"ENOSPC", Const, 0, ""},
+ {"ENOSR", Const, 0, ""},
+ {"ENOSTR", Const, 0, ""},
+ {"ENOSYS", Const, 0, ""},
+ {"ENOTBLK", Const, 0, ""},
+ {"ENOTCAPABLE", Const, 0, ""},
+ {"ENOTCONN", Const, 0, ""},
+ {"ENOTDIR", Const, 0, ""},
+ {"ENOTEMPTY", Const, 0, ""},
+ {"ENOTNAM", Const, 0, ""},
+ {"ENOTRECOVERABLE", Const, 0, ""},
+ {"ENOTSOCK", Const, 0, ""},
+ {"ENOTSUP", Const, 0, ""},
+ {"ENOTTY", Const, 0, ""},
+ {"ENOTUNIQ", Const, 0, ""},
+ {"ENXIO", Const, 0, ""},
+ {"EN_SW_CTL_INF", Const, 1, ""},
+ {"EN_SW_CTL_PREC", Const, 1, ""},
+ {"EN_SW_CTL_ROUND", Const, 1, ""},
+ {"EN_SW_DATACHAIN", Const, 1, ""},
+ {"EN_SW_DENORM", Const, 1, ""},
+ {"EN_SW_INVOP", Const, 1, ""},
+ {"EN_SW_OVERFLOW", Const, 1, ""},
+ {"EN_SW_PRECLOSS", Const, 1, ""},
+ {"EN_SW_UNDERFLOW", Const, 1, ""},
+ {"EN_SW_ZERODIV", Const, 1, ""},
+ {"EOPNOTSUPP", Const, 0, ""},
+ {"EOVERFLOW", Const, 0, ""},
+ {"EOWNERDEAD", Const, 0, ""},
+ {"EPERM", Const, 0, ""},
+ {"EPFNOSUPPORT", Const, 0, ""},
+ {"EPIPE", Const, 0, ""},
+ {"EPOLLERR", Const, 0, ""},
+ {"EPOLLET", Const, 0, ""},
+ {"EPOLLHUP", Const, 0, ""},
+ {"EPOLLIN", Const, 0, ""},
+ {"EPOLLMSG", Const, 0, ""},
+ {"EPOLLONESHOT", Const, 0, ""},
+ {"EPOLLOUT", Const, 0, ""},
+ {"EPOLLPRI", Const, 0, ""},
+ {"EPOLLRDBAND", Const, 0, ""},
+ {"EPOLLRDHUP", Const, 0, ""},
+ {"EPOLLRDNORM", Const, 0, ""},
+ {"EPOLLWRBAND", Const, 0, ""},
+ {"EPOLLWRNORM", Const, 0, ""},
+ {"EPOLL_CLOEXEC", Const, 0, ""},
+ {"EPOLL_CTL_ADD", Const, 0, ""},
+ {"EPOLL_CTL_DEL", Const, 0, ""},
+ {"EPOLL_CTL_MOD", Const, 0, ""},
+ {"EPOLL_NONBLOCK", Const, 0, ""},
+ {"EPROCLIM", Const, 0, ""},
+ {"EPROCUNAVAIL", Const, 0, ""},
+ {"EPROGMISMATCH", Const, 0, ""},
+ {"EPROGUNAVAIL", Const, 0, ""},
+ {"EPROTO", Const, 0, ""},
+ {"EPROTONOSUPPORT", Const, 0, ""},
+ {"EPROTOTYPE", Const, 0, ""},
+ {"EPWROFF", Const, 0, ""},
+ {"EQFULL", Const, 16, ""},
+ {"ERANGE", Const, 0, ""},
+ {"EREMCHG", Const, 0, ""},
+ {"EREMOTE", Const, 0, ""},
+ {"EREMOTEIO", Const, 0, ""},
+ {"ERESTART", Const, 0, ""},
+ {"ERFKILL", Const, 0, ""},
+ {"EROFS", Const, 0, ""},
+ {"ERPCMISMATCH", Const, 0, ""},
+ {"ERROR_ACCESS_DENIED", Const, 0, ""},
+ {"ERROR_ALREADY_EXISTS", Const, 0, ""},
+ {"ERROR_BROKEN_PIPE", Const, 0, ""},
+ {"ERROR_BUFFER_OVERFLOW", Const, 0, ""},
+ {"ERROR_DIR_NOT_EMPTY", Const, 8, ""},
+ {"ERROR_ENVVAR_NOT_FOUND", Const, 0, ""},
+ {"ERROR_FILE_EXISTS", Const, 0, ""},
+ {"ERROR_FILE_NOT_FOUND", Const, 0, ""},
+ {"ERROR_HANDLE_EOF", Const, 2, ""},
+ {"ERROR_INSUFFICIENT_BUFFER", Const, 0, ""},
+ {"ERROR_IO_PENDING", Const, 0, ""},
+ {"ERROR_MOD_NOT_FOUND", Const, 0, ""},
+ {"ERROR_MORE_DATA", Const, 3, ""},
+ {"ERROR_NETNAME_DELETED", Const, 3, ""},
+ {"ERROR_NOT_FOUND", Const, 1, ""},
+ {"ERROR_NO_MORE_FILES", Const, 0, ""},
+ {"ERROR_OPERATION_ABORTED", Const, 0, ""},
+ {"ERROR_PATH_NOT_FOUND", Const, 0, ""},
+ {"ERROR_PRIVILEGE_NOT_HELD", Const, 4, ""},
+ {"ERROR_PROC_NOT_FOUND", Const, 0, ""},
+ {"ESHLIBVERS", Const, 0, ""},
+ {"ESHUTDOWN", Const, 0, ""},
+ {"ESOCKTNOSUPPORT", Const, 0, ""},
+ {"ESPIPE", Const, 0, ""},
+ {"ESRCH", Const, 0, ""},
+ {"ESRMNT", Const, 0, ""},
+ {"ESTALE", Const, 0, ""},
+ {"ESTRPIPE", Const, 0, ""},
+ {"ETHERCAP_JUMBO_MTU", Const, 1, ""},
+ {"ETHERCAP_VLAN_HWTAGGING", Const, 1, ""},
+ {"ETHERCAP_VLAN_MTU", Const, 1, ""},
+ {"ETHERMIN", Const, 1, ""},
+ {"ETHERMTU", Const, 1, ""},
+ {"ETHERMTU_JUMBO", Const, 1, ""},
+ {"ETHERTYPE_8023", Const, 1, ""},
+ {"ETHERTYPE_AARP", Const, 1, ""},
+ {"ETHERTYPE_ACCTON", Const, 1, ""},
+ {"ETHERTYPE_AEONIC", Const, 1, ""},
+ {"ETHERTYPE_ALPHA", Const, 1, ""},
+ {"ETHERTYPE_AMBER", Const, 1, ""},
+ {"ETHERTYPE_AMOEBA", Const, 1, ""},
+ {"ETHERTYPE_AOE", Const, 1, ""},
+ {"ETHERTYPE_APOLLO", Const, 1, ""},
+ {"ETHERTYPE_APOLLODOMAIN", Const, 1, ""},
+ {"ETHERTYPE_APPLETALK", Const, 1, ""},
+ {"ETHERTYPE_APPLITEK", Const, 1, ""},
+ {"ETHERTYPE_ARGONAUT", Const, 1, ""},
+ {"ETHERTYPE_ARP", Const, 1, ""},
+ {"ETHERTYPE_AT", Const, 1, ""},
+ {"ETHERTYPE_ATALK", Const, 1, ""},
+ {"ETHERTYPE_ATOMIC", Const, 1, ""},
+ {"ETHERTYPE_ATT", Const, 1, ""},
+ {"ETHERTYPE_ATTSTANFORD", Const, 1, ""},
+ {"ETHERTYPE_AUTOPHON", Const, 1, ""},
+ {"ETHERTYPE_AXIS", Const, 1, ""},
+ {"ETHERTYPE_BCLOOP", Const, 1, ""},
+ {"ETHERTYPE_BOFL", Const, 1, ""},
+ {"ETHERTYPE_CABLETRON", Const, 1, ""},
+ {"ETHERTYPE_CHAOS", Const, 1, ""},
+ {"ETHERTYPE_COMDESIGN", Const, 1, ""},
+ {"ETHERTYPE_COMPUGRAPHIC", Const, 1, ""},
+ {"ETHERTYPE_COUNTERPOINT", Const, 1, ""},
+ {"ETHERTYPE_CRONUS", Const, 1, ""},
+ {"ETHERTYPE_CRONUSVLN", Const, 1, ""},
+ {"ETHERTYPE_DCA", Const, 1, ""},
+ {"ETHERTYPE_DDE", Const, 1, ""},
+ {"ETHERTYPE_DEBNI", Const, 1, ""},
+ {"ETHERTYPE_DECAM", Const, 1, ""},
+ {"ETHERTYPE_DECCUST", Const, 1, ""},
+ {"ETHERTYPE_DECDIAG", Const, 1, ""},
+ {"ETHERTYPE_DECDNS", Const, 1, ""},
+ {"ETHERTYPE_DECDTS", Const, 1, ""},
+ {"ETHERTYPE_DECEXPER", Const, 1, ""},
+ {"ETHERTYPE_DECLAST", Const, 1, ""},
+ {"ETHERTYPE_DECLTM", Const, 1, ""},
+ {"ETHERTYPE_DECMUMPS", Const, 1, ""},
+ {"ETHERTYPE_DECNETBIOS", Const, 1, ""},
+ {"ETHERTYPE_DELTACON", Const, 1, ""},
+ {"ETHERTYPE_DIDDLE", Const, 1, ""},
+ {"ETHERTYPE_DLOG1", Const, 1, ""},
+ {"ETHERTYPE_DLOG2", Const, 1, ""},
+ {"ETHERTYPE_DN", Const, 1, ""},
+ {"ETHERTYPE_DOGFIGHT", Const, 1, ""},
+ {"ETHERTYPE_DSMD", Const, 1, ""},
+ {"ETHERTYPE_ECMA", Const, 1, ""},
+ {"ETHERTYPE_ENCRYPT", Const, 1, ""},
+ {"ETHERTYPE_ES", Const, 1, ""},
+ {"ETHERTYPE_EXCELAN", Const, 1, ""},
+ {"ETHERTYPE_EXPERDATA", Const, 1, ""},
+ {"ETHERTYPE_FLIP", Const, 1, ""},
+ {"ETHERTYPE_FLOWCONTROL", Const, 1, ""},
+ {"ETHERTYPE_FRARP", Const, 1, ""},
+ {"ETHERTYPE_GENDYN", Const, 1, ""},
+ {"ETHERTYPE_HAYES", Const, 1, ""},
+ {"ETHERTYPE_HIPPI_FP", Const, 1, ""},
+ {"ETHERTYPE_HITACHI", Const, 1, ""},
+ {"ETHERTYPE_HP", Const, 1, ""},
+ {"ETHERTYPE_IEEEPUP", Const, 1, ""},
+ {"ETHERTYPE_IEEEPUPAT", Const, 1, ""},
+ {"ETHERTYPE_IMLBL", Const, 1, ""},
+ {"ETHERTYPE_IMLBLDIAG", Const, 1, ""},
+ {"ETHERTYPE_IP", Const, 1, ""},
+ {"ETHERTYPE_IPAS", Const, 1, ""},
+ {"ETHERTYPE_IPV6", Const, 1, ""},
+ {"ETHERTYPE_IPX", Const, 1, ""},
+ {"ETHERTYPE_IPXNEW", Const, 1, ""},
+ {"ETHERTYPE_KALPANA", Const, 1, ""},
+ {"ETHERTYPE_LANBRIDGE", Const, 1, ""},
+ {"ETHERTYPE_LANPROBE", Const, 1, ""},
+ {"ETHERTYPE_LAT", Const, 1, ""},
+ {"ETHERTYPE_LBACK", Const, 1, ""},
+ {"ETHERTYPE_LITTLE", Const, 1, ""},
+ {"ETHERTYPE_LLDP", Const, 1, ""},
+ {"ETHERTYPE_LOGICRAFT", Const, 1, ""},
+ {"ETHERTYPE_LOOPBACK", Const, 1, ""},
+ {"ETHERTYPE_MATRA", Const, 1, ""},
+ {"ETHERTYPE_MAX", Const, 1, ""},
+ {"ETHERTYPE_MERIT", Const, 1, ""},
+ {"ETHERTYPE_MICP", Const, 1, ""},
+ {"ETHERTYPE_MOPDL", Const, 1, ""},
+ {"ETHERTYPE_MOPRC", Const, 1, ""},
+ {"ETHERTYPE_MOTOROLA", Const, 1, ""},
+ {"ETHERTYPE_MPLS", Const, 1, ""},
+ {"ETHERTYPE_MPLS_MCAST", Const, 1, ""},
+ {"ETHERTYPE_MUMPS", Const, 1, ""},
+ {"ETHERTYPE_NBPCC", Const, 1, ""},
+ {"ETHERTYPE_NBPCLAIM", Const, 1, ""},
+ {"ETHERTYPE_NBPCLREQ", Const, 1, ""},
+ {"ETHERTYPE_NBPCLRSP", Const, 1, ""},
+ {"ETHERTYPE_NBPCREQ", Const, 1, ""},
+ {"ETHERTYPE_NBPCRSP", Const, 1, ""},
+ {"ETHERTYPE_NBPDG", Const, 1, ""},
+ {"ETHERTYPE_NBPDGB", Const, 1, ""},
+ {"ETHERTYPE_NBPDLTE", Const, 1, ""},
+ {"ETHERTYPE_NBPRAR", Const, 1, ""},
+ {"ETHERTYPE_NBPRAS", Const, 1, ""},
+ {"ETHERTYPE_NBPRST", Const, 1, ""},
+ {"ETHERTYPE_NBPSCD", Const, 1, ""},
+ {"ETHERTYPE_NBPVCD", Const, 1, ""},
+ {"ETHERTYPE_NBS", Const, 1, ""},
+ {"ETHERTYPE_NCD", Const, 1, ""},
+ {"ETHERTYPE_NESTAR", Const, 1, ""},
+ {"ETHERTYPE_NETBEUI", Const, 1, ""},
+ {"ETHERTYPE_NOVELL", Const, 1, ""},
+ {"ETHERTYPE_NS", Const, 1, ""},
+ {"ETHERTYPE_NSAT", Const, 1, ""},
+ {"ETHERTYPE_NSCOMPAT", Const, 1, ""},
+ {"ETHERTYPE_NTRAILER", Const, 1, ""},
+ {"ETHERTYPE_OS9", Const, 1, ""},
+ {"ETHERTYPE_OS9NET", Const, 1, ""},
+ {"ETHERTYPE_PACER", Const, 1, ""},
+ {"ETHERTYPE_PAE", Const, 1, ""},
+ {"ETHERTYPE_PCS", Const, 1, ""},
+ {"ETHERTYPE_PLANNING", Const, 1, ""},
+ {"ETHERTYPE_PPP", Const, 1, ""},
+ {"ETHERTYPE_PPPOE", Const, 1, ""},
+ {"ETHERTYPE_PPPOEDISC", Const, 1, ""},
+ {"ETHERTYPE_PRIMENTS", Const, 1, ""},
+ {"ETHERTYPE_PUP", Const, 1, ""},
+ {"ETHERTYPE_PUPAT", Const, 1, ""},
+ {"ETHERTYPE_QINQ", Const, 1, ""},
+ {"ETHERTYPE_RACAL", Const, 1, ""},
+ {"ETHERTYPE_RATIONAL", Const, 1, ""},
+ {"ETHERTYPE_RAWFR", Const, 1, ""},
+ {"ETHERTYPE_RCL", Const, 1, ""},
+ {"ETHERTYPE_RDP", Const, 1, ""},
+ {"ETHERTYPE_RETIX", Const, 1, ""},
+ {"ETHERTYPE_REVARP", Const, 1, ""},
+ {"ETHERTYPE_SCA", Const, 1, ""},
+ {"ETHERTYPE_SECTRA", Const, 1, ""},
+ {"ETHERTYPE_SECUREDATA", Const, 1, ""},
+ {"ETHERTYPE_SGITW", Const, 1, ""},
+ {"ETHERTYPE_SG_BOUNCE", Const, 1, ""},
+ {"ETHERTYPE_SG_DIAG", Const, 1, ""},
+ {"ETHERTYPE_SG_NETGAMES", Const, 1, ""},
+ {"ETHERTYPE_SG_RESV", Const, 1, ""},
+ {"ETHERTYPE_SIMNET", Const, 1, ""},
+ {"ETHERTYPE_SLOW", Const, 1, ""},
+ {"ETHERTYPE_SLOWPROTOCOLS", Const, 1, ""},
+ {"ETHERTYPE_SNA", Const, 1, ""},
+ {"ETHERTYPE_SNMP", Const, 1, ""},
+ {"ETHERTYPE_SONIX", Const, 1, ""},
+ {"ETHERTYPE_SPIDER", Const, 1, ""},
+ {"ETHERTYPE_SPRITE", Const, 1, ""},
+ {"ETHERTYPE_STP", Const, 1, ""},
+ {"ETHERTYPE_TALARIS", Const, 1, ""},
+ {"ETHERTYPE_TALARISMC", Const, 1, ""},
+ {"ETHERTYPE_TCPCOMP", Const, 1, ""},
+ {"ETHERTYPE_TCPSM", Const, 1, ""},
+ {"ETHERTYPE_TEC", Const, 1, ""},
+ {"ETHERTYPE_TIGAN", Const, 1, ""},
+ {"ETHERTYPE_TRAIL", Const, 1, ""},
+ {"ETHERTYPE_TRANSETHER", Const, 1, ""},
+ {"ETHERTYPE_TYMSHARE", Const, 1, ""},
+ {"ETHERTYPE_UBBST", Const, 1, ""},
+ {"ETHERTYPE_UBDEBUG", Const, 1, ""},
+ {"ETHERTYPE_UBDIAGLOOP", Const, 1, ""},
+ {"ETHERTYPE_UBDL", Const, 1, ""},
+ {"ETHERTYPE_UBNIU", Const, 1, ""},
+ {"ETHERTYPE_UBNMC", Const, 1, ""},
+ {"ETHERTYPE_VALID", Const, 1, ""},
+ {"ETHERTYPE_VARIAN", Const, 1, ""},
+ {"ETHERTYPE_VAXELN", Const, 1, ""},
+ {"ETHERTYPE_VEECO", Const, 1, ""},
+ {"ETHERTYPE_VEXP", Const, 1, ""},
+ {"ETHERTYPE_VGLAB", Const, 1, ""},
+ {"ETHERTYPE_VINES", Const, 1, ""},
+ {"ETHERTYPE_VINESECHO", Const, 1, ""},
+ {"ETHERTYPE_VINESLOOP", Const, 1, ""},
+ {"ETHERTYPE_VITAL", Const, 1, ""},
+ {"ETHERTYPE_VLAN", Const, 1, ""},
+ {"ETHERTYPE_VLTLMAN", Const, 1, ""},
+ {"ETHERTYPE_VPROD", Const, 1, ""},
+ {"ETHERTYPE_VURESERVED", Const, 1, ""},
+ {"ETHERTYPE_WATERLOO", Const, 1, ""},
+ {"ETHERTYPE_WELLFLEET", Const, 1, ""},
+ {"ETHERTYPE_X25", Const, 1, ""},
+ {"ETHERTYPE_X75", Const, 1, ""},
+ {"ETHERTYPE_XNSSM", Const, 1, ""},
+ {"ETHERTYPE_XTP", Const, 1, ""},
+ {"ETHER_ADDR_LEN", Const, 1, ""},
+ {"ETHER_ALIGN", Const, 1, ""},
+ {"ETHER_CRC_LEN", Const, 1, ""},
+ {"ETHER_CRC_POLY_BE", Const, 1, ""},
+ {"ETHER_CRC_POLY_LE", Const, 1, ""},
+ {"ETHER_HDR_LEN", Const, 1, ""},
+ {"ETHER_MAX_DIX_LEN", Const, 1, ""},
+ {"ETHER_MAX_LEN", Const, 1, ""},
+ {"ETHER_MAX_LEN_JUMBO", Const, 1, ""},
+ {"ETHER_MIN_LEN", Const, 1, ""},
+ {"ETHER_PPPOE_ENCAP_LEN", Const, 1, ""},
+ {"ETHER_TYPE_LEN", Const, 1, ""},
+ {"ETHER_VLAN_ENCAP_LEN", Const, 1, ""},
+ {"ETH_P_1588", Const, 0, ""},
+ {"ETH_P_8021Q", Const, 0, ""},
+ {"ETH_P_802_2", Const, 0, ""},
+ {"ETH_P_802_3", Const, 0, ""},
+ {"ETH_P_AARP", Const, 0, ""},
+ {"ETH_P_ALL", Const, 0, ""},
+ {"ETH_P_AOE", Const, 0, ""},
+ {"ETH_P_ARCNET", Const, 0, ""},
+ {"ETH_P_ARP", Const, 0, ""},
+ {"ETH_P_ATALK", Const, 0, ""},
+ {"ETH_P_ATMFATE", Const, 0, ""},
+ {"ETH_P_ATMMPOA", Const, 0, ""},
+ {"ETH_P_AX25", Const, 0, ""},
+ {"ETH_P_BPQ", Const, 0, ""},
+ {"ETH_P_CAIF", Const, 0, ""},
+ {"ETH_P_CAN", Const, 0, ""},
+ {"ETH_P_CONTROL", Const, 0, ""},
+ {"ETH_P_CUST", Const, 0, ""},
+ {"ETH_P_DDCMP", Const, 0, ""},
+ {"ETH_P_DEC", Const, 0, ""},
+ {"ETH_P_DIAG", Const, 0, ""},
+ {"ETH_P_DNA_DL", Const, 0, ""},
+ {"ETH_P_DNA_RC", Const, 0, ""},
+ {"ETH_P_DNA_RT", Const, 0, ""},
+ {"ETH_P_DSA", Const, 0, ""},
+ {"ETH_P_ECONET", Const, 0, ""},
+ {"ETH_P_EDSA", Const, 0, ""},
+ {"ETH_P_FCOE", Const, 0, ""},
+ {"ETH_P_FIP", Const, 0, ""},
+ {"ETH_P_HDLC", Const, 0, ""},
+ {"ETH_P_IEEE802154", Const, 0, ""},
+ {"ETH_P_IEEEPUP", Const, 0, ""},
+ {"ETH_P_IEEEPUPAT", Const, 0, ""},
+ {"ETH_P_IP", Const, 0, ""},
+ {"ETH_P_IPV6", Const, 0, ""},
+ {"ETH_P_IPX", Const, 0, ""},
+ {"ETH_P_IRDA", Const, 0, ""},
+ {"ETH_P_LAT", Const, 0, ""},
+ {"ETH_P_LINK_CTL", Const, 0, ""},
+ {"ETH_P_LOCALTALK", Const, 0, ""},
+ {"ETH_P_LOOP", Const, 0, ""},
+ {"ETH_P_MOBITEX", Const, 0, ""},
+ {"ETH_P_MPLS_MC", Const, 0, ""},
+ {"ETH_P_MPLS_UC", Const, 0, ""},
+ {"ETH_P_PAE", Const, 0, ""},
+ {"ETH_P_PAUSE", Const, 0, ""},
+ {"ETH_P_PHONET", Const, 0, ""},
+ {"ETH_P_PPPTALK", Const, 0, ""},
+ {"ETH_P_PPP_DISC", Const, 0, ""},
+ {"ETH_P_PPP_MP", Const, 0, ""},
+ {"ETH_P_PPP_SES", Const, 0, ""},
+ {"ETH_P_PUP", Const, 0, ""},
+ {"ETH_P_PUPAT", Const, 0, ""},
+ {"ETH_P_RARP", Const, 0, ""},
+ {"ETH_P_SCA", Const, 0, ""},
+ {"ETH_P_SLOW", Const, 0, ""},
+ {"ETH_P_SNAP", Const, 0, ""},
+ {"ETH_P_TEB", Const, 0, ""},
+ {"ETH_P_TIPC", Const, 0, ""},
+ {"ETH_P_TRAILER", Const, 0, ""},
+ {"ETH_P_TR_802_2", Const, 0, ""},
+ {"ETH_P_WAN_PPP", Const, 0, ""},
+ {"ETH_P_WCCP", Const, 0, ""},
+ {"ETH_P_X25", Const, 0, ""},
+ {"ETIME", Const, 0, ""},
+ {"ETIMEDOUT", Const, 0, ""},
+ {"ETOOMANYREFS", Const, 0, ""},
+ {"ETXTBSY", Const, 0, ""},
+ {"EUCLEAN", Const, 0, ""},
+ {"EUNATCH", Const, 0, ""},
+ {"EUSERS", Const, 0, ""},
+ {"EVFILT_AIO", Const, 0, ""},
+ {"EVFILT_FS", Const, 0, ""},
+ {"EVFILT_LIO", Const, 0, ""},
+ {"EVFILT_MACHPORT", Const, 0, ""},
+ {"EVFILT_PROC", Const, 0, ""},
+ {"EVFILT_READ", Const, 0, ""},
+ {"EVFILT_SIGNAL", Const, 0, ""},
+ {"EVFILT_SYSCOUNT", Const, 0, ""},
+ {"EVFILT_THREADMARKER", Const, 0, ""},
+ {"EVFILT_TIMER", Const, 0, ""},
+ {"EVFILT_USER", Const, 0, ""},
+ {"EVFILT_VM", Const, 0, ""},
+ {"EVFILT_VNODE", Const, 0, ""},
+ {"EVFILT_WRITE", Const, 0, ""},
+ {"EV_ADD", Const, 0, ""},
+ {"EV_CLEAR", Const, 0, ""},
+ {"EV_DELETE", Const, 0, ""},
+ {"EV_DISABLE", Const, 0, ""},
+ {"EV_DISPATCH", Const, 0, ""},
+ {"EV_DROP", Const, 3, ""},
+ {"EV_ENABLE", Const, 0, ""},
+ {"EV_EOF", Const, 0, ""},
+ {"EV_ERROR", Const, 0, ""},
+ {"EV_FLAG0", Const, 0, ""},
+ {"EV_FLAG1", Const, 0, ""},
+ {"EV_ONESHOT", Const, 0, ""},
+ {"EV_OOBAND", Const, 0, ""},
+ {"EV_POLL", Const, 0, ""},
+ {"EV_RECEIPT", Const, 0, ""},
+ {"EV_SYSFLAGS", Const, 0, ""},
+ {"EWINDOWS", Const, 0, ""},
+ {"EWOULDBLOCK", Const, 0, ""},
+ {"EXDEV", Const, 0, ""},
+ {"EXFULL", Const, 0, ""},
+ {"EXTA", Const, 0, ""},
+ {"EXTB", Const, 0, ""},
+ {"EXTPROC", Const, 0, ""},
+ {"Environ", Func, 0, "func() []string"},
+ {"EpollCreate", Func, 0, "func(size int) (fd int, err error)"},
+ {"EpollCreate1", Func, 0, "func(flag int) (fd int, err error)"},
+ {"EpollCtl", Func, 0, "func(epfd int, op int, fd int, event *EpollEvent) (err error)"},
+ {"EpollEvent", Type, 0, ""},
+ {"EpollEvent.Events", Field, 0, ""},
+ {"EpollEvent.Fd", Field, 0, ""},
+ {"EpollEvent.Pad", Field, 0, ""},
+ {"EpollEvent.PadFd", Field, 0, ""},
+ {"EpollWait", Func, 0, "func(epfd int, events []EpollEvent, msec int) (n int, err error)"},
+ {"Errno", Type, 0, ""},
+ {"EscapeArg", Func, 0, ""},
+ {"Exchangedata", Func, 0, ""},
+ {"Exec", Func, 0, "func(argv0 string, argv []string, envv []string) (err error)"},
+ {"Exit", Func, 0, "func(code int)"},
+ {"ExitProcess", Func, 0, ""},
+ {"FD_CLOEXEC", Const, 0, ""},
+ {"FD_SETSIZE", Const, 0, ""},
+ {"FILE_ACTION_ADDED", Const, 0, ""},
+ {"FILE_ACTION_MODIFIED", Const, 0, ""},
+ {"FILE_ACTION_REMOVED", Const, 0, ""},
+ {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0, ""},
+ {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0, ""},
+ {"FILE_APPEND_DATA", Const, 0, ""},
+ {"FILE_ATTRIBUTE_ARCHIVE", Const, 0, ""},
+ {"FILE_ATTRIBUTE_DIRECTORY", Const, 0, ""},
+ {"FILE_ATTRIBUTE_HIDDEN", Const, 0, ""},
+ {"FILE_ATTRIBUTE_NORMAL", Const, 0, ""},
+ {"FILE_ATTRIBUTE_READONLY", Const, 0, ""},
+ {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4, ""},
+ {"FILE_ATTRIBUTE_SYSTEM", Const, 0, ""},
+ {"FILE_BEGIN", Const, 0, ""},
+ {"FILE_CURRENT", Const, 0, ""},
+ {"FILE_END", Const, 0, ""},
+ {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0, ""},
+ {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4, ""},
+ {"FILE_FLAG_OVERLAPPED", Const, 0, ""},
+ {"FILE_LIST_DIRECTORY", Const, 0, ""},
+ {"FILE_MAP_COPY", Const, 0, ""},
+ {"FILE_MAP_EXECUTE", Const, 0, ""},
+ {"FILE_MAP_READ", Const, 0, ""},
+ {"FILE_MAP_WRITE", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_CREATION", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0, ""},
+ {"FILE_NOTIFY_CHANGE_SIZE", Const, 0, ""},
+ {"FILE_SHARE_DELETE", Const, 0, ""},
+ {"FILE_SHARE_READ", Const, 0, ""},
+ {"FILE_SHARE_WRITE", Const, 0, ""},
+ {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2, ""},
+ {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2, ""},
+ {"FILE_TYPE_CHAR", Const, 0, ""},
+ {"FILE_TYPE_DISK", Const, 0, ""},
+ {"FILE_TYPE_PIPE", Const, 0, ""},
+ {"FILE_TYPE_REMOTE", Const, 0, ""},
+ {"FILE_TYPE_UNKNOWN", Const, 0, ""},
+ {"FILE_WRITE_ATTRIBUTES", Const, 0, ""},
+ {"FLUSHO", Const, 0, ""},
+ {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0, ""},
+ {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_STRING", Const, 0, ""},
+ {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0, ""},
+ {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0, ""},
+ {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0, ""},
+ {"FSCTL_GET_REPARSE_POINT", Const, 4, ""},
+ {"F_ADDFILESIGS", Const, 0, ""},
+ {"F_ADDSIGS", Const, 0, ""},
+ {"F_ALLOCATEALL", Const, 0, ""},
+ {"F_ALLOCATECONTIG", Const, 0, ""},
+ {"F_CANCEL", Const, 0, ""},
+ {"F_CHKCLEAN", Const, 0, ""},
+ {"F_CLOSEM", Const, 1, ""},
+ {"F_DUP2FD", Const, 0, ""},
+ {"F_DUP2FD_CLOEXEC", Const, 1, ""},
+ {"F_DUPFD", Const, 0, ""},
+ {"F_DUPFD_CLOEXEC", Const, 0, ""},
+ {"F_EXLCK", Const, 0, ""},
+ {"F_FINDSIGS", Const, 16, ""},
+ {"F_FLUSH_DATA", Const, 0, ""},
+ {"F_FREEZE_FS", Const, 0, ""},
+ {"F_FSCTL", Const, 1, ""},
+ {"F_FSDIRMASK", Const, 1, ""},
+ {"F_FSIN", Const, 1, ""},
+ {"F_FSINOUT", Const, 1, ""},
+ {"F_FSOUT", Const, 1, ""},
+ {"F_FSPRIV", Const, 1, ""},
+ {"F_FSVOID", Const, 1, ""},
+ {"F_FULLFSYNC", Const, 0, ""},
+ {"F_GETCODEDIR", Const, 16, ""},
+ {"F_GETFD", Const, 0, ""},
+ {"F_GETFL", Const, 0, ""},
+ {"F_GETLEASE", Const, 0, ""},
+ {"F_GETLK", Const, 0, ""},
+ {"F_GETLK64", Const, 0, ""},
+ {"F_GETLKPID", Const, 0, ""},
+ {"F_GETNOSIGPIPE", Const, 0, ""},
+ {"F_GETOWN", Const, 0, ""},
+ {"F_GETOWN_EX", Const, 0, ""},
+ {"F_GETPATH", Const, 0, ""},
+ {"F_GETPATH_MTMINFO", Const, 0, ""},
+ {"F_GETPIPE_SZ", Const, 0, ""},
+ {"F_GETPROTECTIONCLASS", Const, 0, ""},
+ {"F_GETPROTECTIONLEVEL", Const, 16, ""},
+ {"F_GETSIG", Const, 0, ""},
+ {"F_GLOBAL_NOCACHE", Const, 0, ""},
+ {"F_LOCK", Const, 0, ""},
+ {"F_LOG2PHYS", Const, 0, ""},
+ {"F_LOG2PHYS_EXT", Const, 0, ""},
+ {"F_MARKDEPENDENCY", Const, 0, ""},
+ {"F_MAXFD", Const, 1, ""},
+ {"F_NOCACHE", Const, 0, ""},
+ {"F_NODIRECT", Const, 0, ""},
+ {"F_NOTIFY", Const, 0, ""},
+ {"F_OGETLK", Const, 0, ""},
+ {"F_OK", Const, 0, ""},
+ {"F_OSETLK", Const, 0, ""},
+ {"F_OSETLKW", Const, 0, ""},
+ {"F_PARAM_MASK", Const, 1, ""},
+ {"F_PARAM_MAX", Const, 1, ""},
+ {"F_PATHPKG_CHECK", Const, 0, ""},
+ {"F_PEOFPOSMODE", Const, 0, ""},
+ {"F_PREALLOCATE", Const, 0, ""},
+ {"F_RDADVISE", Const, 0, ""},
+ {"F_RDAHEAD", Const, 0, ""},
+ {"F_RDLCK", Const, 0, ""},
+ {"F_READAHEAD", Const, 0, ""},
+ {"F_READBOOTSTRAP", Const, 0, ""},
+ {"F_SETBACKINGSTORE", Const, 0, ""},
+ {"F_SETFD", Const, 0, ""},
+ {"F_SETFL", Const, 0, ""},
+ {"F_SETLEASE", Const, 0, ""},
+ {"F_SETLK", Const, 0, ""},
+ {"F_SETLK64", Const, 0, ""},
+ {"F_SETLKW", Const, 0, ""},
+ {"F_SETLKW64", Const, 0, ""},
+ {"F_SETLKWTIMEOUT", Const, 16, ""},
+ {"F_SETLK_REMOTE", Const, 0, ""},
+ {"F_SETNOSIGPIPE", Const, 0, ""},
+ {"F_SETOWN", Const, 0, ""},
+ {"F_SETOWN_EX", Const, 0, ""},
+ {"F_SETPIPE_SZ", Const, 0, ""},
+ {"F_SETPROTECTIONCLASS", Const, 0, ""},
+ {"F_SETSIG", Const, 0, ""},
+ {"F_SETSIZE", Const, 0, ""},
+ {"F_SHLCK", Const, 0, ""},
+ {"F_SINGLE_WRITER", Const, 16, ""},
+ {"F_TEST", Const, 0, ""},
+ {"F_THAW_FS", Const, 0, ""},
+ {"F_TLOCK", Const, 0, ""},
+ {"F_TRANSCODEKEY", Const, 16, ""},
+ {"F_ULOCK", Const, 0, ""},
+ {"F_UNLCK", Const, 0, ""},
+ {"F_UNLCKSYS", Const, 0, ""},
+ {"F_VOLPOSMODE", Const, 0, ""},
+ {"F_WRITEBOOTSTRAP", Const, 0, ""},
+ {"F_WRLCK", Const, 0, ""},
+ {"Faccessat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) (err error)"},
+ {"Fallocate", Func, 0, "func(fd int, mode uint32, off int64, len int64) (err error)"},
+ {"Fbootstraptransfer_t", Type, 0, ""},
+ {"Fbootstraptransfer_t.Buffer", Field, 0, ""},
+ {"Fbootstraptransfer_t.Length", Field, 0, ""},
+ {"Fbootstraptransfer_t.Offset", Field, 0, ""},
+ {"Fchdir", Func, 0, "func(fd int) (err error)"},
+ {"Fchflags", Func, 0, ""},
+ {"Fchmod", Func, 0, "func(fd int, mode uint32) (err error)"},
+ {"Fchmodat", Func, 0, "func(dirfd int, path string, mode uint32, flags int) error"},
+ {"Fchown", Func, 0, "func(fd int, uid int, gid int) (err error)"},
+ {"Fchownat", Func, 0, "func(dirfd int, path string, uid int, gid int, flags int) (err error)"},
+ {"FcntlFlock", Func, 3, "func(fd uintptr, cmd int, lk *Flock_t) error"},
+ {"FdSet", Type, 0, ""},
+ {"FdSet.Bits", Field, 0, ""},
+ {"FdSet.X__fds_bits", Field, 0, ""},
+ {"Fdatasync", Func, 0, "func(fd int) (err error)"},
+ {"FileNotifyInformation", Type, 0, ""},
+ {"FileNotifyInformation.Action", Field, 0, ""},
+ {"FileNotifyInformation.FileName", Field, 0, ""},
+ {"FileNotifyInformation.FileNameLength", Field, 0, ""},
+ {"FileNotifyInformation.NextEntryOffset", Field, 0, ""},
+ {"Filetime", Type, 0, ""},
+ {"Filetime.HighDateTime", Field, 0, ""},
+ {"Filetime.LowDateTime", Field, 0, ""},
+ {"FindClose", Func, 0, ""},
+ {"FindFirstFile", Func, 0, ""},
+ {"FindNextFile", Func, 0, ""},
+ {"Flock", Func, 0, "func(fd int, how int) (err error)"},
+ {"Flock_t", Type, 0, ""},
+ {"Flock_t.Len", Field, 0, ""},
+ {"Flock_t.Pad_cgo_0", Field, 0, ""},
+ {"Flock_t.Pad_cgo_1", Field, 3, ""},
+ {"Flock_t.Pid", Field, 0, ""},
+ {"Flock_t.Start", Field, 0, ""},
+ {"Flock_t.Sysid", Field, 0, ""},
+ {"Flock_t.Type", Field, 0, ""},
+ {"Flock_t.Whence", Field, 0, ""},
+ {"FlushBpf", Func, 0, ""},
+ {"FlushFileBuffers", Func, 0, ""},
+ {"FlushViewOfFile", Func, 0, ""},
+ {"ForkExec", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)"},
+ {"ForkLock", Var, 0, ""},
+ {"FormatMessage", Func, 0, ""},
+ {"Fpathconf", Func, 0, ""},
+ {"FreeAddrInfoW", Func, 1, ""},
+ {"FreeEnvironmentStrings", Func, 0, ""},
+ {"FreeLibrary", Func, 0, ""},
+ {"Fsid", Type, 0, ""},
+ {"Fsid.Val", Field, 0, ""},
+ {"Fsid.X__fsid_val", Field, 2, ""},
+ {"Fsid.X__val", Field, 0, ""},
+ {"Fstat", Func, 0, "func(fd int, stat *Stat_t) (err error)"},
+ {"Fstatat", Func, 12, ""},
+ {"Fstatfs", Func, 0, "func(fd int, buf *Statfs_t) (err error)"},
+ {"Fstore_t", Type, 0, ""},
+ {"Fstore_t.Bytesalloc", Field, 0, ""},
+ {"Fstore_t.Flags", Field, 0, ""},
+ {"Fstore_t.Length", Field, 0, ""},
+ {"Fstore_t.Offset", Field, 0, ""},
+ {"Fstore_t.Posmode", Field, 0, ""},
+ {"Fsync", Func, 0, "func(fd int) (err error)"},
+ {"Ftruncate", Func, 0, "func(fd int, length int64) (err error)"},
+ {"FullPath", Func, 4, ""},
+ {"Futimes", Func, 0, "func(fd int, tv []Timeval) (err error)"},
+ {"Futimesat", Func, 0, "func(dirfd int, path string, tv []Timeval) (err error)"},
+ {"GENERIC_ALL", Const, 0, ""},
+ {"GENERIC_EXECUTE", Const, 0, ""},
+ {"GENERIC_READ", Const, 0, ""},
+ {"GENERIC_WRITE", Const, 0, ""},
+ {"GUID", Type, 1, ""},
+ {"GUID.Data1", Field, 1, ""},
+ {"GUID.Data2", Field, 1, ""},
+ {"GUID.Data3", Field, 1, ""},
+ {"GUID.Data4", Field, 1, ""},
+ {"GetAcceptExSockaddrs", Func, 0, ""},
+ {"GetAdaptersInfo", Func, 0, ""},
+ {"GetAddrInfoW", Func, 1, ""},
+ {"GetCommandLine", Func, 0, ""},
+ {"GetComputerName", Func, 0, ""},
+ {"GetConsoleMode", Func, 1, ""},
+ {"GetCurrentDirectory", Func, 0, ""},
+ {"GetCurrentProcess", Func, 0, ""},
+ {"GetEnvironmentStrings", Func, 0, ""},
+ {"GetEnvironmentVariable", Func, 0, ""},
+ {"GetExitCodeProcess", Func, 0, ""},
+ {"GetFileAttributes", Func, 0, ""},
+ {"GetFileAttributesEx", Func, 0, ""},
+ {"GetFileExInfoStandard", Const, 0, ""},
+ {"GetFileExMaxInfoLevel", Const, 0, ""},
+ {"GetFileInformationByHandle", Func, 0, ""},
+ {"GetFileType", Func, 0, ""},
+ {"GetFullPathName", Func, 0, ""},
+ {"GetHostByName", Func, 0, ""},
+ {"GetIfEntry", Func, 0, ""},
+ {"GetLastError", Func, 0, ""},
+ {"GetLengthSid", Func, 0, ""},
+ {"GetLongPathName", Func, 0, ""},
+ {"GetProcAddress", Func, 0, ""},
+ {"GetProcessTimes", Func, 0, ""},
+ {"GetProtoByName", Func, 0, ""},
+ {"GetQueuedCompletionStatus", Func, 0, ""},
+ {"GetServByName", Func, 0, ""},
+ {"GetShortPathName", Func, 0, ""},
+ {"GetStartupInfo", Func, 0, ""},
+ {"GetStdHandle", Func, 0, ""},
+ {"GetSystemTimeAsFileTime", Func, 0, ""},
+ {"GetTempPath", Func, 0, ""},
+ {"GetTimeZoneInformation", Func, 0, ""},
+ {"GetTokenInformation", Func, 0, ""},
+ {"GetUserNameEx", Func, 0, ""},
+ {"GetUserProfileDirectory", Func, 0, ""},
+ {"GetVersion", Func, 0, ""},
+ {"Getcwd", Func, 0, "func(buf []byte) (n int, err error)"},
+ {"Getdents", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+ {"Getdirentries", Func, 0, ""},
+ {"Getdtablesize", Func, 0, ""},
+ {"Getegid", Func, 0, "func() (egid int)"},
+ {"Getenv", Func, 0, "func(key string) (value string, found bool)"},
+ {"Geteuid", Func, 0, "func() (euid int)"},
+ {"Getfsstat", Func, 0, ""},
+ {"Getgid", Func, 0, "func() (gid int)"},
+ {"Getgroups", Func, 0, "func() (gids []int, err error)"},
+ {"Getpagesize", Func, 0, "func() int"},
+ {"Getpeername", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+ {"Getpgid", Func, 0, "func(pid int) (pgid int, err error)"},
+ {"Getpgrp", Func, 0, "func() (pid int)"},
+ {"Getpid", Func, 0, "func() (pid int)"},
+ {"Getppid", Func, 0, "func() (ppid int)"},
+ {"Getpriority", Func, 0, "func(which int, who int) (prio int, err error)"},
+ {"Getrlimit", Func, 0, "func(resource int, rlim *Rlimit) (err error)"},
+ {"Getrusage", Func, 0, "func(who int, rusage *Rusage) (err error)"},
+ {"Getsid", Func, 0, ""},
+ {"Getsockname", Func, 0, "func(fd int) (sa Sockaddr, err error)"},
+ {"Getsockopt", Func, 1, ""},
+ {"GetsockoptByte", Func, 0, ""},
+ {"GetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int) (*ICMPv6Filter, error)"},
+ {"GetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int) (*IPMreq, error)"},
+ {"GetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int) (*IPMreqn, error)"},
+ {"GetsockoptIPv6MTUInfo", Func, 2, "func(fd int, level int, opt int) (*IPv6MTUInfo, error)"},
+ {"GetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int) (*IPv6Mreq, error)"},
+ {"GetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int) (value [4]byte, err error)"},
+ {"GetsockoptInt", Func, 0, "func(fd int, level int, opt int) (value int, err error)"},
+ {"GetsockoptUcred", Func, 1, "func(fd int, level int, opt int) (*Ucred, error)"},
+ {"Gettid", Func, 0, "func() (tid int)"},
+ {"Gettimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+ {"Getuid", Func, 0, "func() (uid int)"},
+ {"Getwd", Func, 0, "func() (wd string, err error)"},
+ {"Getxattr", Func, 1, "func(path string, attr string, dest []byte) (sz int, err error)"},
+ {"HANDLE_FLAG_INHERIT", Const, 0, ""},
+ {"HKEY_CLASSES_ROOT", Const, 0, ""},
+ {"HKEY_CURRENT_CONFIG", Const, 0, ""},
+ {"HKEY_CURRENT_USER", Const, 0, ""},
+ {"HKEY_DYN_DATA", Const, 0, ""},
+ {"HKEY_LOCAL_MACHINE", Const, 0, ""},
+ {"HKEY_PERFORMANCE_DATA", Const, 0, ""},
+ {"HKEY_USERS", Const, 0, ""},
+ {"HUPCL", Const, 0, ""},
+ {"Handle", Type, 0, ""},
+ {"Hostent", Type, 0, ""},
+ {"Hostent.AddrList", Field, 0, ""},
+ {"Hostent.AddrType", Field, 0, ""},
+ {"Hostent.Aliases", Field, 0, ""},
+ {"Hostent.Length", Field, 0, ""},
+ {"Hostent.Name", Field, 0, ""},
+ {"ICANON", Const, 0, ""},
+ {"ICMP6_FILTER", Const, 2, ""},
+ {"ICMPV6_FILTER", Const, 2, ""},
+ {"ICMPv6Filter", Type, 2, ""},
+ {"ICMPv6Filter.Data", Field, 2, ""},
+ {"ICMPv6Filter.Filt", Field, 2, ""},
+ {"ICRNL", Const, 0, ""},
+ {"IEXTEN", Const, 0, ""},
+ {"IFAN_ARRIVAL", Const, 1, ""},
+ {"IFAN_DEPARTURE", Const, 1, ""},
+ {"IFA_ADDRESS", Const, 0, ""},
+ {"IFA_ANYCAST", Const, 0, ""},
+ {"IFA_BROADCAST", Const, 0, ""},
+ {"IFA_CACHEINFO", Const, 0, ""},
+ {"IFA_F_DADFAILED", Const, 0, ""},
+ {"IFA_F_DEPRECATED", Const, 0, ""},
+ {"IFA_F_HOMEADDRESS", Const, 0, ""},
+ {"IFA_F_NODAD", Const, 0, ""},
+ {"IFA_F_OPTIMISTIC", Const, 0, ""},
+ {"IFA_F_PERMANENT", Const, 0, ""},
+ {"IFA_F_SECONDARY", Const, 0, ""},
+ {"IFA_F_TEMPORARY", Const, 0, ""},
+ {"IFA_F_TENTATIVE", Const, 0, ""},
+ {"IFA_LABEL", Const, 0, ""},
+ {"IFA_LOCAL", Const, 0, ""},
+ {"IFA_MAX", Const, 0, ""},
+ {"IFA_MULTICAST", Const, 0, ""},
+ {"IFA_ROUTE", Const, 1, ""},
+ {"IFA_UNSPEC", Const, 0, ""},
+ {"IFF_ALLMULTI", Const, 0, ""},
+ {"IFF_ALTPHYS", Const, 0, ""},
+ {"IFF_AUTOMEDIA", Const, 0, ""},
+ {"IFF_BROADCAST", Const, 0, ""},
+ {"IFF_CANTCHANGE", Const, 0, ""},
+ {"IFF_CANTCONFIG", Const, 1, ""},
+ {"IFF_DEBUG", Const, 0, ""},
+ {"IFF_DRV_OACTIVE", Const, 0, ""},
+ {"IFF_DRV_RUNNING", Const, 0, ""},
+ {"IFF_DYING", Const, 0, ""},
+ {"IFF_DYNAMIC", Const, 0, ""},
+ {"IFF_LINK0", Const, 0, ""},
+ {"IFF_LINK1", Const, 0, ""},
+ {"IFF_LINK2", Const, 0, ""},
+ {"IFF_LOOPBACK", Const, 0, ""},
+ {"IFF_MASTER", Const, 0, ""},
+ {"IFF_MONITOR", Const, 0, ""},
+ {"IFF_MULTICAST", Const, 0, ""},
+ {"IFF_NOARP", Const, 0, ""},
+ {"IFF_NOTRAILERS", Const, 0, ""},
+ {"IFF_NO_PI", Const, 0, ""},
+ {"IFF_OACTIVE", Const, 0, ""},
+ {"IFF_ONE_QUEUE", Const, 0, ""},
+ {"IFF_POINTOPOINT", Const, 0, ""},
+ {"IFF_POINTTOPOINT", Const, 0, ""},
+ {"IFF_PORTSEL", Const, 0, ""},
+ {"IFF_PPROMISC", Const, 0, ""},
+ {"IFF_PROMISC", Const, 0, ""},
+ {"IFF_RENAMING", Const, 0, ""},
+ {"IFF_RUNNING", Const, 0, ""},
+ {"IFF_SIMPLEX", Const, 0, ""},
+ {"IFF_SLAVE", Const, 0, ""},
+ {"IFF_SMART", Const, 0, ""},
+ {"IFF_STATICARP", Const, 0, ""},
+ {"IFF_TAP", Const, 0, ""},
+ {"IFF_TUN", Const, 0, ""},
+ {"IFF_TUN_EXCL", Const, 0, ""},
+ {"IFF_UP", Const, 0, ""},
+ {"IFF_VNET_HDR", Const, 0, ""},
+ {"IFLA_ADDRESS", Const, 0, ""},
+ {"IFLA_BROADCAST", Const, 0, ""},
+ {"IFLA_COST", Const, 0, ""},
+ {"IFLA_IFALIAS", Const, 0, ""},
+ {"IFLA_IFNAME", Const, 0, ""},
+ {"IFLA_LINK", Const, 0, ""},
+ {"IFLA_LINKINFO", Const, 0, ""},
+ {"IFLA_LINKMODE", Const, 0, ""},
+ {"IFLA_MAP", Const, 0, ""},
+ {"IFLA_MASTER", Const, 0, ""},
+ {"IFLA_MAX", Const, 0, ""},
+ {"IFLA_MTU", Const, 0, ""},
+ {"IFLA_NET_NS_PID", Const, 0, ""},
+ {"IFLA_OPERSTATE", Const, 0, ""},
+ {"IFLA_PRIORITY", Const, 0, ""},
+ {"IFLA_PROTINFO", Const, 0, ""},
+ {"IFLA_QDISC", Const, 0, ""},
+ {"IFLA_STATS", Const, 0, ""},
+ {"IFLA_TXQLEN", Const, 0, ""},
+ {"IFLA_UNSPEC", Const, 0, ""},
+ {"IFLA_WEIGHT", Const, 0, ""},
+ {"IFLA_WIRELESS", Const, 0, ""},
+ {"IFNAMSIZ", Const, 0, ""},
+ {"IFT_1822", Const, 0, ""},
+ {"IFT_A12MPPSWITCH", Const, 0, ""},
+ {"IFT_AAL2", Const, 0, ""},
+ {"IFT_AAL5", Const, 0, ""},
+ {"IFT_ADSL", Const, 0, ""},
+ {"IFT_AFLANE8023", Const, 0, ""},
+ {"IFT_AFLANE8025", Const, 0, ""},
+ {"IFT_ARAP", Const, 0, ""},
+ {"IFT_ARCNET", Const, 0, ""},
+ {"IFT_ARCNETPLUS", Const, 0, ""},
+ {"IFT_ASYNC", Const, 0, ""},
+ {"IFT_ATM", Const, 0, ""},
+ {"IFT_ATMDXI", Const, 0, ""},
+ {"IFT_ATMFUNI", Const, 0, ""},
+ {"IFT_ATMIMA", Const, 0, ""},
+ {"IFT_ATMLOGICAL", Const, 0, ""},
+ {"IFT_ATMRADIO", Const, 0, ""},
+ {"IFT_ATMSUBINTERFACE", Const, 0, ""},
+ {"IFT_ATMVCIENDPT", Const, 0, ""},
+ {"IFT_ATMVIRTUAL", Const, 0, ""},
+ {"IFT_BGPPOLICYACCOUNTING", Const, 0, ""},
+ {"IFT_BLUETOOTH", Const, 1, ""},
+ {"IFT_BRIDGE", Const, 0, ""},
+ {"IFT_BSC", Const, 0, ""},
+ {"IFT_CARP", Const, 0, ""},
+ {"IFT_CCTEMUL", Const, 0, ""},
+ {"IFT_CELLULAR", Const, 0, ""},
+ {"IFT_CEPT", Const, 0, ""},
+ {"IFT_CES", Const, 0, ""},
+ {"IFT_CHANNEL", Const, 0, ""},
+ {"IFT_CNR", Const, 0, ""},
+ {"IFT_COFFEE", Const, 0, ""},
+ {"IFT_COMPOSITELINK", Const, 0, ""},
+ {"IFT_DCN", Const, 0, ""},
+ {"IFT_DIGITALPOWERLINE", Const, 0, ""},
+ {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0, ""},
+ {"IFT_DLSW", Const, 0, ""},
+ {"IFT_DOCSCABLEDOWNSTREAM", Const, 0, ""},
+ {"IFT_DOCSCABLEMACLAYER", Const, 0, ""},
+ {"IFT_DOCSCABLEUPSTREAM", Const, 0, ""},
+ {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1, ""},
+ {"IFT_DS0", Const, 0, ""},
+ {"IFT_DS0BUNDLE", Const, 0, ""},
+ {"IFT_DS1FDL", Const, 0, ""},
+ {"IFT_DS3", Const, 0, ""},
+ {"IFT_DTM", Const, 0, ""},
+ {"IFT_DUMMY", Const, 1, ""},
+ {"IFT_DVBASILN", Const, 0, ""},
+ {"IFT_DVBASIOUT", Const, 0, ""},
+ {"IFT_DVBRCCDOWNSTREAM", Const, 0, ""},
+ {"IFT_DVBRCCMACLAYER", Const, 0, ""},
+ {"IFT_DVBRCCUPSTREAM", Const, 0, ""},
+ {"IFT_ECONET", Const, 1, ""},
+ {"IFT_ENC", Const, 0, ""},
+ {"IFT_EON", Const, 0, ""},
+ {"IFT_EPLRS", Const, 0, ""},
+ {"IFT_ESCON", Const, 0, ""},
+ {"IFT_ETHER", Const, 0, ""},
+ {"IFT_FAITH", Const, 0, ""},
+ {"IFT_FAST", Const, 0, ""},
+ {"IFT_FASTETHER", Const, 0, ""},
+ {"IFT_FASTETHERFX", Const, 0, ""},
+ {"IFT_FDDI", Const, 0, ""},
+ {"IFT_FIBRECHANNEL", Const, 0, ""},
+ {"IFT_FRAMERELAYINTERCONNECT", Const, 0, ""},
+ {"IFT_FRAMERELAYMPI", Const, 0, ""},
+ {"IFT_FRDLCIENDPT", Const, 0, ""},
+ {"IFT_FRELAY", Const, 0, ""},
+ {"IFT_FRELAYDCE", Const, 0, ""},
+ {"IFT_FRF16MFRBUNDLE", Const, 0, ""},
+ {"IFT_FRFORWARD", Const, 0, ""},
+ {"IFT_G703AT2MB", Const, 0, ""},
+ {"IFT_G703AT64K", Const, 0, ""},
+ {"IFT_GIF", Const, 0, ""},
+ {"IFT_GIGABITETHERNET", Const, 0, ""},
+ {"IFT_GR303IDT", Const, 0, ""},
+ {"IFT_GR303RDT", Const, 0, ""},
+ {"IFT_H323GATEKEEPER", Const, 0, ""},
+ {"IFT_H323PROXY", Const, 0, ""},
+ {"IFT_HDH1822", Const, 0, ""},
+ {"IFT_HDLC", Const, 0, ""},
+ {"IFT_HDSL2", Const, 0, ""},
+ {"IFT_HIPERLAN2", Const, 0, ""},
+ {"IFT_HIPPI", Const, 0, ""},
+ {"IFT_HIPPIINTERFACE", Const, 0, ""},
+ {"IFT_HOSTPAD", Const, 0, ""},
+ {"IFT_HSSI", Const, 0, ""},
+ {"IFT_HY", Const, 0, ""},
+ {"IFT_IBM370PARCHAN", Const, 0, ""},
+ {"IFT_IDSL", Const, 0, ""},
+ {"IFT_IEEE1394", Const, 0, ""},
+ {"IFT_IEEE80211", Const, 0, ""},
+ {"IFT_IEEE80212", Const, 0, ""},
+ {"IFT_IEEE8023ADLAG", Const, 0, ""},
+ {"IFT_IFGSN", Const, 0, ""},
+ {"IFT_IMT", Const, 0, ""},
+ {"IFT_INFINIBAND", Const, 1, ""},
+ {"IFT_INTERLEAVE", Const, 0, ""},
+ {"IFT_IP", Const, 0, ""},
+ {"IFT_IPFORWARD", Const, 0, ""},
+ {"IFT_IPOVERATM", Const, 0, ""},
+ {"IFT_IPOVERCDLC", Const, 0, ""},
+ {"IFT_IPOVERCLAW", Const, 0, ""},
+ {"IFT_IPSWITCH", Const, 0, ""},
+ {"IFT_IPXIP", Const, 0, ""},
+ {"IFT_ISDN", Const, 0, ""},
+ {"IFT_ISDNBASIC", Const, 0, ""},
+ {"IFT_ISDNPRIMARY", Const, 0, ""},
+ {"IFT_ISDNS", Const, 0, ""},
+ {"IFT_ISDNU", Const, 0, ""},
+ {"IFT_ISO88022LLC", Const, 0, ""},
+ {"IFT_ISO88023", Const, 0, ""},
+ {"IFT_ISO88024", Const, 0, ""},
+ {"IFT_ISO88025", Const, 0, ""},
+ {"IFT_ISO88025CRFPINT", Const, 0, ""},
+ {"IFT_ISO88025DTR", Const, 0, ""},
+ {"IFT_ISO88025FIBER", Const, 0, ""},
+ {"IFT_ISO88026", Const, 0, ""},
+ {"IFT_ISUP", Const, 0, ""},
+ {"IFT_L2VLAN", Const, 0, ""},
+ {"IFT_L3IPVLAN", Const, 0, ""},
+ {"IFT_L3IPXVLAN", Const, 0, ""},
+ {"IFT_LAPB", Const, 0, ""},
+ {"IFT_LAPD", Const, 0, ""},
+ {"IFT_LAPF", Const, 0, ""},
+ {"IFT_LINEGROUP", Const, 1, ""},
+ {"IFT_LOCALTALK", Const, 0, ""},
+ {"IFT_LOOP", Const, 0, ""},
+ {"IFT_MEDIAMAILOVERIP", Const, 0, ""},
+ {"IFT_MFSIGLINK", Const, 0, ""},
+ {"IFT_MIOX25", Const, 0, ""},
+ {"IFT_MODEM", Const, 0, ""},
+ {"IFT_MPC", Const, 0, ""},
+ {"IFT_MPLS", Const, 0, ""},
+ {"IFT_MPLSTUNNEL", Const, 0, ""},
+ {"IFT_MSDSL", Const, 0, ""},
+ {"IFT_MVL", Const, 0, ""},
+ {"IFT_MYRINET", Const, 0, ""},
+ {"IFT_NFAS", Const, 0, ""},
+ {"IFT_NSIP", Const, 0, ""},
+ {"IFT_OPTICALCHANNEL", Const, 0, ""},
+ {"IFT_OPTICALTRANSPORT", Const, 0, ""},
+ {"IFT_OTHER", Const, 0, ""},
+ {"IFT_P10", Const, 0, ""},
+ {"IFT_P80", Const, 0, ""},
+ {"IFT_PARA", Const, 0, ""},
+ {"IFT_PDP", Const, 0, ""},
+ {"IFT_PFLOG", Const, 0, ""},
+ {"IFT_PFLOW", Const, 1, ""},
+ {"IFT_PFSYNC", Const, 0, ""},
+ {"IFT_PLC", Const, 0, ""},
+ {"IFT_PON155", Const, 1, ""},
+ {"IFT_PON622", Const, 1, ""},
+ {"IFT_POS", Const, 0, ""},
+ {"IFT_PPP", Const, 0, ""},
+ {"IFT_PPPMULTILINKBUNDLE", Const, 0, ""},
+ {"IFT_PROPATM", Const, 1, ""},
+ {"IFT_PROPBWAP2MP", Const, 0, ""},
+ {"IFT_PROPCNLS", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0, ""},
+ {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0, ""},
+ {"IFT_PROPMUX", Const, 0, ""},
+ {"IFT_PROPVIRTUAL", Const, 0, ""},
+ {"IFT_PROPWIRELESSP2P", Const, 0, ""},
+ {"IFT_PTPSERIAL", Const, 0, ""},
+ {"IFT_PVC", Const, 0, ""},
+ {"IFT_Q2931", Const, 1, ""},
+ {"IFT_QLLC", Const, 0, ""},
+ {"IFT_RADIOMAC", Const, 0, ""},
+ {"IFT_RADSL", Const, 0, ""},
+ {"IFT_REACHDSL", Const, 0, ""},
+ {"IFT_RFC1483", Const, 0, ""},
+ {"IFT_RS232", Const, 0, ""},
+ {"IFT_RSRB", Const, 0, ""},
+ {"IFT_SDLC", Const, 0, ""},
+ {"IFT_SDSL", Const, 0, ""},
+ {"IFT_SHDSL", Const, 0, ""},
+ {"IFT_SIP", Const, 0, ""},
+ {"IFT_SIPSIG", Const, 1, ""},
+ {"IFT_SIPTG", Const, 1, ""},
+ {"IFT_SLIP", Const, 0, ""},
+ {"IFT_SMDSDXI", Const, 0, ""},
+ {"IFT_SMDSICIP", Const, 0, ""},
+ {"IFT_SONET", Const, 0, ""},
+ {"IFT_SONETOVERHEADCHANNEL", Const, 0, ""},
+ {"IFT_SONETPATH", Const, 0, ""},
+ {"IFT_SONETVT", Const, 0, ""},
+ {"IFT_SRP", Const, 0, ""},
+ {"IFT_SS7SIGLINK", Const, 0, ""},
+ {"IFT_STACKTOSTACK", Const, 0, ""},
+ {"IFT_STARLAN", Const, 0, ""},
+ {"IFT_STF", Const, 0, ""},
+ {"IFT_T1", Const, 0, ""},
+ {"IFT_TDLC", Const, 0, ""},
+ {"IFT_TELINK", Const, 1, ""},
+ {"IFT_TERMPAD", Const, 0, ""},
+ {"IFT_TR008", Const, 0, ""},
+ {"IFT_TRANSPHDLC", Const, 0, ""},
+ {"IFT_TUNNEL", Const, 0, ""},
+ {"IFT_ULTRA", Const, 0, ""},
+ {"IFT_USB", Const, 0, ""},
+ {"IFT_V11", Const, 0, ""},
+ {"IFT_V35", Const, 0, ""},
+ {"IFT_V36", Const, 0, ""},
+ {"IFT_V37", Const, 0, ""},
+ {"IFT_VDSL", Const, 0, ""},
+ {"IFT_VIRTUALIPADDRESS", Const, 0, ""},
+ {"IFT_VIRTUALTG", Const, 1, ""},
+ {"IFT_VOICEDID", Const, 1, ""},
+ {"IFT_VOICEEM", Const, 0, ""},
+ {"IFT_VOICEEMFGD", Const, 1, ""},
+ {"IFT_VOICEENCAP", Const, 0, ""},
+ {"IFT_VOICEFGDEANA", Const, 1, ""},
+ {"IFT_VOICEFXO", Const, 0, ""},
+ {"IFT_VOICEFXS", Const, 0, ""},
+ {"IFT_VOICEOVERATM", Const, 0, ""},
+ {"IFT_VOICEOVERCABLE", Const, 1, ""},
+ {"IFT_VOICEOVERFRAMERELAY", Const, 0, ""},
+ {"IFT_VOICEOVERIP", Const, 0, ""},
+ {"IFT_X213", Const, 0, ""},
+ {"IFT_X25", Const, 0, ""},
+ {"IFT_X25DDN", Const, 0, ""},
+ {"IFT_X25HUNTGROUP", Const, 0, ""},
+ {"IFT_X25MLP", Const, 0, ""},
+ {"IFT_X25PLE", Const, 0, ""},
+ {"IFT_XETHER", Const, 0, ""},
+ {"IGNBRK", Const, 0, ""},
+ {"IGNCR", Const, 0, ""},
+ {"IGNORE", Const, 0, ""},
+ {"IGNPAR", Const, 0, ""},
+ {"IMAXBEL", Const, 0, ""},
+ {"INFINITE", Const, 0, ""},
+ {"INLCR", Const, 0, ""},
+ {"INPCK", Const, 0, ""},
+ {"INVALID_FILE_ATTRIBUTES", Const, 0, ""},
+ {"IN_ACCESS", Const, 0, ""},
+ {"IN_ALL_EVENTS", Const, 0, ""},
+ {"IN_ATTRIB", Const, 0, ""},
+ {"IN_CLASSA_HOST", Const, 0, ""},
+ {"IN_CLASSA_MAX", Const, 0, ""},
+ {"IN_CLASSA_NET", Const, 0, ""},
+ {"IN_CLASSA_NSHIFT", Const, 0, ""},
+ {"IN_CLASSB_HOST", Const, 0, ""},
+ {"IN_CLASSB_MAX", Const, 0, ""},
+ {"IN_CLASSB_NET", Const, 0, ""},
+ {"IN_CLASSB_NSHIFT", Const, 0, ""},
+ {"IN_CLASSC_HOST", Const, 0, ""},
+ {"IN_CLASSC_NET", Const, 0, ""},
+ {"IN_CLASSC_NSHIFT", Const, 0, ""},
+ {"IN_CLASSD_HOST", Const, 0, ""},
+ {"IN_CLASSD_NET", Const, 0, ""},
+ {"IN_CLASSD_NSHIFT", Const, 0, ""},
+ {"IN_CLOEXEC", Const, 0, ""},
+ {"IN_CLOSE", Const, 0, ""},
+ {"IN_CLOSE_NOWRITE", Const, 0, ""},
+ {"IN_CLOSE_WRITE", Const, 0, ""},
+ {"IN_CREATE", Const, 0, ""},
+ {"IN_DELETE", Const, 0, ""},
+ {"IN_DELETE_SELF", Const, 0, ""},
+ {"IN_DONT_FOLLOW", Const, 0, ""},
+ {"IN_EXCL_UNLINK", Const, 0, ""},
+ {"IN_IGNORED", Const, 0, ""},
+ {"IN_ISDIR", Const, 0, ""},
+ {"IN_LINKLOCALNETNUM", Const, 0, ""},
+ {"IN_LOOPBACKNET", Const, 0, ""},
+ {"IN_MASK_ADD", Const, 0, ""},
+ {"IN_MODIFY", Const, 0, ""},
+ {"IN_MOVE", Const, 0, ""},
+ {"IN_MOVED_FROM", Const, 0, ""},
+ {"IN_MOVED_TO", Const, 0, ""},
+ {"IN_MOVE_SELF", Const, 0, ""},
+ {"IN_NONBLOCK", Const, 0, ""},
+ {"IN_ONESHOT", Const, 0, ""},
+ {"IN_ONLYDIR", Const, 0, ""},
+ {"IN_OPEN", Const, 0, ""},
+ {"IN_Q_OVERFLOW", Const, 0, ""},
+ {"IN_RFC3021_HOST", Const, 1, ""},
+ {"IN_RFC3021_MASK", Const, 1, ""},
+ {"IN_RFC3021_NET", Const, 1, ""},
+ {"IN_RFC3021_NSHIFT", Const, 1, ""},
+ {"IN_UNMOUNT", Const, 0, ""},
+ {"IOC_IN", Const, 1, ""},
+ {"IOC_INOUT", Const, 1, ""},
+ {"IOC_OUT", Const, 1, ""},
+ {"IOC_VENDOR", Const, 3, ""},
+ {"IOC_WS2", Const, 1, ""},
+ {"IO_REPARSE_TAG_SYMLINK", Const, 4, ""},
+ {"IPMreq", Type, 0, ""},
+ {"IPMreq.Interface", Field, 0, ""},
+ {"IPMreq.Multiaddr", Field, 0, ""},
+ {"IPMreqn", Type, 0, ""},
+ {"IPMreqn.Address", Field, 0, ""},
+ {"IPMreqn.Ifindex", Field, 0, ""},
+ {"IPMreqn.Multiaddr", Field, 0, ""},
+ {"IPPROTO_3PC", Const, 0, ""},
+ {"IPPROTO_ADFS", Const, 0, ""},
+ {"IPPROTO_AH", Const, 0, ""},
+ {"IPPROTO_AHIP", Const, 0, ""},
+ {"IPPROTO_APES", Const, 0, ""},
+ {"IPPROTO_ARGUS", Const, 0, ""},
+ {"IPPROTO_AX25", Const, 0, ""},
+ {"IPPROTO_BHA", Const, 0, ""},
+ {"IPPROTO_BLT", Const, 0, ""},
+ {"IPPROTO_BRSATMON", Const, 0, ""},
+ {"IPPROTO_CARP", Const, 0, ""},
+ {"IPPROTO_CFTP", Const, 0, ""},
+ {"IPPROTO_CHAOS", Const, 0, ""},
+ {"IPPROTO_CMTP", Const, 0, ""},
+ {"IPPROTO_COMP", Const, 0, ""},
+ {"IPPROTO_CPHB", Const, 0, ""},
+ {"IPPROTO_CPNX", Const, 0, ""},
+ {"IPPROTO_DCCP", Const, 0, ""},
+ {"IPPROTO_DDP", Const, 0, ""},
+ {"IPPROTO_DGP", Const, 0, ""},
+ {"IPPROTO_DIVERT", Const, 0, ""},
+ {"IPPROTO_DIVERT_INIT", Const, 3, ""},
+ {"IPPROTO_DIVERT_RESP", Const, 3, ""},
+ {"IPPROTO_DONE", Const, 0, ""},
+ {"IPPROTO_DSTOPTS", Const, 0, ""},
+ {"IPPROTO_EGP", Const, 0, ""},
+ {"IPPROTO_EMCON", Const, 0, ""},
+ {"IPPROTO_ENCAP", Const, 0, ""},
+ {"IPPROTO_EON", Const, 0, ""},
+ {"IPPROTO_ESP", Const, 0, ""},
+ {"IPPROTO_ETHERIP", Const, 0, ""},
+ {"IPPROTO_FRAGMENT", Const, 0, ""},
+ {"IPPROTO_GGP", Const, 0, ""},
+ {"IPPROTO_GMTP", Const, 0, ""},
+ {"IPPROTO_GRE", Const, 0, ""},
+ {"IPPROTO_HELLO", Const, 0, ""},
+ {"IPPROTO_HMP", Const, 0, ""},
+ {"IPPROTO_HOPOPTS", Const, 0, ""},
+ {"IPPROTO_ICMP", Const, 0, ""},
+ {"IPPROTO_ICMPV6", Const, 0, ""},
+ {"IPPROTO_IDP", Const, 0, ""},
+ {"IPPROTO_IDPR", Const, 0, ""},
+ {"IPPROTO_IDRP", Const, 0, ""},
+ {"IPPROTO_IGMP", Const, 0, ""},
+ {"IPPROTO_IGP", Const, 0, ""},
+ {"IPPROTO_IGRP", Const, 0, ""},
+ {"IPPROTO_IL", Const, 0, ""},
+ {"IPPROTO_INLSP", Const, 0, ""},
+ {"IPPROTO_INP", Const, 0, ""},
+ {"IPPROTO_IP", Const, 0, ""},
+ {"IPPROTO_IPCOMP", Const, 0, ""},
+ {"IPPROTO_IPCV", Const, 0, ""},
+ {"IPPROTO_IPEIP", Const, 0, ""},
+ {"IPPROTO_IPIP", Const, 0, ""},
+ {"IPPROTO_IPPC", Const, 0, ""},
+ {"IPPROTO_IPV4", Const, 0, ""},
+ {"IPPROTO_IPV6", Const, 0, ""},
+ {"IPPROTO_IPV6_ICMP", Const, 1, ""},
+ {"IPPROTO_IRTP", Const, 0, ""},
+ {"IPPROTO_KRYPTOLAN", Const, 0, ""},
+ {"IPPROTO_LARP", Const, 0, ""},
+ {"IPPROTO_LEAF1", Const, 0, ""},
+ {"IPPROTO_LEAF2", Const, 0, ""},
+ {"IPPROTO_MAX", Const, 0, ""},
+ {"IPPROTO_MAXID", Const, 0, ""},
+ {"IPPROTO_MEAS", Const, 0, ""},
+ {"IPPROTO_MH", Const, 1, ""},
+ {"IPPROTO_MHRP", Const, 0, ""},
+ {"IPPROTO_MICP", Const, 0, ""},
+ {"IPPROTO_MOBILE", Const, 0, ""},
+ {"IPPROTO_MPLS", Const, 1, ""},
+ {"IPPROTO_MTP", Const, 0, ""},
+ {"IPPROTO_MUX", Const, 0, ""},
+ {"IPPROTO_ND", Const, 0, ""},
+ {"IPPROTO_NHRP", Const, 0, ""},
+ {"IPPROTO_NONE", Const, 0, ""},
+ {"IPPROTO_NSP", Const, 0, ""},
+ {"IPPROTO_NVPII", Const, 0, ""},
+ {"IPPROTO_OLD_DIVERT", Const, 0, ""},
+ {"IPPROTO_OSPFIGP", Const, 0, ""},
+ {"IPPROTO_PFSYNC", Const, 0, ""},
+ {"IPPROTO_PGM", Const, 0, ""},
+ {"IPPROTO_PIGP", Const, 0, ""},
+ {"IPPROTO_PIM", Const, 0, ""},
+ {"IPPROTO_PRM", Const, 0, ""},
+ {"IPPROTO_PUP", Const, 0, ""},
+ {"IPPROTO_PVP", Const, 0, ""},
+ {"IPPROTO_RAW", Const, 0, ""},
+ {"IPPROTO_RCCMON", Const, 0, ""},
+ {"IPPROTO_RDP", Const, 0, ""},
+ {"IPPROTO_ROUTING", Const, 0, ""},
+ {"IPPROTO_RSVP", Const, 0, ""},
+ {"IPPROTO_RVD", Const, 0, ""},
+ {"IPPROTO_SATEXPAK", Const, 0, ""},
+ {"IPPROTO_SATMON", Const, 0, ""},
+ {"IPPROTO_SCCSP", Const, 0, ""},
+ {"IPPROTO_SCTP", Const, 0, ""},
+ {"IPPROTO_SDRP", Const, 0, ""},
+ {"IPPROTO_SEND", Const, 1, ""},
+ {"IPPROTO_SEP", Const, 0, ""},
+ {"IPPROTO_SKIP", Const, 0, ""},
+ {"IPPROTO_SPACER", Const, 0, ""},
+ {"IPPROTO_SRPC", Const, 0, ""},
+ {"IPPROTO_ST", Const, 0, ""},
+ {"IPPROTO_SVMTP", Const, 0, ""},
+ {"IPPROTO_SWIPE", Const, 0, ""},
+ {"IPPROTO_TCF", Const, 0, ""},
+ {"IPPROTO_TCP", Const, 0, ""},
+ {"IPPROTO_TLSP", Const, 0, ""},
+ {"IPPROTO_TP", Const, 0, ""},
+ {"IPPROTO_TPXX", Const, 0, ""},
+ {"IPPROTO_TRUNK1", Const, 0, ""},
+ {"IPPROTO_TRUNK2", Const, 0, ""},
+ {"IPPROTO_TTP", Const, 0, ""},
+ {"IPPROTO_UDP", Const, 0, ""},
+ {"IPPROTO_UDPLITE", Const, 0, ""},
+ {"IPPROTO_VINES", Const, 0, ""},
+ {"IPPROTO_VISA", Const, 0, ""},
+ {"IPPROTO_VMTP", Const, 0, ""},
+ {"IPPROTO_VRRP", Const, 1, ""},
+ {"IPPROTO_WBEXPAK", Const, 0, ""},
+ {"IPPROTO_WBMON", Const, 0, ""},
+ {"IPPROTO_WSN", Const, 0, ""},
+ {"IPPROTO_XNET", Const, 0, ""},
+ {"IPPROTO_XTP", Const, 0, ""},
+ {"IPV6_2292DSTOPTS", Const, 0, ""},
+ {"IPV6_2292HOPLIMIT", Const, 0, ""},
+ {"IPV6_2292HOPOPTS", Const, 0, ""},
+ {"IPV6_2292NEXTHOP", Const, 0, ""},
+ {"IPV6_2292PKTINFO", Const, 0, ""},
+ {"IPV6_2292PKTOPTIONS", Const, 0, ""},
+ {"IPV6_2292RTHDR", Const, 0, ""},
+ {"IPV6_ADDRFORM", Const, 0, ""},
+ {"IPV6_ADD_MEMBERSHIP", Const, 0, ""},
+ {"IPV6_AUTHHDR", Const, 0, ""},
+ {"IPV6_AUTH_LEVEL", Const, 1, ""},
+ {"IPV6_AUTOFLOWLABEL", Const, 0, ""},
+ {"IPV6_BINDANY", Const, 0, ""},
+ {"IPV6_BINDV6ONLY", Const, 0, ""},
+ {"IPV6_BOUND_IF", Const, 0, ""},
+ {"IPV6_CHECKSUM", Const, 0, ""},
+ {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0, ""},
+ {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+ {"IPV6_DEFHLIM", Const, 0, ""},
+ {"IPV6_DONTFRAG", Const, 0, ""},
+ {"IPV6_DROP_MEMBERSHIP", Const, 0, ""},
+ {"IPV6_DSTOPTS", Const, 0, ""},
+ {"IPV6_ESP_NETWORK_LEVEL", Const, 1, ""},
+ {"IPV6_ESP_TRANS_LEVEL", Const, 1, ""},
+ {"IPV6_FAITH", Const, 0, ""},
+ {"IPV6_FLOWINFO_MASK", Const, 0, ""},
+ {"IPV6_FLOWLABEL_MASK", Const, 0, ""},
+ {"IPV6_FRAGTTL", Const, 0, ""},
+ {"IPV6_FW_ADD", Const, 0, ""},
+ {"IPV6_FW_DEL", Const, 0, ""},
+ {"IPV6_FW_FLUSH", Const, 0, ""},
+ {"IPV6_FW_GET", Const, 0, ""},
+ {"IPV6_FW_ZERO", Const, 0, ""},
+ {"IPV6_HLIMDEC", Const, 0, ""},
+ {"IPV6_HOPLIMIT", Const, 0, ""},
+ {"IPV6_HOPOPTS", Const, 0, ""},
+ {"IPV6_IPCOMP_LEVEL", Const, 1, ""},
+ {"IPV6_IPSEC_POLICY", Const, 0, ""},
+ {"IPV6_JOIN_ANYCAST", Const, 0, ""},
+ {"IPV6_JOIN_GROUP", Const, 0, ""},
+ {"IPV6_LEAVE_ANYCAST", Const, 0, ""},
+ {"IPV6_LEAVE_GROUP", Const, 0, ""},
+ {"IPV6_MAXHLIM", Const, 0, ""},
+ {"IPV6_MAXOPTHDR", Const, 0, ""},
+ {"IPV6_MAXPACKET", Const, 0, ""},
+ {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+ {"IPV6_MAX_MEMBERSHIPS", Const, 0, ""},
+ {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+ {"IPV6_MIN_MEMBERSHIPS", Const, 0, ""},
+ {"IPV6_MMTU", Const, 0, ""},
+ {"IPV6_MSFILTER", Const, 0, ""},
+ {"IPV6_MTU", Const, 0, ""},
+ {"IPV6_MTU_DISCOVER", Const, 0, ""},
+ {"IPV6_MULTICAST_HOPS", Const, 0, ""},
+ {"IPV6_MULTICAST_IF", Const, 0, ""},
+ {"IPV6_MULTICAST_LOOP", Const, 0, ""},
+ {"IPV6_NEXTHOP", Const, 0, ""},
+ {"IPV6_OPTIONS", Const, 1, ""},
+ {"IPV6_PATHMTU", Const, 0, ""},
+ {"IPV6_PIPEX", Const, 1, ""},
+ {"IPV6_PKTINFO", Const, 0, ""},
+ {"IPV6_PMTUDISC_DO", Const, 0, ""},
+ {"IPV6_PMTUDISC_DONT", Const, 0, ""},
+ {"IPV6_PMTUDISC_PROBE", Const, 0, ""},
+ {"IPV6_PMTUDISC_WANT", Const, 0, ""},
+ {"IPV6_PORTRANGE", Const, 0, ""},
+ {"IPV6_PORTRANGE_DEFAULT", Const, 0, ""},
+ {"IPV6_PORTRANGE_HIGH", Const, 0, ""},
+ {"IPV6_PORTRANGE_LOW", Const, 0, ""},
+ {"IPV6_PREFER_TEMPADDR", Const, 0, ""},
+ {"IPV6_RECVDSTOPTS", Const, 0, ""},
+ {"IPV6_RECVDSTPORT", Const, 3, ""},
+ {"IPV6_RECVERR", Const, 0, ""},
+ {"IPV6_RECVHOPLIMIT", Const, 0, ""},
+ {"IPV6_RECVHOPOPTS", Const, 0, ""},
+ {"IPV6_RECVPATHMTU", Const, 0, ""},
+ {"IPV6_RECVPKTINFO", Const, 0, ""},
+ {"IPV6_RECVRTHDR", Const, 0, ""},
+ {"IPV6_RECVTCLASS", Const, 0, ""},
+ {"IPV6_ROUTER_ALERT", Const, 0, ""},
+ {"IPV6_RTABLE", Const, 1, ""},
+ {"IPV6_RTHDR", Const, 0, ""},
+ {"IPV6_RTHDRDSTOPTS", Const, 0, ""},
+ {"IPV6_RTHDR_LOOSE", Const, 0, ""},
+ {"IPV6_RTHDR_STRICT", Const, 0, ""},
+ {"IPV6_RTHDR_TYPE_0", Const, 0, ""},
+ {"IPV6_RXDSTOPTS", Const, 0, ""},
+ {"IPV6_RXHOPOPTS", Const, 0, ""},
+ {"IPV6_SOCKOPT_RESERVED1", Const, 0, ""},
+ {"IPV6_TCLASS", Const, 0, ""},
+ {"IPV6_UNICAST_HOPS", Const, 0, ""},
+ {"IPV6_USE_MIN_MTU", Const, 0, ""},
+ {"IPV6_V6ONLY", Const, 0, ""},
+ {"IPV6_VERSION", Const, 0, ""},
+ {"IPV6_VERSION_MASK", Const, 0, ""},
+ {"IPV6_XFRM_POLICY", Const, 0, ""},
+ {"IP_ADD_MEMBERSHIP", Const, 0, ""},
+ {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0, ""},
+ {"IP_AUTH_LEVEL", Const, 1, ""},
+ {"IP_BINDANY", Const, 0, ""},
+ {"IP_BLOCK_SOURCE", Const, 0, ""},
+ {"IP_BOUND_IF", Const, 0, ""},
+ {"IP_DEFAULT_MULTICAST_LOOP", Const, 0, ""},
+ {"IP_DEFAULT_MULTICAST_TTL", Const, 0, ""},
+ {"IP_DF", Const, 0, ""},
+ {"IP_DIVERTFL", Const, 3, ""},
+ {"IP_DONTFRAG", Const, 0, ""},
+ {"IP_DROP_MEMBERSHIP", Const, 0, ""},
+ {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0, ""},
+ {"IP_DUMMYNET3", Const, 0, ""},
+ {"IP_DUMMYNET_CONFIGURE", Const, 0, ""},
+ {"IP_DUMMYNET_DEL", Const, 0, ""},
+ {"IP_DUMMYNET_FLUSH", Const, 0, ""},
+ {"IP_DUMMYNET_GET", Const, 0, ""},
+ {"IP_EF", Const, 1, ""},
+ {"IP_ERRORMTU", Const, 1, ""},
+ {"IP_ESP_NETWORK_LEVEL", Const, 1, ""},
+ {"IP_ESP_TRANS_LEVEL", Const, 1, ""},
+ {"IP_FAITH", Const, 0, ""},
+ {"IP_FREEBIND", Const, 0, ""},
+ {"IP_FW3", Const, 0, ""},
+ {"IP_FW_ADD", Const, 0, ""},
+ {"IP_FW_DEL", Const, 0, ""},
+ {"IP_FW_FLUSH", Const, 0, ""},
+ {"IP_FW_GET", Const, 0, ""},
+ {"IP_FW_NAT_CFG", Const, 0, ""},
+ {"IP_FW_NAT_DEL", Const, 0, ""},
+ {"IP_FW_NAT_GET_CONFIG", Const, 0, ""},
+ {"IP_FW_NAT_GET_LOG", Const, 0, ""},
+ {"IP_FW_RESETLOG", Const, 0, ""},
+ {"IP_FW_TABLE_ADD", Const, 0, ""},
+ {"IP_FW_TABLE_DEL", Const, 0, ""},
+ {"IP_FW_TABLE_FLUSH", Const, 0, ""},
+ {"IP_FW_TABLE_GETSIZE", Const, 0, ""},
+ {"IP_FW_TABLE_LIST", Const, 0, ""},
+ {"IP_FW_ZERO", Const, 0, ""},
+ {"IP_HDRINCL", Const, 0, ""},
+ {"IP_IPCOMP_LEVEL", Const, 1, ""},
+ {"IP_IPSECFLOWINFO", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_AUTH", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_CRED", Const, 1, ""},
+ {"IP_IPSEC_LOCAL_ID", Const, 1, ""},
+ {"IP_IPSEC_POLICY", Const, 0, ""},
+ {"IP_IPSEC_REMOTE_AUTH", Const, 1, ""},
+ {"IP_IPSEC_REMOTE_CRED", Const, 1, ""},
+ {"IP_IPSEC_REMOTE_ID", Const, 1, ""},
+ {"IP_MAXPACKET", Const, 0, ""},
+ {"IP_MAX_GROUP_SRC_FILTER", Const, 0, ""},
+ {"IP_MAX_MEMBERSHIPS", Const, 0, ""},
+ {"IP_MAX_SOCK_MUTE_FILTER", Const, 0, ""},
+ {"IP_MAX_SOCK_SRC_FILTER", Const, 0, ""},
+ {"IP_MAX_SOURCE_FILTER", Const, 0, ""},
+ {"IP_MF", Const, 0, ""},
+ {"IP_MINFRAGSIZE", Const, 1, ""},
+ {"IP_MINTTL", Const, 0, ""},
+ {"IP_MIN_MEMBERSHIPS", Const, 0, ""},
+ {"IP_MSFILTER", Const, 0, ""},
+ {"IP_MSS", Const, 0, ""},
+ {"IP_MTU", Const, 0, ""},
+ {"IP_MTU_DISCOVER", Const, 0, ""},
+ {"IP_MULTICAST_IF", Const, 0, ""},
+ {"IP_MULTICAST_IFINDEX", Const, 0, ""},
+ {"IP_MULTICAST_LOOP", Const, 0, ""},
+ {"IP_MULTICAST_TTL", Const, 0, ""},
+ {"IP_MULTICAST_VIF", Const, 0, ""},
+ {"IP_NAT__XXX", Const, 0, ""},
+ {"IP_OFFMASK", Const, 0, ""},
+ {"IP_OLD_FW_ADD", Const, 0, ""},
+ {"IP_OLD_FW_DEL", Const, 0, ""},
+ {"IP_OLD_FW_FLUSH", Const, 0, ""},
+ {"IP_OLD_FW_GET", Const, 0, ""},
+ {"IP_OLD_FW_RESETLOG", Const, 0, ""},
+ {"IP_OLD_FW_ZERO", Const, 0, ""},
+ {"IP_ONESBCAST", Const, 0, ""},
+ {"IP_OPTIONS", Const, 0, ""},
+ {"IP_ORIGDSTADDR", Const, 0, ""},
+ {"IP_PASSSEC", Const, 0, ""},
+ {"IP_PIPEX", Const, 1, ""},
+ {"IP_PKTINFO", Const, 0, ""},
+ {"IP_PKTOPTIONS", Const, 0, ""},
+ {"IP_PMTUDISC", Const, 0, ""},
+ {"IP_PMTUDISC_DO", Const, 0, ""},
+ {"IP_PMTUDISC_DONT", Const, 0, ""},
+ {"IP_PMTUDISC_PROBE", Const, 0, ""},
+ {"IP_PMTUDISC_WANT", Const, 0, ""},
+ {"IP_PORTRANGE", Const, 0, ""},
+ {"IP_PORTRANGE_DEFAULT", Const, 0, ""},
+ {"IP_PORTRANGE_HIGH", Const, 0, ""},
+ {"IP_PORTRANGE_LOW", Const, 0, ""},
+ {"IP_RECVDSTADDR", Const, 0, ""},
+ {"IP_RECVDSTPORT", Const, 1, ""},
+ {"IP_RECVERR", Const, 0, ""},
+ {"IP_RECVIF", Const, 0, ""},
+ {"IP_RECVOPTS", Const, 0, ""},
+ {"IP_RECVORIGDSTADDR", Const, 0, ""},
+ {"IP_RECVPKTINFO", Const, 0, ""},
+ {"IP_RECVRETOPTS", Const, 0, ""},
+ {"IP_RECVRTABLE", Const, 1, ""},
+ {"IP_RECVTOS", Const, 0, ""},
+ {"IP_RECVTTL", Const, 0, ""},
+ {"IP_RETOPTS", Const, 0, ""},
+ {"IP_RF", Const, 0, ""},
+ {"IP_ROUTER_ALERT", Const, 0, ""},
+ {"IP_RSVP_OFF", Const, 0, ""},
+ {"IP_RSVP_ON", Const, 0, ""},
+ {"IP_RSVP_VIF_OFF", Const, 0, ""},
+ {"IP_RSVP_VIF_ON", Const, 0, ""},
+ {"IP_RTABLE", Const, 1, ""},
+ {"IP_SENDSRCADDR", Const, 0, ""},
+ {"IP_STRIPHDR", Const, 0, ""},
+ {"IP_TOS", Const, 0, ""},
+ {"IP_TRAFFIC_MGT_BACKGROUND", Const, 0, ""},
+ {"IP_TRANSPARENT", Const, 0, ""},
+ {"IP_TTL", Const, 0, ""},
+ {"IP_UNBLOCK_SOURCE", Const, 0, ""},
+ {"IP_XFRM_POLICY", Const, 0, ""},
+ {"IPv6MTUInfo", Type, 2, ""},
+ {"IPv6MTUInfo.Addr", Field, 2, ""},
+ {"IPv6MTUInfo.Mtu", Field, 2, ""},
+ {"IPv6Mreq", Type, 0, ""},
+ {"IPv6Mreq.Interface", Field, 0, ""},
+ {"IPv6Mreq.Multiaddr", Field, 0, ""},
+ {"ISIG", Const, 0, ""},
+ {"ISTRIP", Const, 0, ""},
+ {"IUCLC", Const, 0, ""},
+ {"IUTF8", Const, 0, ""},
+ {"IXANY", Const, 0, ""},
+ {"IXOFF", Const, 0, ""},
+ {"IXON", Const, 0, ""},
+ {"IfAddrmsg", Type, 0, ""},
+ {"IfAddrmsg.Family", Field, 0, ""},
+ {"IfAddrmsg.Flags", Field, 0, ""},
+ {"IfAddrmsg.Index", Field, 0, ""},
+ {"IfAddrmsg.Prefixlen", Field, 0, ""},
+ {"IfAddrmsg.Scope", Field, 0, ""},
+ {"IfAnnounceMsghdr", Type, 1, ""},
+ {"IfAnnounceMsghdr.Hdrlen", Field, 2, ""},
+ {"IfAnnounceMsghdr.Index", Field, 1, ""},
+ {"IfAnnounceMsghdr.Msglen", Field, 1, ""},
+ {"IfAnnounceMsghdr.Name", Field, 1, ""},
+ {"IfAnnounceMsghdr.Type", Field, 1, ""},
+ {"IfAnnounceMsghdr.Version", Field, 1, ""},
+ {"IfAnnounceMsghdr.What", Field, 1, ""},
+ {"IfData", Type, 0, ""},
+ {"IfData.Addrlen", Field, 0, ""},
+ {"IfData.Baudrate", Field, 0, ""},
+ {"IfData.Capabilities", Field, 2, ""},
+ {"IfData.Collisions", Field, 0, ""},
+ {"IfData.Datalen", Field, 0, ""},
+ {"IfData.Epoch", Field, 0, ""},
+ {"IfData.Hdrlen", Field, 0, ""},
+ {"IfData.Hwassist", Field, 0, ""},
+ {"IfData.Ibytes", Field, 0, ""},
+ {"IfData.Ierrors", Field, 0, ""},
+ {"IfData.Imcasts", Field, 0, ""},
+ {"IfData.Ipackets", Field, 0, ""},
+ {"IfData.Iqdrops", Field, 0, ""},
+ {"IfData.Lastchange", Field, 0, ""},
+ {"IfData.Link_state", Field, 0, ""},
+ {"IfData.Mclpool", Field, 2, ""},
+ {"IfData.Metric", Field, 0, ""},
+ {"IfData.Mtu", Field, 0, ""},
+ {"IfData.Noproto", Field, 0, ""},
+ {"IfData.Obytes", Field, 0, ""},
+ {"IfData.Oerrors", Field, 0, ""},
+ {"IfData.Omcasts", Field, 0, ""},
+ {"IfData.Opackets", Field, 0, ""},
+ {"IfData.Pad", Field, 2, ""},
+ {"IfData.Pad_cgo_0", Field, 2, ""},
+ {"IfData.Pad_cgo_1", Field, 2, ""},
+ {"IfData.Physical", Field, 0, ""},
+ {"IfData.Recvquota", Field, 0, ""},
+ {"IfData.Recvtiming", Field, 0, ""},
+ {"IfData.Reserved1", Field, 0, ""},
+ {"IfData.Reserved2", Field, 0, ""},
+ {"IfData.Spare_char1", Field, 0, ""},
+ {"IfData.Spare_char2", Field, 0, ""},
+ {"IfData.Type", Field, 0, ""},
+ {"IfData.Typelen", Field, 0, ""},
+ {"IfData.Unused1", Field, 0, ""},
+ {"IfData.Unused2", Field, 0, ""},
+ {"IfData.Xmitquota", Field, 0, ""},
+ {"IfData.Xmittiming", Field, 0, ""},
+ {"IfInfomsg", Type, 0, ""},
+ {"IfInfomsg.Change", Field, 0, ""},
+ {"IfInfomsg.Family", Field, 0, ""},
+ {"IfInfomsg.Flags", Field, 0, ""},
+ {"IfInfomsg.Index", Field, 0, ""},
+ {"IfInfomsg.Type", Field, 0, ""},
+ {"IfInfomsg.X__ifi_pad", Field, 0, ""},
+ {"IfMsghdr", Type, 0, ""},
+ {"IfMsghdr.Addrs", Field, 0, ""},
+ {"IfMsghdr.Data", Field, 0, ""},
+ {"IfMsghdr.Flags", Field, 0, ""},
+ {"IfMsghdr.Hdrlen", Field, 2, ""},
+ {"IfMsghdr.Index", Field, 0, ""},
+ {"IfMsghdr.Msglen", Field, 0, ""},
+ {"IfMsghdr.Pad1", Field, 2, ""},
+ {"IfMsghdr.Pad2", Field, 2, ""},
+ {"IfMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfMsghdr.Pad_cgo_1", Field, 2, ""},
+ {"IfMsghdr.Tableid", Field, 2, ""},
+ {"IfMsghdr.Type", Field, 0, ""},
+ {"IfMsghdr.Version", Field, 0, ""},
+ {"IfMsghdr.Xflags", Field, 2, ""},
+ {"IfaMsghdr", Type, 0, ""},
+ {"IfaMsghdr.Addrs", Field, 0, ""},
+ {"IfaMsghdr.Flags", Field, 0, ""},
+ {"IfaMsghdr.Hdrlen", Field, 2, ""},
+ {"IfaMsghdr.Index", Field, 0, ""},
+ {"IfaMsghdr.Metric", Field, 0, ""},
+ {"IfaMsghdr.Msglen", Field, 0, ""},
+ {"IfaMsghdr.Pad1", Field, 2, ""},
+ {"IfaMsghdr.Pad2", Field, 2, ""},
+ {"IfaMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfaMsghdr.Tableid", Field, 2, ""},
+ {"IfaMsghdr.Type", Field, 0, ""},
+ {"IfaMsghdr.Version", Field, 0, ""},
+ {"IfmaMsghdr", Type, 0, ""},
+ {"IfmaMsghdr.Addrs", Field, 0, ""},
+ {"IfmaMsghdr.Flags", Field, 0, ""},
+ {"IfmaMsghdr.Index", Field, 0, ""},
+ {"IfmaMsghdr.Msglen", Field, 0, ""},
+ {"IfmaMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"IfmaMsghdr.Type", Field, 0, ""},
+ {"IfmaMsghdr.Version", Field, 0, ""},
+ {"IfmaMsghdr2", Type, 0, ""},
+ {"IfmaMsghdr2.Addrs", Field, 0, ""},
+ {"IfmaMsghdr2.Flags", Field, 0, ""},
+ {"IfmaMsghdr2.Index", Field, 0, ""},
+ {"IfmaMsghdr2.Msglen", Field, 0, ""},
+ {"IfmaMsghdr2.Pad_cgo_0", Field, 0, ""},
+ {"IfmaMsghdr2.Refcount", Field, 0, ""},
+ {"IfmaMsghdr2.Type", Field, 0, ""},
+ {"IfmaMsghdr2.Version", Field, 0, ""},
+ {"ImplementsGetwd", Const, 0, ""},
+ {"Inet4Pktinfo", Type, 0, ""},
+ {"Inet4Pktinfo.Addr", Field, 0, ""},
+ {"Inet4Pktinfo.Ifindex", Field, 0, ""},
+ {"Inet4Pktinfo.Spec_dst", Field, 0, ""},
+ {"Inet6Pktinfo", Type, 0, ""},
+ {"Inet6Pktinfo.Addr", Field, 0, ""},
+ {"Inet6Pktinfo.Ifindex", Field, 0, ""},
+ {"InotifyAddWatch", Func, 0, "func(fd int, pathname string, mask uint32) (watchdesc int, err error)"},
+ {"InotifyEvent", Type, 0, ""},
+ {"InotifyEvent.Cookie", Field, 0, ""},
+ {"InotifyEvent.Len", Field, 0, ""},
+ {"InotifyEvent.Mask", Field, 0, ""},
+ {"InotifyEvent.Name", Field, 0, ""},
+ {"InotifyEvent.Wd", Field, 0, ""},
+ {"InotifyInit", Func, 0, "func() (fd int, err error)"},
+ {"InotifyInit1", Func, 0, "func(flags int) (fd int, err error)"},
+ {"InotifyRmWatch", Func, 0, "func(fd int, watchdesc uint32) (success int, err error)"},
+ {"InterfaceAddrMessage", Type, 0, ""},
+ {"InterfaceAddrMessage.Data", Field, 0, ""},
+ {"InterfaceAddrMessage.Header", Field, 0, ""},
+ {"InterfaceAnnounceMessage", Type, 1, ""},
+ {"InterfaceAnnounceMessage.Header", Field, 1, ""},
+ {"InterfaceInfo", Type, 0, ""},
+ {"InterfaceInfo.Address", Field, 0, ""},
+ {"InterfaceInfo.BroadcastAddress", Field, 0, ""},
+ {"InterfaceInfo.Flags", Field, 0, ""},
+ {"InterfaceInfo.Netmask", Field, 0, ""},
+ {"InterfaceMessage", Type, 0, ""},
+ {"InterfaceMessage.Data", Field, 0, ""},
+ {"InterfaceMessage.Header", Field, 0, ""},
+ {"InterfaceMulticastAddrMessage", Type, 0, ""},
+ {"InterfaceMulticastAddrMessage.Data", Field, 0, ""},
+ {"InterfaceMulticastAddrMessage.Header", Field, 0, ""},
+ {"InvalidHandle", Const, 0, ""},
+ {"Ioperm", Func, 0, "func(from int, num int, on int) (err error)"},
+ {"Iopl", Func, 0, "func(level int) (err error)"},
+ {"Iovec", Type, 0, ""},
+ {"Iovec.Base", Field, 0, ""},
+ {"Iovec.Len", Field, 0, ""},
+ {"IpAdapterInfo", Type, 0, ""},
+ {"IpAdapterInfo.AdapterName", Field, 0, ""},
+ {"IpAdapterInfo.Address", Field, 0, ""},
+ {"IpAdapterInfo.AddressLength", Field, 0, ""},
+ {"IpAdapterInfo.ComboIndex", Field, 0, ""},
+ {"IpAdapterInfo.CurrentIpAddress", Field, 0, ""},
+ {"IpAdapterInfo.Description", Field, 0, ""},
+ {"IpAdapterInfo.DhcpEnabled", Field, 0, ""},
+ {"IpAdapterInfo.DhcpServer", Field, 0, ""},
+ {"IpAdapterInfo.GatewayList", Field, 0, ""},
+ {"IpAdapterInfo.HaveWins", Field, 0, ""},
+ {"IpAdapterInfo.Index", Field, 0, ""},
+ {"IpAdapterInfo.IpAddressList", Field, 0, ""},
+ {"IpAdapterInfo.LeaseExpires", Field, 0, ""},
+ {"IpAdapterInfo.LeaseObtained", Field, 0, ""},
+ {"IpAdapterInfo.Next", Field, 0, ""},
+ {"IpAdapterInfo.PrimaryWinsServer", Field, 0, ""},
+ {"IpAdapterInfo.SecondaryWinsServer", Field, 0, ""},
+ {"IpAdapterInfo.Type", Field, 0, ""},
+ {"IpAddrString", Type, 0, ""},
+ {"IpAddrString.Context", Field, 0, ""},
+ {"IpAddrString.IpAddress", Field, 0, ""},
+ {"IpAddrString.IpMask", Field, 0, ""},
+ {"IpAddrString.Next", Field, 0, ""},
+ {"IpAddressString", Type, 0, ""},
+ {"IpAddressString.String", Field, 0, ""},
+ {"IpMaskString", Type, 0, ""},
+ {"IpMaskString.String", Field, 2, ""},
+ {"Issetugid", Func, 0, ""},
+ {"KEY_ALL_ACCESS", Const, 0, ""},
+ {"KEY_CREATE_LINK", Const, 0, ""},
+ {"KEY_CREATE_SUB_KEY", Const, 0, ""},
+ {"KEY_ENUMERATE_SUB_KEYS", Const, 0, ""},
+ {"KEY_EXECUTE", Const, 0, ""},
+ {"KEY_NOTIFY", Const, 0, ""},
+ {"KEY_QUERY_VALUE", Const, 0, ""},
+ {"KEY_READ", Const, 0, ""},
+ {"KEY_SET_VALUE", Const, 0, ""},
+ {"KEY_WOW64_32KEY", Const, 0, ""},
+ {"KEY_WOW64_64KEY", Const, 0, ""},
+ {"KEY_WRITE", Const, 0, ""},
+ {"Kevent", Func, 0, ""},
+ {"Kevent_t", Type, 0, ""},
+ {"Kevent_t.Data", Field, 0, ""},
+ {"Kevent_t.Fflags", Field, 0, ""},
+ {"Kevent_t.Filter", Field, 0, ""},
+ {"Kevent_t.Flags", Field, 0, ""},
+ {"Kevent_t.Ident", Field, 0, ""},
+ {"Kevent_t.Pad_cgo_0", Field, 2, ""},
+ {"Kevent_t.Udata", Field, 0, ""},
+ {"Kill", Func, 0, "func(pid int, sig Signal) (err error)"},
+ {"Klogctl", Func, 0, "func(typ int, buf []byte) (n int, err error)"},
+ {"Kqueue", Func, 0, ""},
+ {"LANG_ENGLISH", Const, 0, ""},
+ {"LAYERED_PROTOCOL", Const, 2, ""},
+ {"LCNT_OVERLOAD_FLUSH", Const, 1, ""},
+ {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_CAD_ON", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_HALT", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_KEXEC", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_RESTART", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_RESTART2", Const, 0, ""},
+ {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0, ""},
+ {"LINUX_REBOOT_MAGIC1", Const, 0, ""},
+ {"LINUX_REBOOT_MAGIC2", Const, 0, ""},
+ {"LOCK_EX", Const, 0, ""},
+ {"LOCK_NB", Const, 0, ""},
+ {"LOCK_SH", Const, 0, ""},
+ {"LOCK_UN", Const, 0, ""},
+ {"LazyDLL", Type, 0, ""},
+ {"LazyDLL.Name", Field, 0, ""},
+ {"LazyProc", Type, 0, ""},
+ {"LazyProc.Name", Field, 0, ""},
+ {"Lchown", Func, 0, "func(path string, uid int, gid int) (err error)"},
+ {"Linger", Type, 0, ""},
+ {"Linger.Linger", Field, 0, ""},
+ {"Linger.Onoff", Field, 0, ""},
+ {"Link", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Listen", Func, 0, "func(s int, n int) (err error)"},
+ {"Listxattr", Func, 1, "func(path string, dest []byte) (sz int, err error)"},
+ {"LoadCancelIoEx", Func, 1, ""},
+ {"LoadConnectEx", Func, 1, ""},
+ {"LoadCreateSymbolicLink", Func, 4, ""},
+ {"LoadDLL", Func, 0, ""},
+ {"LoadGetAddrInfo", Func, 1, ""},
+ {"LoadLibrary", Func, 0, ""},
+ {"LoadSetFileCompletionNotificationModes", Func, 2, ""},
+ {"LocalFree", Func, 0, ""},
+ {"Log2phys_t", Type, 0, ""},
+ {"Log2phys_t.Contigbytes", Field, 0, ""},
+ {"Log2phys_t.Devoffset", Field, 0, ""},
+ {"Log2phys_t.Flags", Field, 0, ""},
+ {"LookupAccountName", Func, 0, ""},
+ {"LookupAccountSid", Func, 0, ""},
+ {"LookupSID", Func, 0, ""},
+ {"LsfJump", Func, 0, "func(code int, k int, jt int, jf int) *SockFilter"},
+ {"LsfSocket", Func, 0, "func(ifindex int, proto int) (int, error)"},
+ {"LsfStmt", Func, 0, "func(code int, k int) *SockFilter"},
+ {"Lstat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+ {"MADV_AUTOSYNC", Const, 1, ""},
+ {"MADV_CAN_REUSE", Const, 0, ""},
+ {"MADV_CORE", Const, 1, ""},
+ {"MADV_DOFORK", Const, 0, ""},
+ {"MADV_DONTFORK", Const, 0, ""},
+ {"MADV_DONTNEED", Const, 0, ""},
+ {"MADV_FREE", Const, 0, ""},
+ {"MADV_FREE_REUSABLE", Const, 0, ""},
+ {"MADV_FREE_REUSE", Const, 0, ""},
+ {"MADV_HUGEPAGE", Const, 0, ""},
+ {"MADV_HWPOISON", Const, 0, ""},
+ {"MADV_MERGEABLE", Const, 0, ""},
+ {"MADV_NOCORE", Const, 1, ""},
+ {"MADV_NOHUGEPAGE", Const, 0, ""},
+ {"MADV_NORMAL", Const, 0, ""},
+ {"MADV_NOSYNC", Const, 1, ""},
+ {"MADV_PROTECT", Const, 1, ""},
+ {"MADV_RANDOM", Const, 0, ""},
+ {"MADV_REMOVE", Const, 0, ""},
+ {"MADV_SEQUENTIAL", Const, 0, ""},
+ {"MADV_SPACEAVAIL", Const, 3, ""},
+ {"MADV_UNMERGEABLE", Const, 0, ""},
+ {"MADV_WILLNEED", Const, 0, ""},
+ {"MADV_ZERO_WIRED_PAGES", Const, 0, ""},
+ {"MAP_32BIT", Const, 0, ""},
+ {"MAP_ALIGNED_SUPER", Const, 3, ""},
+ {"MAP_ALIGNMENT_16MB", Const, 3, ""},
+ {"MAP_ALIGNMENT_1TB", Const, 3, ""},
+ {"MAP_ALIGNMENT_256TB", Const, 3, ""},
+ {"MAP_ALIGNMENT_4GB", Const, 3, ""},
+ {"MAP_ALIGNMENT_64KB", Const, 3, ""},
+ {"MAP_ALIGNMENT_64PB", Const, 3, ""},
+ {"MAP_ALIGNMENT_MASK", Const, 3, ""},
+ {"MAP_ALIGNMENT_SHIFT", Const, 3, ""},
+ {"MAP_ANON", Const, 0, ""},
+ {"MAP_ANONYMOUS", Const, 0, ""},
+ {"MAP_COPY", Const, 0, ""},
+ {"MAP_DENYWRITE", Const, 0, ""},
+ {"MAP_EXECUTABLE", Const, 0, ""},
+ {"MAP_FILE", Const, 0, ""},
+ {"MAP_FIXED", Const, 0, ""},
+ {"MAP_FLAGMASK", Const, 3, ""},
+ {"MAP_GROWSDOWN", Const, 0, ""},
+ {"MAP_HASSEMAPHORE", Const, 0, ""},
+ {"MAP_HUGETLB", Const, 0, ""},
+ {"MAP_INHERIT", Const, 3, ""},
+ {"MAP_INHERIT_COPY", Const, 3, ""},
+ {"MAP_INHERIT_DEFAULT", Const, 3, ""},
+ {"MAP_INHERIT_DONATE_COPY", Const, 3, ""},
+ {"MAP_INHERIT_NONE", Const, 3, ""},
+ {"MAP_INHERIT_SHARE", Const, 3, ""},
+ {"MAP_JIT", Const, 0, ""},
+ {"MAP_LOCKED", Const, 0, ""},
+ {"MAP_NOCACHE", Const, 0, ""},
+ {"MAP_NOCORE", Const, 1, ""},
+ {"MAP_NOEXTEND", Const, 0, ""},
+ {"MAP_NONBLOCK", Const, 0, ""},
+ {"MAP_NORESERVE", Const, 0, ""},
+ {"MAP_NOSYNC", Const, 1, ""},
+ {"MAP_POPULATE", Const, 0, ""},
+ {"MAP_PREFAULT_READ", Const, 1, ""},
+ {"MAP_PRIVATE", Const, 0, ""},
+ {"MAP_RENAME", Const, 0, ""},
+ {"MAP_RESERVED0080", Const, 0, ""},
+ {"MAP_RESERVED0100", Const, 1, ""},
+ {"MAP_SHARED", Const, 0, ""},
+ {"MAP_STACK", Const, 0, ""},
+ {"MAP_TRYFIXED", Const, 3, ""},
+ {"MAP_TYPE", Const, 0, ""},
+ {"MAP_WIRED", Const, 3, ""},
+ {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4, ""},
+ {"MAXLEN_IFDESCR", Const, 0, ""},
+ {"MAXLEN_PHYSADDR", Const, 0, ""},
+ {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0, ""},
+ {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0, ""},
+ {"MAX_ADAPTER_NAME_LENGTH", Const, 0, ""},
+ {"MAX_COMPUTERNAME_LENGTH", Const, 0, ""},
+ {"MAX_INTERFACE_NAME_LEN", Const, 0, ""},
+ {"MAX_LONG_PATH", Const, 0, ""},
+ {"MAX_PATH", Const, 0, ""},
+ {"MAX_PROTOCOL_CHAIN", Const, 2, ""},
+ {"MCL_CURRENT", Const, 0, ""},
+ {"MCL_FUTURE", Const, 0, ""},
+ {"MNT_DETACH", Const, 0, ""},
+ {"MNT_EXPIRE", Const, 0, ""},
+ {"MNT_FORCE", Const, 0, ""},
+ {"MSG_BCAST", Const, 1, ""},
+ {"MSG_CMSG_CLOEXEC", Const, 0, ""},
+ {"MSG_COMPAT", Const, 0, ""},
+ {"MSG_CONFIRM", Const, 0, ""},
+ {"MSG_CONTROLMBUF", Const, 1, ""},
+ {"MSG_CTRUNC", Const, 0, ""},
+ {"MSG_DONTROUTE", Const, 0, ""},
+ {"MSG_DONTWAIT", Const, 0, ""},
+ {"MSG_EOF", Const, 0, ""},
+ {"MSG_EOR", Const, 0, ""},
+ {"MSG_ERRQUEUE", Const, 0, ""},
+ {"MSG_FASTOPEN", Const, 1, ""},
+ {"MSG_FIN", Const, 0, ""},
+ {"MSG_FLUSH", Const, 0, ""},
+ {"MSG_HAVEMORE", Const, 0, ""},
+ {"MSG_HOLD", Const, 0, ""},
+ {"MSG_IOVUSRSPACE", Const, 1, ""},
+ {"MSG_LENUSRSPACE", Const, 1, ""},
+ {"MSG_MCAST", Const, 1, ""},
+ {"MSG_MORE", Const, 0, ""},
+ {"MSG_NAMEMBUF", Const, 1, ""},
+ {"MSG_NBIO", Const, 0, ""},
+ {"MSG_NEEDSA", Const, 0, ""},
+ {"MSG_NOSIGNAL", Const, 0, ""},
+ {"MSG_NOTIFICATION", Const, 0, ""},
+ {"MSG_OOB", Const, 0, ""},
+ {"MSG_PEEK", Const, 0, ""},
+ {"MSG_PROXY", Const, 0, ""},
+ {"MSG_RCVMORE", Const, 0, ""},
+ {"MSG_RST", Const, 0, ""},
+ {"MSG_SEND", Const, 0, ""},
+ {"MSG_SYN", Const, 0, ""},
+ {"MSG_TRUNC", Const, 0, ""},
+ {"MSG_TRYHARD", Const, 0, ""},
+ {"MSG_USERFLAGS", Const, 1, ""},
+ {"MSG_WAITALL", Const, 0, ""},
+ {"MSG_WAITFORONE", Const, 0, ""},
+ {"MSG_WAITSTREAM", Const, 0, ""},
+ {"MS_ACTIVE", Const, 0, ""},
+ {"MS_ASYNC", Const, 0, ""},
+ {"MS_BIND", Const, 0, ""},
+ {"MS_DEACTIVATE", Const, 0, ""},
+ {"MS_DIRSYNC", Const, 0, ""},
+ {"MS_INVALIDATE", Const, 0, ""},
+ {"MS_I_VERSION", Const, 0, ""},
+ {"MS_KERNMOUNT", Const, 0, ""},
+ {"MS_KILLPAGES", Const, 0, ""},
+ {"MS_MANDLOCK", Const, 0, ""},
+ {"MS_MGC_MSK", Const, 0, ""},
+ {"MS_MGC_VAL", Const, 0, ""},
+ {"MS_MOVE", Const, 0, ""},
+ {"MS_NOATIME", Const, 0, ""},
+ {"MS_NODEV", Const, 0, ""},
+ {"MS_NODIRATIME", Const, 0, ""},
+ {"MS_NOEXEC", Const, 0, ""},
+ {"MS_NOSUID", Const, 0, ""},
+ {"MS_NOUSER", Const, 0, ""},
+ {"MS_POSIXACL", Const, 0, ""},
+ {"MS_PRIVATE", Const, 0, ""},
+ {"MS_RDONLY", Const, 0, ""},
+ {"MS_REC", Const, 0, ""},
+ {"MS_RELATIME", Const, 0, ""},
+ {"MS_REMOUNT", Const, 0, ""},
+ {"MS_RMT_MASK", Const, 0, ""},
+ {"MS_SHARED", Const, 0, ""},
+ {"MS_SILENT", Const, 0, ""},
+ {"MS_SLAVE", Const, 0, ""},
+ {"MS_STRICTATIME", Const, 0, ""},
+ {"MS_SYNC", Const, 0, ""},
+ {"MS_SYNCHRONOUS", Const, 0, ""},
+ {"MS_UNBINDABLE", Const, 0, ""},
+ {"Madvise", Func, 0, "func(b []byte, advice int) (err error)"},
+ {"MapViewOfFile", Func, 0, ""},
+ {"MaxTokenInfoClass", Const, 0, ""},
+ {"Mclpool", Type, 2, ""},
+ {"Mclpool.Alive", Field, 2, ""},
+ {"Mclpool.Cwm", Field, 2, ""},
+ {"Mclpool.Grown", Field, 2, ""},
+ {"Mclpool.Hwm", Field, 2, ""},
+ {"Mclpool.Lwm", Field, 2, ""},
+ {"MibIfRow", Type, 0, ""},
+ {"MibIfRow.AdminStatus", Field, 0, ""},
+ {"MibIfRow.Descr", Field, 0, ""},
+ {"MibIfRow.DescrLen", Field, 0, ""},
+ {"MibIfRow.InDiscards", Field, 0, ""},
+ {"MibIfRow.InErrors", Field, 0, ""},
+ {"MibIfRow.InNUcastPkts", Field, 0, ""},
+ {"MibIfRow.InOctets", Field, 0, ""},
+ {"MibIfRow.InUcastPkts", Field, 0, ""},
+ {"MibIfRow.InUnknownProtos", Field, 0, ""},
+ {"MibIfRow.Index", Field, 0, ""},
+ {"MibIfRow.LastChange", Field, 0, ""},
+ {"MibIfRow.Mtu", Field, 0, ""},
+ {"MibIfRow.Name", Field, 0, ""},
+ {"MibIfRow.OperStatus", Field, 0, ""},
+ {"MibIfRow.OutDiscards", Field, 0, ""},
+ {"MibIfRow.OutErrors", Field, 0, ""},
+ {"MibIfRow.OutNUcastPkts", Field, 0, ""},
+ {"MibIfRow.OutOctets", Field, 0, ""},
+ {"MibIfRow.OutQLen", Field, 0, ""},
+ {"MibIfRow.OutUcastPkts", Field, 0, ""},
+ {"MibIfRow.PhysAddr", Field, 0, ""},
+ {"MibIfRow.PhysAddrLen", Field, 0, ""},
+ {"MibIfRow.Speed", Field, 0, ""},
+ {"MibIfRow.Type", Field, 0, ""},
+ {"Mkdir", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Mkdirat", Func, 0, "func(dirfd int, path string, mode uint32) (err error)"},
+ {"Mkfifo", Func, 0, "func(path string, mode uint32) (err error)"},
+ {"Mknod", Func, 0, "func(path string, mode uint32, dev int) (err error)"},
+ {"Mknodat", Func, 0, "func(dirfd int, path string, mode uint32, dev int) (err error)"},
+ {"Mlock", Func, 0, "func(b []byte) (err error)"},
+ {"Mlockall", Func, 0, "func(flags int) (err error)"},
+ {"Mmap", Func, 0, "func(fd int, offset int64, length int, prot int, flags int) (data []byte, err error)"},
+ {"Mount", Func, 0, "func(source string, target string, fstype string, flags uintptr, data string) (err error)"},
+ {"MoveFile", Func, 0, ""},
+ {"Mprotect", Func, 0, "func(b []byte, prot int) (err error)"},
+ {"Msghdr", Type, 0, ""},
+ {"Msghdr.Control", Field, 0, ""},
+ {"Msghdr.Controllen", Field, 0, ""},
+ {"Msghdr.Flags", Field, 0, ""},
+ {"Msghdr.Iov", Field, 0, ""},
+ {"Msghdr.Iovlen", Field, 0, ""},
+ {"Msghdr.Name", Field, 0, ""},
+ {"Msghdr.Namelen", Field, 0, ""},
+ {"Msghdr.Pad_cgo_0", Field, 0, ""},
+ {"Msghdr.Pad_cgo_1", Field, 0, ""},
+ {"Munlock", Func, 0, "func(b []byte) (err error)"},
+ {"Munlockall", Func, 0, "func() (err error)"},
+ {"Munmap", Func, 0, "func(b []byte) (err error)"},
+ {"MustLoadDLL", Func, 0, ""},
+ {"NAME_MAX", Const, 0, ""},
+ {"NETLINK_ADD_MEMBERSHIP", Const, 0, ""},
+ {"NETLINK_AUDIT", Const, 0, ""},
+ {"NETLINK_BROADCAST_ERROR", Const, 0, ""},
+ {"NETLINK_CONNECTOR", Const, 0, ""},
+ {"NETLINK_DNRTMSG", Const, 0, ""},
+ {"NETLINK_DROP_MEMBERSHIP", Const, 0, ""},
+ {"NETLINK_ECRYPTFS", Const, 0, ""},
+ {"NETLINK_FIB_LOOKUP", Const, 0, ""},
+ {"NETLINK_FIREWALL", Const, 0, ""},
+ {"NETLINK_GENERIC", Const, 0, ""},
+ {"NETLINK_INET_DIAG", Const, 0, ""},
+ {"NETLINK_IP6_FW", Const, 0, ""},
+ {"NETLINK_ISCSI", Const, 0, ""},
+ {"NETLINK_KOBJECT_UEVENT", Const, 0, ""},
+ {"NETLINK_NETFILTER", Const, 0, ""},
+ {"NETLINK_NFLOG", Const, 0, ""},
+ {"NETLINK_NO_ENOBUFS", Const, 0, ""},
+ {"NETLINK_PKTINFO", Const, 0, ""},
+ {"NETLINK_RDMA", Const, 0, ""},
+ {"NETLINK_ROUTE", Const, 0, ""},
+ {"NETLINK_SCSITRANSPORT", Const, 0, ""},
+ {"NETLINK_SELINUX", Const, 0, ""},
+ {"NETLINK_UNUSED", Const, 0, ""},
+ {"NETLINK_USERSOCK", Const, 0, ""},
+ {"NETLINK_XFRM", Const, 0, ""},
+ {"NET_RT_DUMP", Const, 0, ""},
+ {"NET_RT_DUMP2", Const, 0, ""},
+ {"NET_RT_FLAGS", Const, 0, ""},
+ {"NET_RT_IFLIST", Const, 0, ""},
+ {"NET_RT_IFLIST2", Const, 0, ""},
+ {"NET_RT_IFLISTL", Const, 1, ""},
+ {"NET_RT_IFMALIST", Const, 0, ""},
+ {"NET_RT_MAXID", Const, 0, ""},
+ {"NET_RT_OIFLIST", Const, 1, ""},
+ {"NET_RT_OOIFLIST", Const, 1, ""},
+ {"NET_RT_STAT", Const, 0, ""},
+ {"NET_RT_STATS", Const, 1, ""},
+ {"NET_RT_TABLE", Const, 1, ""},
+ {"NET_RT_TRASH", Const, 0, ""},
+ {"NLA_ALIGNTO", Const, 0, ""},
+ {"NLA_F_NESTED", Const, 0, ""},
+ {"NLA_F_NET_BYTEORDER", Const, 0, ""},
+ {"NLA_HDRLEN", Const, 0, ""},
+ {"NLMSG_ALIGNTO", Const, 0, ""},
+ {"NLMSG_DONE", Const, 0, ""},
+ {"NLMSG_ERROR", Const, 0, ""},
+ {"NLMSG_HDRLEN", Const, 0, ""},
+ {"NLMSG_MIN_TYPE", Const, 0, ""},
+ {"NLMSG_NOOP", Const, 0, ""},
+ {"NLMSG_OVERRUN", Const, 0, ""},
+ {"NLM_F_ACK", Const, 0, ""},
+ {"NLM_F_APPEND", Const, 0, ""},
+ {"NLM_F_ATOMIC", Const, 0, ""},
+ {"NLM_F_CREATE", Const, 0, ""},
+ {"NLM_F_DUMP", Const, 0, ""},
+ {"NLM_F_ECHO", Const, 0, ""},
+ {"NLM_F_EXCL", Const, 0, ""},
+ {"NLM_F_MATCH", Const, 0, ""},
+ {"NLM_F_MULTI", Const, 0, ""},
+ {"NLM_F_REPLACE", Const, 0, ""},
+ {"NLM_F_REQUEST", Const, 0, ""},
+ {"NLM_F_ROOT", Const, 0, ""},
+ {"NOFLSH", Const, 0, ""},
+ {"NOTE_ABSOLUTE", Const, 0, ""},
+ {"NOTE_ATTRIB", Const, 0, ""},
+ {"NOTE_BACKGROUND", Const, 16, ""},
+ {"NOTE_CHILD", Const, 0, ""},
+ {"NOTE_CRITICAL", Const, 16, ""},
+ {"NOTE_DELETE", Const, 0, ""},
+ {"NOTE_EOF", Const, 1, ""},
+ {"NOTE_EXEC", Const, 0, ""},
+ {"NOTE_EXIT", Const, 0, ""},
+ {"NOTE_EXITSTATUS", Const, 0, ""},
+ {"NOTE_EXIT_CSERROR", Const, 16, ""},
+ {"NOTE_EXIT_DECRYPTFAIL", Const, 16, ""},
+ {"NOTE_EXIT_DETAIL", Const, 16, ""},
+ {"NOTE_EXIT_DETAIL_MASK", Const, 16, ""},
+ {"NOTE_EXIT_MEMORY", Const, 16, ""},
+ {"NOTE_EXIT_REPARENTED", Const, 16, ""},
+ {"NOTE_EXTEND", Const, 0, ""},
+ {"NOTE_FFAND", Const, 0, ""},
+ {"NOTE_FFCOPY", Const, 0, ""},
+ {"NOTE_FFCTRLMASK", Const, 0, ""},
+ {"NOTE_FFLAGSMASK", Const, 0, ""},
+ {"NOTE_FFNOP", Const, 0, ""},
+ {"NOTE_FFOR", Const, 0, ""},
+ {"NOTE_FORK", Const, 0, ""},
+ {"NOTE_LEEWAY", Const, 16, ""},
+ {"NOTE_LINK", Const, 0, ""},
+ {"NOTE_LOWAT", Const, 0, ""},
+ {"NOTE_NONE", Const, 0, ""},
+ {"NOTE_NSECONDS", Const, 0, ""},
+ {"NOTE_PCTRLMASK", Const, 0, ""},
+ {"NOTE_PDATAMASK", Const, 0, ""},
+ {"NOTE_REAP", Const, 0, ""},
+ {"NOTE_RENAME", Const, 0, ""},
+ {"NOTE_RESOURCEEND", Const, 0, ""},
+ {"NOTE_REVOKE", Const, 0, ""},
+ {"NOTE_SECONDS", Const, 0, ""},
+ {"NOTE_SIGNAL", Const, 0, ""},
+ {"NOTE_TRACK", Const, 0, ""},
+ {"NOTE_TRACKERR", Const, 0, ""},
+ {"NOTE_TRIGGER", Const, 0, ""},
+ {"NOTE_TRUNCATE", Const, 1, ""},
+ {"NOTE_USECONDS", Const, 0, ""},
+ {"NOTE_VM_ERROR", Const, 0, ""},
+ {"NOTE_VM_PRESSURE", Const, 0, ""},
+ {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0, ""},
+ {"NOTE_VM_PRESSURE_TERMINATE", Const, 0, ""},
+ {"NOTE_WRITE", Const, 0, ""},
+ {"NameCanonical", Const, 0, ""},
+ {"NameCanonicalEx", Const, 0, ""},
+ {"NameDisplay", Const, 0, ""},
+ {"NameDnsDomain", Const, 0, ""},
+ {"NameFullyQualifiedDN", Const, 0, ""},
+ {"NameSamCompatible", Const, 0, ""},
+ {"NameServicePrincipal", Const, 0, ""},
+ {"NameUniqueId", Const, 0, ""},
+ {"NameUnknown", Const, 0, ""},
+ {"NameUserPrincipal", Const, 0, ""},
+ {"Nanosleep", Func, 0, "func(time *Timespec, leftover *Timespec) (err error)"},
+ {"NetApiBufferFree", Func, 0, ""},
+ {"NetGetJoinInformation", Func, 2, ""},
+ {"NetSetupDomainName", Const, 2, ""},
+ {"NetSetupUnjoined", Const, 2, ""},
+ {"NetSetupUnknownStatus", Const, 2, ""},
+ {"NetSetupWorkgroupName", Const, 2, ""},
+ {"NetUserGetInfo", Func, 0, ""},
+ {"NetlinkMessage", Type, 0, ""},
+ {"NetlinkMessage.Data", Field, 0, ""},
+ {"NetlinkMessage.Header", Field, 0, ""},
+ {"NetlinkRIB", Func, 0, "func(proto int, family int) ([]byte, error)"},
+ {"NetlinkRouteAttr", Type, 0, ""},
+ {"NetlinkRouteAttr.Attr", Field, 0, ""},
+ {"NetlinkRouteAttr.Value", Field, 0, ""},
+ {"NetlinkRouteRequest", Type, 0, ""},
+ {"NetlinkRouteRequest.Data", Field, 0, ""},
+ {"NetlinkRouteRequest.Header", Field, 0, ""},
+ {"NewCallback", Func, 0, ""},
+ {"NewCallbackCDecl", Func, 3, ""},
+ {"NewLazyDLL", Func, 0, ""},
+ {"NlAttr", Type, 0, ""},
+ {"NlAttr.Len", Field, 0, ""},
+ {"NlAttr.Type", Field, 0, ""},
+ {"NlMsgerr", Type, 0, ""},
+ {"NlMsgerr.Error", Field, 0, ""},
+ {"NlMsgerr.Msg", Field, 0, ""},
+ {"NlMsghdr", Type, 0, ""},
+ {"NlMsghdr.Flags", Field, 0, ""},
+ {"NlMsghdr.Len", Field, 0, ""},
+ {"NlMsghdr.Pid", Field, 0, ""},
+ {"NlMsghdr.Seq", Field, 0, ""},
+ {"NlMsghdr.Type", Field, 0, ""},
+ {"NsecToFiletime", Func, 0, ""},
+ {"NsecToTimespec", Func, 0, "func(nsec int64) Timespec"},
+ {"NsecToTimeval", Func, 0, "func(nsec int64) Timeval"},
+ {"Ntohs", Func, 0, ""},
+ {"OCRNL", Const, 0, ""},
+ {"OFDEL", Const, 0, ""},
+ {"OFILL", Const, 0, ""},
+ {"OFIOGETBMAP", Const, 1, ""},
+ {"OID_PKIX_KP_SERVER_AUTH", Var, 0, ""},
+ {"OID_SERVER_GATED_CRYPTO", Var, 0, ""},
+ {"OID_SGC_NETSCAPE", Var, 0, ""},
+ {"OLCUC", Const, 0, ""},
+ {"ONLCR", Const, 0, ""},
+ {"ONLRET", Const, 0, ""},
+ {"ONOCR", Const, 0, ""},
+ {"ONOEOT", Const, 1, ""},
+ {"OPEN_ALWAYS", Const, 0, ""},
+ {"OPEN_EXISTING", Const, 0, ""},
+ {"OPOST", Const, 0, ""},
+ {"O_ACCMODE", Const, 0, ""},
+ {"O_ALERT", Const, 0, ""},
+ {"O_ALT_IO", Const, 1, ""},
+ {"O_APPEND", Const, 0, ""},
+ {"O_ASYNC", Const, 0, ""},
+ {"O_CLOEXEC", Const, 0, ""},
+ {"O_CREAT", Const, 0, ""},
+ {"O_DIRECT", Const, 0, ""},
+ {"O_DIRECTORY", Const, 0, ""},
+ {"O_DP_GETRAWENCRYPTED", Const, 16, ""},
+ {"O_DSYNC", Const, 0, ""},
+ {"O_EVTONLY", Const, 0, ""},
+ {"O_EXCL", Const, 0, ""},
+ {"O_EXEC", Const, 0, ""},
+ {"O_EXLOCK", Const, 0, ""},
+ {"O_FSYNC", Const, 0, ""},
+ {"O_LARGEFILE", Const, 0, ""},
+ {"O_NDELAY", Const, 0, ""},
+ {"O_NOATIME", Const, 0, ""},
+ {"O_NOCTTY", Const, 0, ""},
+ {"O_NOFOLLOW", Const, 0, ""},
+ {"O_NONBLOCK", Const, 0, ""},
+ {"O_NOSIGPIPE", Const, 1, ""},
+ {"O_POPUP", Const, 0, ""},
+ {"O_RDONLY", Const, 0, ""},
+ {"O_RDWR", Const, 0, ""},
+ {"O_RSYNC", Const, 0, ""},
+ {"O_SHLOCK", Const, 0, ""},
+ {"O_SYMLINK", Const, 0, ""},
+ {"O_SYNC", Const, 0, ""},
+ {"O_TRUNC", Const, 0, ""},
+ {"O_TTY_INIT", Const, 0, ""},
+ {"O_WRONLY", Const, 0, ""},
+ {"Open", Func, 0, "func(path string, mode int, perm uint32) (fd int, err error)"},
+ {"OpenCurrentProcessToken", Func, 0, ""},
+ {"OpenProcess", Func, 0, ""},
+ {"OpenProcessToken", Func, 0, ""},
+ {"Openat", Func, 0, "func(dirfd int, path string, flags int, mode uint32) (fd int, err error)"},
+ {"Overlapped", Type, 0, ""},
+ {"Overlapped.HEvent", Field, 0, ""},
+ {"Overlapped.Internal", Field, 0, ""},
+ {"Overlapped.InternalHigh", Field, 0, ""},
+ {"Overlapped.Offset", Field, 0, ""},
+ {"Overlapped.OffsetHigh", Field, 0, ""},
+ {"PACKET_ADD_MEMBERSHIP", Const, 0, ""},
+ {"PACKET_BROADCAST", Const, 0, ""},
+ {"PACKET_DROP_MEMBERSHIP", Const, 0, ""},
+ {"PACKET_FASTROUTE", Const, 0, ""},
+ {"PACKET_HOST", Const, 0, ""},
+ {"PACKET_LOOPBACK", Const, 0, ""},
+ {"PACKET_MR_ALLMULTI", Const, 0, ""},
+ {"PACKET_MR_MULTICAST", Const, 0, ""},
+ {"PACKET_MR_PROMISC", Const, 0, ""},
+ {"PACKET_MULTICAST", Const, 0, ""},
+ {"PACKET_OTHERHOST", Const, 0, ""},
+ {"PACKET_OUTGOING", Const, 0, ""},
+ {"PACKET_RECV_OUTPUT", Const, 0, ""},
+ {"PACKET_RX_RING", Const, 0, ""},
+ {"PACKET_STATISTICS", Const, 0, ""},
+ {"PAGE_EXECUTE_READ", Const, 0, ""},
+ {"PAGE_EXECUTE_READWRITE", Const, 0, ""},
+ {"PAGE_EXECUTE_WRITECOPY", Const, 0, ""},
+ {"PAGE_READONLY", Const, 0, ""},
+ {"PAGE_READWRITE", Const, 0, ""},
+ {"PAGE_WRITECOPY", Const, 0, ""},
+ {"PARENB", Const, 0, ""},
+ {"PARMRK", Const, 0, ""},
+ {"PARODD", Const, 0, ""},
+ {"PENDIN", Const, 0, ""},
+ {"PFL_HIDDEN", Const, 2, ""},
+ {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2, ""},
+ {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2, ""},
+ {"PFL_NETWORKDIRECT_PROVIDER", Const, 2, ""},
+ {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2, ""},
+ {"PF_FLUSH", Const, 1, ""},
+ {"PKCS_7_ASN_ENCODING", Const, 0, ""},
+ {"PMC5_PIPELINE_FLUSH", Const, 1, ""},
+ {"PRIO_PGRP", Const, 2, ""},
+ {"PRIO_PROCESS", Const, 2, ""},
+ {"PRIO_USER", Const, 2, ""},
+ {"PRI_IOFLUSH", Const, 1, ""},
+ {"PROCESS_QUERY_INFORMATION", Const, 0, ""},
+ {"PROCESS_TERMINATE", Const, 2, ""},
+ {"PROT_EXEC", Const, 0, ""},
+ {"PROT_GROWSDOWN", Const, 0, ""},
+ {"PROT_GROWSUP", Const, 0, ""},
+ {"PROT_NONE", Const, 0, ""},
+ {"PROT_READ", Const, 0, ""},
+ {"PROT_WRITE", Const, 0, ""},
+ {"PROV_DH_SCHANNEL", Const, 0, ""},
+ {"PROV_DSS", Const, 0, ""},
+ {"PROV_DSS_DH", Const, 0, ""},
+ {"PROV_EC_ECDSA_FULL", Const, 0, ""},
+ {"PROV_EC_ECDSA_SIG", Const, 0, ""},
+ {"PROV_EC_ECNRA_FULL", Const, 0, ""},
+ {"PROV_EC_ECNRA_SIG", Const, 0, ""},
+ {"PROV_FORTEZZA", Const, 0, ""},
+ {"PROV_INTEL_SEC", Const, 0, ""},
+ {"PROV_MS_EXCHANGE", Const, 0, ""},
+ {"PROV_REPLACE_OWF", Const, 0, ""},
+ {"PROV_RNG", Const, 0, ""},
+ {"PROV_RSA_AES", Const, 0, ""},
+ {"PROV_RSA_FULL", Const, 0, ""},
+ {"PROV_RSA_SCHANNEL", Const, 0, ""},
+ {"PROV_RSA_SIG", Const, 0, ""},
+ {"PROV_SPYRUS_LYNKS", Const, 0, ""},
+ {"PROV_SSL", Const, 0, ""},
+ {"PR_CAPBSET_DROP", Const, 0, ""},
+ {"PR_CAPBSET_READ", Const, 0, ""},
+ {"PR_CLEAR_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_ENDIAN_BIG", Const, 0, ""},
+ {"PR_ENDIAN_LITTLE", Const, 0, ""},
+ {"PR_ENDIAN_PPC_LITTLE", Const, 0, ""},
+ {"PR_FPEMU_NOPRINT", Const, 0, ""},
+ {"PR_FPEMU_SIGFPE", Const, 0, ""},
+ {"PR_FP_EXC_ASYNC", Const, 0, ""},
+ {"PR_FP_EXC_DISABLED", Const, 0, ""},
+ {"PR_FP_EXC_DIV", Const, 0, ""},
+ {"PR_FP_EXC_INV", Const, 0, ""},
+ {"PR_FP_EXC_NONRECOV", Const, 0, ""},
+ {"PR_FP_EXC_OVF", Const, 0, ""},
+ {"PR_FP_EXC_PRECISE", Const, 0, ""},
+ {"PR_FP_EXC_RES", Const, 0, ""},
+ {"PR_FP_EXC_SW_ENABLE", Const, 0, ""},
+ {"PR_FP_EXC_UND", Const, 0, ""},
+ {"PR_GET_DUMPABLE", Const, 0, ""},
+ {"PR_GET_ENDIAN", Const, 0, ""},
+ {"PR_GET_FPEMU", Const, 0, ""},
+ {"PR_GET_FPEXC", Const, 0, ""},
+ {"PR_GET_KEEPCAPS", Const, 0, ""},
+ {"PR_GET_NAME", Const, 0, ""},
+ {"PR_GET_PDEATHSIG", Const, 0, ""},
+ {"PR_GET_SECCOMP", Const, 0, ""},
+ {"PR_GET_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_GET_SECUREBITS", Const, 0, ""},
+ {"PR_GET_TIMERSLACK", Const, 0, ""},
+ {"PR_GET_TIMING", Const, 0, ""},
+ {"PR_GET_TSC", Const, 0, ""},
+ {"PR_GET_UNALIGN", Const, 0, ""},
+ {"PR_MCE_KILL", Const, 0, ""},
+ {"PR_MCE_KILL_CLEAR", Const, 0, ""},
+ {"PR_MCE_KILL_DEFAULT", Const, 0, ""},
+ {"PR_MCE_KILL_EARLY", Const, 0, ""},
+ {"PR_MCE_KILL_GET", Const, 0, ""},
+ {"PR_MCE_KILL_LATE", Const, 0, ""},
+ {"PR_MCE_KILL_SET", Const, 0, ""},
+ {"PR_SECCOMP_FILTER_EVENT", Const, 0, ""},
+ {"PR_SECCOMP_FILTER_SYSCALL", Const, 0, ""},
+ {"PR_SET_DUMPABLE", Const, 0, ""},
+ {"PR_SET_ENDIAN", Const, 0, ""},
+ {"PR_SET_FPEMU", Const, 0, ""},
+ {"PR_SET_FPEXC", Const, 0, ""},
+ {"PR_SET_KEEPCAPS", Const, 0, ""},
+ {"PR_SET_NAME", Const, 0, ""},
+ {"PR_SET_PDEATHSIG", Const, 0, ""},
+ {"PR_SET_PTRACER", Const, 0, ""},
+ {"PR_SET_SECCOMP", Const, 0, ""},
+ {"PR_SET_SECCOMP_FILTER", Const, 0, ""},
+ {"PR_SET_SECUREBITS", Const, 0, ""},
+ {"PR_SET_TIMERSLACK", Const, 0, ""},
+ {"PR_SET_TIMING", Const, 0, ""},
+ {"PR_SET_TSC", Const, 0, ""},
+ {"PR_SET_UNALIGN", Const, 0, ""},
+ {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0, ""},
+ {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0, ""},
+ {"PR_TIMING_STATISTICAL", Const, 0, ""},
+ {"PR_TIMING_TIMESTAMP", Const, 0, ""},
+ {"PR_TSC_ENABLE", Const, 0, ""},
+ {"PR_TSC_SIGSEGV", Const, 0, ""},
+ {"PR_UNALIGN_NOPRINT", Const, 0, ""},
+ {"PR_UNALIGN_SIGBUS", Const, 0, ""},
+ {"PTRACE_ARCH_PRCTL", Const, 0, ""},
+ {"PTRACE_ATTACH", Const, 0, ""},
+ {"PTRACE_CONT", Const, 0, ""},
+ {"PTRACE_DETACH", Const, 0, ""},
+ {"PTRACE_EVENT_CLONE", Const, 0, ""},
+ {"PTRACE_EVENT_EXEC", Const, 0, ""},
+ {"PTRACE_EVENT_EXIT", Const, 0, ""},
+ {"PTRACE_EVENT_FORK", Const, 0, ""},
+ {"PTRACE_EVENT_VFORK", Const, 0, ""},
+ {"PTRACE_EVENT_VFORK_DONE", Const, 0, ""},
+ {"PTRACE_GETCRUNCHREGS", Const, 0, ""},
+ {"PTRACE_GETEVENTMSG", Const, 0, ""},
+ {"PTRACE_GETFPREGS", Const, 0, ""},
+ {"PTRACE_GETFPXREGS", Const, 0, ""},
+ {"PTRACE_GETHBPREGS", Const, 0, ""},
+ {"PTRACE_GETREGS", Const, 0, ""},
+ {"PTRACE_GETREGSET", Const, 0, ""},
+ {"PTRACE_GETSIGINFO", Const, 0, ""},
+ {"PTRACE_GETVFPREGS", Const, 0, ""},
+ {"PTRACE_GETWMMXREGS", Const, 0, ""},
+ {"PTRACE_GET_THREAD_AREA", Const, 0, ""},
+ {"PTRACE_KILL", Const, 0, ""},
+ {"PTRACE_OLDSETOPTIONS", Const, 0, ""},
+ {"PTRACE_O_MASK", Const, 0, ""},
+ {"PTRACE_O_TRACECLONE", Const, 0, ""},
+ {"PTRACE_O_TRACEEXEC", Const, 0, ""},
+ {"PTRACE_O_TRACEEXIT", Const, 0, ""},
+ {"PTRACE_O_TRACEFORK", Const, 0, ""},
+ {"PTRACE_O_TRACESYSGOOD", Const, 0, ""},
+ {"PTRACE_O_TRACEVFORK", Const, 0, ""},
+ {"PTRACE_O_TRACEVFORKDONE", Const, 0, ""},
+ {"PTRACE_PEEKDATA", Const, 0, ""},
+ {"PTRACE_PEEKTEXT", Const, 0, ""},
+ {"PTRACE_PEEKUSR", Const, 0, ""},
+ {"PTRACE_POKEDATA", Const, 0, ""},
+ {"PTRACE_POKETEXT", Const, 0, ""},
+ {"PTRACE_POKEUSR", Const, 0, ""},
+ {"PTRACE_SETCRUNCHREGS", Const, 0, ""},
+ {"PTRACE_SETFPREGS", Const, 0, ""},
+ {"PTRACE_SETFPXREGS", Const, 0, ""},
+ {"PTRACE_SETHBPREGS", Const, 0, ""},
+ {"PTRACE_SETOPTIONS", Const, 0, ""},
+ {"PTRACE_SETREGS", Const, 0, ""},
+ {"PTRACE_SETREGSET", Const, 0, ""},
+ {"PTRACE_SETSIGINFO", Const, 0, ""},
+ {"PTRACE_SETVFPREGS", Const, 0, ""},
+ {"PTRACE_SETWMMXREGS", Const, 0, ""},
+ {"PTRACE_SET_SYSCALL", Const, 0, ""},
+ {"PTRACE_SET_THREAD_AREA", Const, 0, ""},
+ {"PTRACE_SINGLEBLOCK", Const, 0, ""},
+ {"PTRACE_SINGLESTEP", Const, 0, ""},
+ {"PTRACE_SYSCALL", Const, 0, ""},
+ {"PTRACE_SYSEMU", Const, 0, ""},
+ {"PTRACE_SYSEMU_SINGLESTEP", Const, 0, ""},
+ {"PTRACE_TRACEME", Const, 0, ""},
+ {"PT_ATTACH", Const, 0, ""},
+ {"PT_ATTACHEXC", Const, 0, ""},
+ {"PT_CONTINUE", Const, 0, ""},
+ {"PT_DATA_ADDR", Const, 0, ""},
+ {"PT_DENY_ATTACH", Const, 0, ""},
+ {"PT_DETACH", Const, 0, ""},
+ {"PT_FIRSTMACH", Const, 0, ""},
+ {"PT_FORCEQUOTA", Const, 0, ""},
+ {"PT_KILL", Const, 0, ""},
+ {"PT_MASK", Const, 1, ""},
+ {"PT_READ_D", Const, 0, ""},
+ {"PT_READ_I", Const, 0, ""},
+ {"PT_READ_U", Const, 0, ""},
+ {"PT_SIGEXC", Const, 0, ""},
+ {"PT_STEP", Const, 0, ""},
+ {"PT_TEXT_ADDR", Const, 0, ""},
+ {"PT_TEXT_END_ADDR", Const, 0, ""},
+ {"PT_THUPDATE", Const, 0, ""},
+ {"PT_TRACE_ME", Const, 0, ""},
+ {"PT_WRITE_D", Const, 0, ""},
+ {"PT_WRITE_I", Const, 0, ""},
+ {"PT_WRITE_U", Const, 0, ""},
+ {"ParseDirent", Func, 0, "func(buf []byte, max int, names []string) (consumed int, count int, newnames []string)"},
+ {"ParseNetlinkMessage", Func, 0, "func(b []byte) ([]NetlinkMessage, error)"},
+ {"ParseNetlinkRouteAttr", Func, 0, "func(m *NetlinkMessage) ([]NetlinkRouteAttr, error)"},
+ {"ParseRoutingMessage", Func, 0, ""},
+ {"ParseRoutingSockaddr", Func, 0, ""},
+ {"ParseSocketControlMessage", Func, 0, "func(b []byte) ([]SocketControlMessage, error)"},
+ {"ParseUnixCredentials", Func, 0, "func(m *SocketControlMessage) (*Ucred, error)"},
+ {"ParseUnixRights", Func, 0, "func(m *SocketControlMessage) ([]int, error)"},
+ {"PathMax", Const, 0, ""},
+ {"Pathconf", Func, 0, ""},
+ {"Pause", Func, 0, "func() (err error)"},
+ {"Pipe", Func, 0, "func(p []int) error"},
+ {"Pipe2", Func, 1, "func(p []int, flags int) error"},
+ {"PivotRoot", Func, 0, "func(newroot string, putold string) (err error)"},
+ {"Pointer", Type, 11, ""},
+ {"PostQueuedCompletionStatus", Func, 0, ""},
+ {"Pread", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+ {"Proc", Type, 0, ""},
+ {"Proc.Dll", Field, 0, ""},
+ {"Proc.Name", Field, 0, ""},
+ {"ProcAttr", Type, 0, ""},
+ {"ProcAttr.Dir", Field, 0, ""},
+ {"ProcAttr.Env", Field, 0, ""},
+ {"ProcAttr.Files", Field, 0, ""},
+ {"ProcAttr.Sys", Field, 0, ""},
+ {"Process32First", Func, 4, ""},
+ {"Process32Next", Func, 4, ""},
+ {"ProcessEntry32", Type, 4, ""},
+ {"ProcessEntry32.DefaultHeapID", Field, 4, ""},
+ {"ProcessEntry32.ExeFile", Field, 4, ""},
+ {"ProcessEntry32.Flags", Field, 4, ""},
+ {"ProcessEntry32.ModuleID", Field, 4, ""},
+ {"ProcessEntry32.ParentProcessID", Field, 4, ""},
+ {"ProcessEntry32.PriClassBase", Field, 4, ""},
+ {"ProcessEntry32.ProcessID", Field, 4, ""},
+ {"ProcessEntry32.Size", Field, 4, ""},
+ {"ProcessEntry32.Threads", Field, 4, ""},
+ {"ProcessEntry32.Usage", Field, 4, ""},
+ {"ProcessInformation", Type, 0, ""},
+ {"ProcessInformation.Process", Field, 0, ""},
+ {"ProcessInformation.ProcessId", Field, 0, ""},
+ {"ProcessInformation.Thread", Field, 0, ""},
+ {"ProcessInformation.ThreadId", Field, 0, ""},
+ {"Protoent", Type, 0, ""},
+ {"Protoent.Aliases", Field, 0, ""},
+ {"Protoent.Name", Field, 0, ""},
+ {"Protoent.Proto", Field, 0, ""},
+ {"PtraceAttach", Func, 0, "func(pid int) (err error)"},
+ {"PtraceCont", Func, 0, "func(pid int, signal int) (err error)"},
+ {"PtraceDetach", Func, 0, "func(pid int) (err error)"},
+ {"PtraceGetEventMsg", Func, 0, "func(pid int) (msg uint, err error)"},
+ {"PtraceGetRegs", Func, 0, "func(pid int, regsout *PtraceRegs) (err error)"},
+ {"PtracePeekData", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+ {"PtracePeekText", Func, 0, "func(pid int, addr uintptr, out []byte) (count int, err error)"},
+ {"PtracePokeData", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+ {"PtracePokeText", Func, 0, "func(pid int, addr uintptr, data []byte) (count int, err error)"},
+ {"PtraceRegs", Type, 0, ""},
+ {"PtraceRegs.Cs", Field, 0, ""},
+ {"PtraceRegs.Ds", Field, 0, ""},
+ {"PtraceRegs.Eax", Field, 0, ""},
+ {"PtraceRegs.Ebp", Field, 0, ""},
+ {"PtraceRegs.Ebx", Field, 0, ""},
+ {"PtraceRegs.Ecx", Field, 0, ""},
+ {"PtraceRegs.Edi", Field, 0, ""},
+ {"PtraceRegs.Edx", Field, 0, ""},
+ {"PtraceRegs.Eflags", Field, 0, ""},
+ {"PtraceRegs.Eip", Field, 0, ""},
+ {"PtraceRegs.Es", Field, 0, ""},
+ {"PtraceRegs.Esi", Field, 0, ""},
+ {"PtraceRegs.Esp", Field, 0, ""},
+ {"PtraceRegs.Fs", Field, 0, ""},
+ {"PtraceRegs.Fs_base", Field, 0, ""},
+ {"PtraceRegs.Gs", Field, 0, ""},
+ {"PtraceRegs.Gs_base", Field, 0, ""},
+ {"PtraceRegs.Orig_eax", Field, 0, ""},
+ {"PtraceRegs.Orig_rax", Field, 0, ""},
+ {"PtraceRegs.R10", Field, 0, ""},
+ {"PtraceRegs.R11", Field, 0, ""},
+ {"PtraceRegs.R12", Field, 0, ""},
+ {"PtraceRegs.R13", Field, 0, ""},
+ {"PtraceRegs.R14", Field, 0, ""},
+ {"PtraceRegs.R15", Field, 0, ""},
+ {"PtraceRegs.R8", Field, 0, ""},
+ {"PtraceRegs.R9", Field, 0, ""},
+ {"PtraceRegs.Rax", Field, 0, ""},
+ {"PtraceRegs.Rbp", Field, 0, ""},
+ {"PtraceRegs.Rbx", Field, 0, ""},
+ {"PtraceRegs.Rcx", Field, 0, ""},
+ {"PtraceRegs.Rdi", Field, 0, ""},
+ {"PtraceRegs.Rdx", Field, 0, ""},
+ {"PtraceRegs.Rip", Field, 0, ""},
+ {"PtraceRegs.Rsi", Field, 0, ""},
+ {"PtraceRegs.Rsp", Field, 0, ""},
+ {"PtraceRegs.Ss", Field, 0, ""},
+ {"PtraceRegs.Uregs", Field, 0, ""},
+ {"PtraceRegs.Xcs", Field, 0, ""},
+ {"PtraceRegs.Xds", Field, 0, ""},
+ {"PtraceRegs.Xes", Field, 0, ""},
+ {"PtraceRegs.Xfs", Field, 0, ""},
+ {"PtraceRegs.Xgs", Field, 0, ""},
+ {"PtraceRegs.Xss", Field, 0, ""},
+ {"PtraceSetOptions", Func, 0, "func(pid int, options int) (err error)"},
+ {"PtraceSetRegs", Func, 0, "func(pid int, regs *PtraceRegs) (err error)"},
+ {"PtraceSingleStep", Func, 0, "func(pid int) (err error)"},
+ {"PtraceSyscall", Func, 1, "func(pid int, signal int) (err error)"},
+ {"Pwrite", Func, 0, "func(fd int, p []byte, offset int64) (n int, err error)"},
+ {"REG_BINARY", Const, 0, ""},
+ {"REG_DWORD", Const, 0, ""},
+ {"REG_DWORD_BIG_ENDIAN", Const, 0, ""},
+ {"REG_DWORD_LITTLE_ENDIAN", Const, 0, ""},
+ {"REG_EXPAND_SZ", Const, 0, ""},
+ {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0, ""},
+ {"REG_LINK", Const, 0, ""},
+ {"REG_MULTI_SZ", Const, 0, ""},
+ {"REG_NONE", Const, 0, ""},
+ {"REG_QWORD", Const, 0, ""},
+ {"REG_QWORD_LITTLE_ENDIAN", Const, 0, ""},
+ {"REG_RESOURCE_LIST", Const, 0, ""},
+ {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0, ""},
+ {"REG_SZ", Const, 0, ""},
+ {"RLIMIT_AS", Const, 0, ""},
+ {"RLIMIT_CORE", Const, 0, ""},
+ {"RLIMIT_CPU", Const, 0, ""},
+ {"RLIMIT_CPU_USAGE_MONITOR", Const, 16, ""},
+ {"RLIMIT_DATA", Const, 0, ""},
+ {"RLIMIT_FSIZE", Const, 0, ""},
+ {"RLIMIT_NOFILE", Const, 0, ""},
+ {"RLIMIT_STACK", Const, 0, ""},
+ {"RLIM_INFINITY", Const, 0, ""},
+ {"RTAX_ADVMSS", Const, 0, ""},
+ {"RTAX_AUTHOR", Const, 0, ""},
+ {"RTAX_BRD", Const, 0, ""},
+ {"RTAX_CWND", Const, 0, ""},
+ {"RTAX_DST", Const, 0, ""},
+ {"RTAX_FEATURES", Const, 0, ""},
+ {"RTAX_FEATURE_ALLFRAG", Const, 0, ""},
+ {"RTAX_FEATURE_ECN", Const, 0, ""},
+ {"RTAX_FEATURE_SACK", Const, 0, ""},
+ {"RTAX_FEATURE_TIMESTAMP", Const, 0, ""},
+ {"RTAX_GATEWAY", Const, 0, ""},
+ {"RTAX_GENMASK", Const, 0, ""},
+ {"RTAX_HOPLIMIT", Const, 0, ""},
+ {"RTAX_IFA", Const, 0, ""},
+ {"RTAX_IFP", Const, 0, ""},
+ {"RTAX_INITCWND", Const, 0, ""},
+ {"RTAX_INITRWND", Const, 0, ""},
+ {"RTAX_LABEL", Const, 1, ""},
+ {"RTAX_LOCK", Const, 0, ""},
+ {"RTAX_MAX", Const, 0, ""},
+ {"RTAX_MTU", Const, 0, ""},
+ {"RTAX_NETMASK", Const, 0, ""},
+ {"RTAX_REORDERING", Const, 0, ""},
+ {"RTAX_RTO_MIN", Const, 0, ""},
+ {"RTAX_RTT", Const, 0, ""},
+ {"RTAX_RTTVAR", Const, 0, ""},
+ {"RTAX_SRC", Const, 1, ""},
+ {"RTAX_SRCMASK", Const, 1, ""},
+ {"RTAX_SSTHRESH", Const, 0, ""},
+ {"RTAX_TAG", Const, 1, ""},
+ {"RTAX_UNSPEC", Const, 0, ""},
+ {"RTAX_WINDOW", Const, 0, ""},
+ {"RTA_ALIGNTO", Const, 0, ""},
+ {"RTA_AUTHOR", Const, 0, ""},
+ {"RTA_BRD", Const, 0, ""},
+ {"RTA_CACHEINFO", Const, 0, ""},
+ {"RTA_DST", Const, 0, ""},
+ {"RTA_FLOW", Const, 0, ""},
+ {"RTA_GATEWAY", Const, 0, ""},
+ {"RTA_GENMASK", Const, 0, ""},
+ {"RTA_IFA", Const, 0, ""},
+ {"RTA_IFP", Const, 0, ""},
+ {"RTA_IIF", Const, 0, ""},
+ {"RTA_LABEL", Const, 1, ""},
+ {"RTA_MAX", Const, 0, ""},
+ {"RTA_METRICS", Const, 0, ""},
+ {"RTA_MULTIPATH", Const, 0, ""},
+ {"RTA_NETMASK", Const, 0, ""},
+ {"RTA_OIF", Const, 0, ""},
+ {"RTA_PREFSRC", Const, 0, ""},
+ {"RTA_PRIORITY", Const, 0, ""},
+ {"RTA_SRC", Const, 0, ""},
+ {"RTA_SRCMASK", Const, 1, ""},
+ {"RTA_TABLE", Const, 0, ""},
+ {"RTA_TAG", Const, 1, ""},
+ {"RTA_UNSPEC", Const, 0, ""},
+ {"RTCF_DIRECTSRC", Const, 0, ""},
+ {"RTCF_DOREDIRECT", Const, 0, ""},
+ {"RTCF_LOG", Const, 0, ""},
+ {"RTCF_MASQ", Const, 0, ""},
+ {"RTCF_NAT", Const, 0, ""},
+ {"RTCF_VALVE", Const, 0, ""},
+ {"RTF_ADDRCLASSMASK", Const, 0, ""},
+ {"RTF_ADDRCONF", Const, 0, ""},
+ {"RTF_ALLONLINK", Const, 0, ""},
+ {"RTF_ANNOUNCE", Const, 1, ""},
+ {"RTF_BLACKHOLE", Const, 0, ""},
+ {"RTF_BROADCAST", Const, 0, ""},
+ {"RTF_CACHE", Const, 0, ""},
+ {"RTF_CLONED", Const, 1, ""},
+ {"RTF_CLONING", Const, 0, ""},
+ {"RTF_CONDEMNED", Const, 0, ""},
+ {"RTF_DEFAULT", Const, 0, ""},
+ {"RTF_DELCLONE", Const, 0, ""},
+ {"RTF_DONE", Const, 0, ""},
+ {"RTF_DYNAMIC", Const, 0, ""},
+ {"RTF_FLOW", Const, 0, ""},
+ {"RTF_FMASK", Const, 0, ""},
+ {"RTF_GATEWAY", Const, 0, ""},
+ {"RTF_GWFLAG_COMPAT", Const, 3, ""},
+ {"RTF_HOST", Const, 0, ""},
+ {"RTF_IFREF", Const, 0, ""},
+ {"RTF_IFSCOPE", Const, 0, ""},
+ {"RTF_INTERFACE", Const, 0, ""},
+ {"RTF_IRTT", Const, 0, ""},
+ {"RTF_LINKRT", Const, 0, ""},
+ {"RTF_LLDATA", Const, 0, ""},
+ {"RTF_LLINFO", Const, 0, ""},
+ {"RTF_LOCAL", Const, 0, ""},
+ {"RTF_MASK", Const, 1, ""},
+ {"RTF_MODIFIED", Const, 0, ""},
+ {"RTF_MPATH", Const, 1, ""},
+ {"RTF_MPLS", Const, 1, ""},
+ {"RTF_MSS", Const, 0, ""},
+ {"RTF_MTU", Const, 0, ""},
+ {"RTF_MULTICAST", Const, 0, ""},
+ {"RTF_NAT", Const, 0, ""},
+ {"RTF_NOFORWARD", Const, 0, ""},
+ {"RTF_NONEXTHOP", Const, 0, ""},
+ {"RTF_NOPMTUDISC", Const, 0, ""},
+ {"RTF_PERMANENT_ARP", Const, 1, ""},
+ {"RTF_PINNED", Const, 0, ""},
+ {"RTF_POLICY", Const, 0, ""},
+ {"RTF_PRCLONING", Const, 0, ""},
+ {"RTF_PROTO1", Const, 0, ""},
+ {"RTF_PROTO2", Const, 0, ""},
+ {"RTF_PROTO3", Const, 0, ""},
+ {"RTF_PROXY", Const, 16, ""},
+ {"RTF_REINSTATE", Const, 0, ""},
+ {"RTF_REJECT", Const, 0, ""},
+ {"RTF_RNH_LOCKED", Const, 0, ""},
+ {"RTF_ROUTER", Const, 16, ""},
+ {"RTF_SOURCE", Const, 1, ""},
+ {"RTF_SRC", Const, 1, ""},
+ {"RTF_STATIC", Const, 0, ""},
+ {"RTF_STICKY", Const, 0, ""},
+ {"RTF_THROW", Const, 0, ""},
+ {"RTF_TUNNEL", Const, 1, ""},
+ {"RTF_UP", Const, 0, ""},
+ {"RTF_USETRAILERS", Const, 1, ""},
+ {"RTF_WASCLONED", Const, 0, ""},
+ {"RTF_WINDOW", Const, 0, ""},
+ {"RTF_XRESOLVE", Const, 0, ""},
+ {"RTM_ADD", Const, 0, ""},
+ {"RTM_BASE", Const, 0, ""},
+ {"RTM_CHANGE", Const, 0, ""},
+ {"RTM_CHGADDR", Const, 1, ""},
+ {"RTM_DELACTION", Const, 0, ""},
+ {"RTM_DELADDR", Const, 0, ""},
+ {"RTM_DELADDRLABEL", Const, 0, ""},
+ {"RTM_DELETE", Const, 0, ""},
+ {"RTM_DELLINK", Const, 0, ""},
+ {"RTM_DELMADDR", Const, 0, ""},
+ {"RTM_DELNEIGH", Const, 0, ""},
+ {"RTM_DELQDISC", Const, 0, ""},
+ {"RTM_DELROUTE", Const, 0, ""},
+ {"RTM_DELRULE", Const, 0, ""},
+ {"RTM_DELTCLASS", Const, 0, ""},
+ {"RTM_DELTFILTER", Const, 0, ""},
+ {"RTM_DESYNC", Const, 1, ""},
+ {"RTM_F_CLONED", Const, 0, ""},
+ {"RTM_F_EQUALIZE", Const, 0, ""},
+ {"RTM_F_NOTIFY", Const, 0, ""},
+ {"RTM_F_PREFIX", Const, 0, ""},
+ {"RTM_GET", Const, 0, ""},
+ {"RTM_GET2", Const, 0, ""},
+ {"RTM_GETACTION", Const, 0, ""},
+ {"RTM_GETADDR", Const, 0, ""},
+ {"RTM_GETADDRLABEL", Const, 0, ""},
+ {"RTM_GETANYCAST", Const, 0, ""},
+ {"RTM_GETDCB", Const, 0, ""},
+ {"RTM_GETLINK", Const, 0, ""},
+ {"RTM_GETMULTICAST", Const, 0, ""},
+ {"RTM_GETNEIGH", Const, 0, ""},
+ {"RTM_GETNEIGHTBL", Const, 0, ""},
+ {"RTM_GETQDISC", Const, 0, ""},
+ {"RTM_GETROUTE", Const, 0, ""},
+ {"RTM_GETRULE", Const, 0, ""},
+ {"RTM_GETTCLASS", Const, 0, ""},
+ {"RTM_GETTFILTER", Const, 0, ""},
+ {"RTM_IEEE80211", Const, 0, ""},
+ {"RTM_IFANNOUNCE", Const, 0, ""},
+ {"RTM_IFINFO", Const, 0, ""},
+ {"RTM_IFINFO2", Const, 0, ""},
+ {"RTM_LLINFO_UPD", Const, 1, ""},
+ {"RTM_LOCK", Const, 0, ""},
+ {"RTM_LOSING", Const, 0, ""},
+ {"RTM_MAX", Const, 0, ""},
+ {"RTM_MAXSIZE", Const, 1, ""},
+ {"RTM_MISS", Const, 0, ""},
+ {"RTM_NEWACTION", Const, 0, ""},
+ {"RTM_NEWADDR", Const, 0, ""},
+ {"RTM_NEWADDRLABEL", Const, 0, ""},
+ {"RTM_NEWLINK", Const, 0, ""},
+ {"RTM_NEWMADDR", Const, 0, ""},
+ {"RTM_NEWMADDR2", Const, 0, ""},
+ {"RTM_NEWNDUSEROPT", Const, 0, ""},
+ {"RTM_NEWNEIGH", Const, 0, ""},
+ {"RTM_NEWNEIGHTBL", Const, 0, ""},
+ {"RTM_NEWPREFIX", Const, 0, ""},
+ {"RTM_NEWQDISC", Const, 0, ""},
+ {"RTM_NEWROUTE", Const, 0, ""},
+ {"RTM_NEWRULE", Const, 0, ""},
+ {"RTM_NEWTCLASS", Const, 0, ""},
+ {"RTM_NEWTFILTER", Const, 0, ""},
+ {"RTM_NR_FAMILIES", Const, 0, ""},
+ {"RTM_NR_MSGTYPES", Const, 0, ""},
+ {"RTM_OIFINFO", Const, 1, ""},
+ {"RTM_OLDADD", Const, 0, ""},
+ {"RTM_OLDDEL", Const, 0, ""},
+ {"RTM_OOIFINFO", Const, 1, ""},
+ {"RTM_REDIRECT", Const, 0, ""},
+ {"RTM_RESOLVE", Const, 0, ""},
+ {"RTM_RTTUNIT", Const, 0, ""},
+ {"RTM_SETDCB", Const, 0, ""},
+ {"RTM_SETGATE", Const, 1, ""},
+ {"RTM_SETLINK", Const, 0, ""},
+ {"RTM_SETNEIGHTBL", Const, 0, ""},
+ {"RTM_VERSION", Const, 0, ""},
+ {"RTNH_ALIGNTO", Const, 0, ""},
+ {"RTNH_F_DEAD", Const, 0, ""},
+ {"RTNH_F_ONLINK", Const, 0, ""},
+ {"RTNH_F_PERVASIVE", Const, 0, ""},
+ {"RTNLGRP_IPV4_IFADDR", Const, 1, ""},
+ {"RTNLGRP_IPV4_MROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV4_ROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV4_RULE", Const, 1, ""},
+ {"RTNLGRP_IPV6_IFADDR", Const, 1, ""},
+ {"RTNLGRP_IPV6_IFINFO", Const, 1, ""},
+ {"RTNLGRP_IPV6_MROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV6_PREFIX", Const, 1, ""},
+ {"RTNLGRP_IPV6_ROUTE", Const, 1, ""},
+ {"RTNLGRP_IPV6_RULE", Const, 1, ""},
+ {"RTNLGRP_LINK", Const, 1, ""},
+ {"RTNLGRP_ND_USEROPT", Const, 1, ""},
+ {"RTNLGRP_NEIGH", Const, 1, ""},
+ {"RTNLGRP_NONE", Const, 1, ""},
+ {"RTNLGRP_NOTIFY", Const, 1, ""},
+ {"RTNLGRP_TC", Const, 1, ""},
+ {"RTN_ANYCAST", Const, 0, ""},
+ {"RTN_BLACKHOLE", Const, 0, ""},
+ {"RTN_BROADCAST", Const, 0, ""},
+ {"RTN_LOCAL", Const, 0, ""},
+ {"RTN_MAX", Const, 0, ""},
+ {"RTN_MULTICAST", Const, 0, ""},
+ {"RTN_NAT", Const, 0, ""},
+ {"RTN_PROHIBIT", Const, 0, ""},
+ {"RTN_THROW", Const, 0, ""},
+ {"RTN_UNICAST", Const, 0, ""},
+ {"RTN_UNREACHABLE", Const, 0, ""},
+ {"RTN_UNSPEC", Const, 0, ""},
+ {"RTN_XRESOLVE", Const, 0, ""},
+ {"RTPROT_BIRD", Const, 0, ""},
+ {"RTPROT_BOOT", Const, 0, ""},
+ {"RTPROT_DHCP", Const, 0, ""},
+ {"RTPROT_DNROUTED", Const, 0, ""},
+ {"RTPROT_GATED", Const, 0, ""},
+ {"RTPROT_KERNEL", Const, 0, ""},
+ {"RTPROT_MRT", Const, 0, ""},
+ {"RTPROT_NTK", Const, 0, ""},
+ {"RTPROT_RA", Const, 0, ""},
+ {"RTPROT_REDIRECT", Const, 0, ""},
+ {"RTPROT_STATIC", Const, 0, ""},
+ {"RTPROT_UNSPEC", Const, 0, ""},
+ {"RTPROT_XORP", Const, 0, ""},
+ {"RTPROT_ZEBRA", Const, 0, ""},
+ {"RTV_EXPIRE", Const, 0, ""},
+ {"RTV_HOPCOUNT", Const, 0, ""},
+ {"RTV_MTU", Const, 0, ""},
+ {"RTV_RPIPE", Const, 0, ""},
+ {"RTV_RTT", Const, 0, ""},
+ {"RTV_RTTVAR", Const, 0, ""},
+ {"RTV_SPIPE", Const, 0, ""},
+ {"RTV_SSTHRESH", Const, 0, ""},
+ {"RTV_WEIGHT", Const, 0, ""},
+ {"RT_CACHING_CONTEXT", Const, 1, ""},
+ {"RT_CLASS_DEFAULT", Const, 0, ""},
+ {"RT_CLASS_LOCAL", Const, 0, ""},
+ {"RT_CLASS_MAIN", Const, 0, ""},
+ {"RT_CLASS_MAX", Const, 0, ""},
+ {"RT_CLASS_UNSPEC", Const, 0, ""},
+ {"RT_DEFAULT_FIB", Const, 1, ""},
+ {"RT_NORTREF", Const, 1, ""},
+ {"RT_SCOPE_HOST", Const, 0, ""},
+ {"RT_SCOPE_LINK", Const, 0, ""},
+ {"RT_SCOPE_NOWHERE", Const, 0, ""},
+ {"RT_SCOPE_SITE", Const, 0, ""},
+ {"RT_SCOPE_UNIVERSE", Const, 0, ""},
+ {"RT_TABLEID_MAX", Const, 1, ""},
+ {"RT_TABLE_COMPAT", Const, 0, ""},
+ {"RT_TABLE_DEFAULT", Const, 0, ""},
+ {"RT_TABLE_LOCAL", Const, 0, ""},
+ {"RT_TABLE_MAIN", Const, 0, ""},
+ {"RT_TABLE_MAX", Const, 0, ""},
+ {"RT_TABLE_UNSPEC", Const, 0, ""},
+ {"RUSAGE_CHILDREN", Const, 0, ""},
+ {"RUSAGE_SELF", Const, 0, ""},
+ {"RUSAGE_THREAD", Const, 0, ""},
+ {"Radvisory_t", Type, 0, ""},
+ {"Radvisory_t.Count", Field, 0, ""},
+ {"Radvisory_t.Offset", Field, 0, ""},
+ {"Radvisory_t.Pad_cgo_0", Field, 0, ""},
+ {"RawConn", Type, 9, ""},
+ {"RawSockaddr", Type, 0, ""},
+ {"RawSockaddr.Data", Field, 0, ""},
+ {"RawSockaddr.Family", Field, 0, ""},
+ {"RawSockaddr.Len", Field, 0, ""},
+ {"RawSockaddrAny", Type, 0, ""},
+ {"RawSockaddrAny.Addr", Field, 0, ""},
+ {"RawSockaddrAny.Pad", Field, 0, ""},
+ {"RawSockaddrDatalink", Type, 0, ""},
+ {"RawSockaddrDatalink.Alen", Field, 0, ""},
+ {"RawSockaddrDatalink.Data", Field, 0, ""},
+ {"RawSockaddrDatalink.Family", Field, 0, ""},
+ {"RawSockaddrDatalink.Index", Field, 0, ""},
+ {"RawSockaddrDatalink.Len", Field, 0, ""},
+ {"RawSockaddrDatalink.Nlen", Field, 0, ""},
+ {"RawSockaddrDatalink.Pad_cgo_0", Field, 2, ""},
+ {"RawSockaddrDatalink.Slen", Field, 0, ""},
+ {"RawSockaddrDatalink.Type", Field, 0, ""},
+ {"RawSockaddrInet4", Type, 0, ""},
+ {"RawSockaddrInet4.Addr", Field, 0, ""},
+ {"RawSockaddrInet4.Family", Field, 0, ""},
+ {"RawSockaddrInet4.Len", Field, 0, ""},
+ {"RawSockaddrInet4.Port", Field, 0, ""},
+ {"RawSockaddrInet4.Zero", Field, 0, ""},
+ {"RawSockaddrInet6", Type, 0, ""},
+ {"RawSockaddrInet6.Addr", Field, 0, ""},
+ {"RawSockaddrInet6.Family", Field, 0, ""},
+ {"RawSockaddrInet6.Flowinfo", Field, 0, ""},
+ {"RawSockaddrInet6.Len", Field, 0, ""},
+ {"RawSockaddrInet6.Port", Field, 0, ""},
+ {"RawSockaddrInet6.Scope_id", Field, 0, ""},
+ {"RawSockaddrLinklayer", Type, 0, ""},
+ {"RawSockaddrLinklayer.Addr", Field, 0, ""},
+ {"RawSockaddrLinklayer.Family", Field, 0, ""},
+ {"RawSockaddrLinklayer.Halen", Field, 0, ""},
+ {"RawSockaddrLinklayer.Hatype", Field, 0, ""},
+ {"RawSockaddrLinklayer.Ifindex", Field, 0, ""},
+ {"RawSockaddrLinklayer.Pkttype", Field, 0, ""},
+ {"RawSockaddrLinklayer.Protocol", Field, 0, ""},
+ {"RawSockaddrNetlink", Type, 0, ""},
+ {"RawSockaddrNetlink.Family", Field, 0, ""},
+ {"RawSockaddrNetlink.Groups", Field, 0, ""},
+ {"RawSockaddrNetlink.Pad", Field, 0, ""},
+ {"RawSockaddrNetlink.Pid", Field, 0, ""},
+ {"RawSockaddrUnix", Type, 0, ""},
+ {"RawSockaddrUnix.Family", Field, 0, ""},
+ {"RawSockaddrUnix.Len", Field, 0, ""},
+ {"RawSockaddrUnix.Pad_cgo_0", Field, 2, ""},
+ {"RawSockaddrUnix.Path", Field, 0, ""},
+ {"RawSyscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"RawSyscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Read", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+ {"ReadConsole", Func, 1, ""},
+ {"ReadDirectoryChanges", Func, 0, ""},
+ {"ReadDirent", Func, 0, "func(fd int, buf []byte) (n int, err error)"},
+ {"ReadFile", Func, 0, ""},
+ {"Readlink", Func, 0, "func(path string, buf []byte) (n int, err error)"},
+ {"Reboot", Func, 0, "func(cmd int) (err error)"},
+ {"Recvfrom", Func, 0, "func(fd int, p []byte, flags int) (n int, from Sockaddr, err error)"},
+ {"Recvmsg", Func, 0, "func(fd int, p []byte, oob []byte, flags int) (n int, oobn int, recvflags int, from Sockaddr, err error)"},
+ {"RegCloseKey", Func, 0, ""},
+ {"RegEnumKeyEx", Func, 0, ""},
+ {"RegOpenKeyEx", Func, 0, ""},
+ {"RegQueryInfoKey", Func, 0, ""},
+ {"RegQueryValueEx", Func, 0, ""},
+ {"RemoveDirectory", Func, 0, ""},
+ {"Removexattr", Func, 1, "func(path string, attr string) (err error)"},
+ {"Rename", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Renameat", Func, 0, "func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)"},
+ {"Revoke", Func, 0, ""},
+ {"Rlimit", Type, 0, ""},
+ {"Rlimit.Cur", Field, 0, ""},
+ {"Rlimit.Max", Field, 0, ""},
+ {"Rmdir", Func, 0, "func(path string) error"},
+ {"RouteMessage", Type, 0, ""},
+ {"RouteMessage.Data", Field, 0, ""},
+ {"RouteMessage.Header", Field, 0, ""},
+ {"RouteRIB", Func, 0, ""},
+ {"RoutingMessage", Type, 0, ""},
+ {"RtAttr", Type, 0, ""},
+ {"RtAttr.Len", Field, 0, ""},
+ {"RtAttr.Type", Field, 0, ""},
+ {"RtGenmsg", Type, 0, ""},
+ {"RtGenmsg.Family", Field, 0, ""},
+ {"RtMetrics", Type, 0, ""},
+ {"RtMetrics.Expire", Field, 0, ""},
+ {"RtMetrics.Filler", Field, 0, ""},
+ {"RtMetrics.Hopcount", Field, 0, ""},
+ {"RtMetrics.Locks", Field, 0, ""},
+ {"RtMetrics.Mtu", Field, 0, ""},
+ {"RtMetrics.Pad", Field, 3, ""},
+ {"RtMetrics.Pksent", Field, 0, ""},
+ {"RtMetrics.Recvpipe", Field, 0, ""},
+ {"RtMetrics.Refcnt", Field, 2, ""},
+ {"RtMetrics.Rtt", Field, 0, ""},
+ {"RtMetrics.Rttvar", Field, 0, ""},
+ {"RtMetrics.Sendpipe", Field, 0, ""},
+ {"RtMetrics.Ssthresh", Field, 0, ""},
+ {"RtMetrics.Weight", Field, 0, ""},
+ {"RtMsg", Type, 0, ""},
+ {"RtMsg.Dst_len", Field, 0, ""},
+ {"RtMsg.Family", Field, 0, ""},
+ {"RtMsg.Flags", Field, 0, ""},
+ {"RtMsg.Protocol", Field, 0, ""},
+ {"RtMsg.Scope", Field, 0, ""},
+ {"RtMsg.Src_len", Field, 0, ""},
+ {"RtMsg.Table", Field, 0, ""},
+ {"RtMsg.Tos", Field, 0, ""},
+ {"RtMsg.Type", Field, 0, ""},
+ {"RtMsghdr", Type, 0, ""},
+ {"RtMsghdr.Addrs", Field, 0, ""},
+ {"RtMsghdr.Errno", Field, 0, ""},
+ {"RtMsghdr.Flags", Field, 0, ""},
+ {"RtMsghdr.Fmask", Field, 0, ""},
+ {"RtMsghdr.Hdrlen", Field, 2, ""},
+ {"RtMsghdr.Index", Field, 0, ""},
+ {"RtMsghdr.Inits", Field, 0, ""},
+ {"RtMsghdr.Mpls", Field, 2, ""},
+ {"RtMsghdr.Msglen", Field, 0, ""},
+ {"RtMsghdr.Pad_cgo_0", Field, 0, ""},
+ {"RtMsghdr.Pad_cgo_1", Field, 2, ""},
+ {"RtMsghdr.Pid", Field, 0, ""},
+ {"RtMsghdr.Priority", Field, 2, ""},
+ {"RtMsghdr.Rmx", Field, 0, ""},
+ {"RtMsghdr.Seq", Field, 0, ""},
+ {"RtMsghdr.Tableid", Field, 2, ""},
+ {"RtMsghdr.Type", Field, 0, ""},
+ {"RtMsghdr.Use", Field, 0, ""},
+ {"RtMsghdr.Version", Field, 0, ""},
+ {"RtNexthop", Type, 0, ""},
+ {"RtNexthop.Flags", Field, 0, ""},
+ {"RtNexthop.Hops", Field, 0, ""},
+ {"RtNexthop.Ifindex", Field, 0, ""},
+ {"RtNexthop.Len", Field, 0, ""},
+ {"Rusage", Type, 0, ""},
+ {"Rusage.CreationTime", Field, 0, ""},
+ {"Rusage.ExitTime", Field, 0, ""},
+ {"Rusage.Idrss", Field, 0, ""},
+ {"Rusage.Inblock", Field, 0, ""},
+ {"Rusage.Isrss", Field, 0, ""},
+ {"Rusage.Ixrss", Field, 0, ""},
+ {"Rusage.KernelTime", Field, 0, ""},
+ {"Rusage.Majflt", Field, 0, ""},
+ {"Rusage.Maxrss", Field, 0, ""},
+ {"Rusage.Minflt", Field, 0, ""},
+ {"Rusage.Msgrcv", Field, 0, ""},
+ {"Rusage.Msgsnd", Field, 0, ""},
+ {"Rusage.Nivcsw", Field, 0, ""},
+ {"Rusage.Nsignals", Field, 0, ""},
+ {"Rusage.Nswap", Field, 0, ""},
+ {"Rusage.Nvcsw", Field, 0, ""},
+ {"Rusage.Oublock", Field, 0, ""},
+ {"Rusage.Stime", Field, 0, ""},
+ {"Rusage.UserTime", Field, 0, ""},
+ {"Rusage.Utime", Field, 0, ""},
+ {"SCM_BINTIME", Const, 0, ""},
+ {"SCM_CREDENTIALS", Const, 0, ""},
+ {"SCM_CREDS", Const, 0, ""},
+ {"SCM_RIGHTS", Const, 0, ""},
+ {"SCM_TIMESTAMP", Const, 0, ""},
+ {"SCM_TIMESTAMPING", Const, 0, ""},
+ {"SCM_TIMESTAMPNS", Const, 0, ""},
+ {"SCM_TIMESTAMP_MONOTONIC", Const, 0, ""},
+ {"SHUT_RD", Const, 0, ""},
+ {"SHUT_RDWR", Const, 0, ""},
+ {"SHUT_WR", Const, 0, ""},
+ {"SID", Type, 0, ""},
+ {"SIDAndAttributes", Type, 0, ""},
+ {"SIDAndAttributes.Attributes", Field, 0, ""},
+ {"SIDAndAttributes.Sid", Field, 0, ""},
+ {"SIGABRT", Const, 0, ""},
+ {"SIGALRM", Const, 0, ""},
+ {"SIGBUS", Const, 0, ""},
+ {"SIGCHLD", Const, 0, ""},
+ {"SIGCLD", Const, 0, ""},
+ {"SIGCONT", Const, 0, ""},
+ {"SIGEMT", Const, 0, ""},
+ {"SIGFPE", Const, 0, ""},
+ {"SIGHUP", Const, 0, ""},
+ {"SIGILL", Const, 0, ""},
+ {"SIGINFO", Const, 0, ""},
+ {"SIGINT", Const, 0, ""},
+ {"SIGIO", Const, 0, ""},
+ {"SIGIOT", Const, 0, ""},
+ {"SIGKILL", Const, 0, ""},
+ {"SIGLIBRT", Const, 1, ""},
+ {"SIGLWP", Const, 0, ""},
+ {"SIGPIPE", Const, 0, ""},
+ {"SIGPOLL", Const, 0, ""},
+ {"SIGPROF", Const, 0, ""},
+ {"SIGPWR", Const, 0, ""},
+ {"SIGQUIT", Const, 0, ""},
+ {"SIGSEGV", Const, 0, ""},
+ {"SIGSTKFLT", Const, 0, ""},
+ {"SIGSTOP", Const, 0, ""},
+ {"SIGSYS", Const, 0, ""},
+ {"SIGTERM", Const, 0, ""},
+ {"SIGTHR", Const, 0, ""},
+ {"SIGTRAP", Const, 0, ""},
+ {"SIGTSTP", Const, 0, ""},
+ {"SIGTTIN", Const, 0, ""},
+ {"SIGTTOU", Const, 0, ""},
+ {"SIGUNUSED", Const, 0, ""},
+ {"SIGURG", Const, 0, ""},
+ {"SIGUSR1", Const, 0, ""},
+ {"SIGUSR2", Const, 0, ""},
+ {"SIGVTALRM", Const, 0, ""},
+ {"SIGWINCH", Const, 0, ""},
+ {"SIGXCPU", Const, 0, ""},
+ {"SIGXFSZ", Const, 0, ""},
+ {"SIOCADDDLCI", Const, 0, ""},
+ {"SIOCADDMULTI", Const, 0, ""},
+ {"SIOCADDRT", Const, 0, ""},
+ {"SIOCAIFADDR", Const, 0, ""},
+ {"SIOCAIFGROUP", Const, 0, ""},
+ {"SIOCALIFADDR", Const, 0, ""},
+ {"SIOCARPIPLL", Const, 0, ""},
+ {"SIOCATMARK", Const, 0, ""},
+ {"SIOCAUTOADDR", Const, 0, ""},
+ {"SIOCAUTONETMASK", Const, 0, ""},
+ {"SIOCBRDGADD", Const, 1, ""},
+ {"SIOCBRDGADDS", Const, 1, ""},
+ {"SIOCBRDGARL", Const, 1, ""},
+ {"SIOCBRDGDADDR", Const, 1, ""},
+ {"SIOCBRDGDEL", Const, 1, ""},
+ {"SIOCBRDGDELS", Const, 1, ""},
+ {"SIOCBRDGFLUSH", Const, 1, ""},
+ {"SIOCBRDGFRL", Const, 1, ""},
+ {"SIOCBRDGGCACHE", Const, 1, ""},
+ {"SIOCBRDGGFD", Const, 1, ""},
+ {"SIOCBRDGGHT", Const, 1, ""},
+ {"SIOCBRDGGIFFLGS", Const, 1, ""},
+ {"SIOCBRDGGMA", Const, 1, ""},
+ {"SIOCBRDGGPARAM", Const, 1, ""},
+ {"SIOCBRDGGPRI", Const, 1, ""},
+ {"SIOCBRDGGRL", Const, 1, ""},
+ {"SIOCBRDGGSIFS", Const, 1, ""},
+ {"SIOCBRDGGTO", Const, 1, ""},
+ {"SIOCBRDGIFS", Const, 1, ""},
+ {"SIOCBRDGRTS", Const, 1, ""},
+ {"SIOCBRDGSADDR", Const, 1, ""},
+ {"SIOCBRDGSCACHE", Const, 1, ""},
+ {"SIOCBRDGSFD", Const, 1, ""},
+ {"SIOCBRDGSHT", Const, 1, ""},
+ {"SIOCBRDGSIFCOST", Const, 1, ""},
+ {"SIOCBRDGSIFFLGS", Const, 1, ""},
+ {"SIOCBRDGSIFPRIO", Const, 1, ""},
+ {"SIOCBRDGSMA", Const, 1, ""},
+ {"SIOCBRDGSPRI", Const, 1, ""},
+ {"SIOCBRDGSPROTO", Const, 1, ""},
+ {"SIOCBRDGSTO", Const, 1, ""},
+ {"SIOCBRDGSTXHC", Const, 1, ""},
+ {"SIOCDARP", Const, 0, ""},
+ {"SIOCDELDLCI", Const, 0, ""},
+ {"SIOCDELMULTI", Const, 0, ""},
+ {"SIOCDELRT", Const, 0, ""},
+ {"SIOCDEVPRIVATE", Const, 0, ""},
+ {"SIOCDIFADDR", Const, 0, ""},
+ {"SIOCDIFGROUP", Const, 0, ""},
+ {"SIOCDIFPHYADDR", Const, 0, ""},
+ {"SIOCDLIFADDR", Const, 0, ""},
+ {"SIOCDRARP", Const, 0, ""},
+ {"SIOCGARP", Const, 0, ""},
+ {"SIOCGDRVSPEC", Const, 0, ""},
+ {"SIOCGETKALIVE", Const, 1, ""},
+ {"SIOCGETLABEL", Const, 1, ""},
+ {"SIOCGETPFLOW", Const, 1, ""},
+ {"SIOCGETPFSYNC", Const, 1, ""},
+ {"SIOCGETSGCNT", Const, 0, ""},
+ {"SIOCGETVIFCNT", Const, 0, ""},
+ {"SIOCGETVLAN", Const, 0, ""},
+ {"SIOCGHIWAT", Const, 0, ""},
+ {"SIOCGIFADDR", Const, 0, ""},
+ {"SIOCGIFADDRPREF", Const, 1, ""},
+ {"SIOCGIFALIAS", Const, 1, ""},
+ {"SIOCGIFALTMTU", Const, 0, ""},
+ {"SIOCGIFASYNCMAP", Const, 0, ""},
+ {"SIOCGIFBOND", Const, 0, ""},
+ {"SIOCGIFBR", Const, 0, ""},
+ {"SIOCGIFBRDADDR", Const, 0, ""},
+ {"SIOCGIFCAP", Const, 0, ""},
+ {"SIOCGIFCONF", Const, 0, ""},
+ {"SIOCGIFCOUNT", Const, 0, ""},
+ {"SIOCGIFDATA", Const, 1, ""},
+ {"SIOCGIFDESCR", Const, 0, ""},
+ {"SIOCGIFDEVMTU", Const, 0, ""},
+ {"SIOCGIFDLT", Const, 1, ""},
+ {"SIOCGIFDSTADDR", Const, 0, ""},
+ {"SIOCGIFENCAP", Const, 0, ""},
+ {"SIOCGIFFIB", Const, 1, ""},
+ {"SIOCGIFFLAGS", Const, 0, ""},
+ {"SIOCGIFGATTR", Const, 1, ""},
+ {"SIOCGIFGENERIC", Const, 0, ""},
+ {"SIOCGIFGMEMB", Const, 0, ""},
+ {"SIOCGIFGROUP", Const, 0, ""},
+ {"SIOCGIFHARDMTU", Const, 3, ""},
+ {"SIOCGIFHWADDR", Const, 0, ""},
+ {"SIOCGIFINDEX", Const, 0, ""},
+ {"SIOCGIFKPI", Const, 0, ""},
+ {"SIOCGIFMAC", Const, 0, ""},
+ {"SIOCGIFMAP", Const, 0, ""},
+ {"SIOCGIFMEDIA", Const, 0, ""},
+ {"SIOCGIFMEM", Const, 0, ""},
+ {"SIOCGIFMETRIC", Const, 0, ""},
+ {"SIOCGIFMTU", Const, 0, ""},
+ {"SIOCGIFNAME", Const, 0, ""},
+ {"SIOCGIFNETMASK", Const, 0, ""},
+ {"SIOCGIFPDSTADDR", Const, 0, ""},
+ {"SIOCGIFPFLAGS", Const, 0, ""},
+ {"SIOCGIFPHYS", Const, 0, ""},
+ {"SIOCGIFPRIORITY", Const, 1, ""},
+ {"SIOCGIFPSRCADDR", Const, 0, ""},
+ {"SIOCGIFRDOMAIN", Const, 1, ""},
+ {"SIOCGIFRTLABEL", Const, 1, ""},
+ {"SIOCGIFSLAVE", Const, 0, ""},
+ {"SIOCGIFSTATUS", Const, 0, ""},
+ {"SIOCGIFTIMESLOT", Const, 1, ""},
+ {"SIOCGIFTXQLEN", Const, 0, ""},
+ {"SIOCGIFVLAN", Const, 0, ""},
+ {"SIOCGIFWAKEFLAGS", Const, 0, ""},
+ {"SIOCGIFXFLAGS", Const, 1, ""},
+ {"SIOCGLIFADDR", Const, 0, ""},
+ {"SIOCGLIFPHYADDR", Const, 0, ""},
+ {"SIOCGLIFPHYRTABLE", Const, 1, ""},
+ {"SIOCGLIFPHYTTL", Const, 3, ""},
+ {"SIOCGLINKSTR", Const, 1, ""},
+ {"SIOCGLOWAT", Const, 0, ""},
+ {"SIOCGPGRP", Const, 0, ""},
+ {"SIOCGPRIVATE_0", Const, 0, ""},
+ {"SIOCGPRIVATE_1", Const, 0, ""},
+ {"SIOCGRARP", Const, 0, ""},
+ {"SIOCGSPPPPARAMS", Const, 3, ""},
+ {"SIOCGSTAMP", Const, 0, ""},
+ {"SIOCGSTAMPNS", Const, 0, ""},
+ {"SIOCGVH", Const, 1, ""},
+ {"SIOCGVNETID", Const, 3, ""},
+ {"SIOCIFCREATE", Const, 0, ""},
+ {"SIOCIFCREATE2", Const, 0, ""},
+ {"SIOCIFDESTROY", Const, 0, ""},
+ {"SIOCIFGCLONERS", Const, 0, ""},
+ {"SIOCINITIFADDR", Const, 1, ""},
+ {"SIOCPROTOPRIVATE", Const, 0, ""},
+ {"SIOCRSLVMULTI", Const, 0, ""},
+ {"SIOCRTMSG", Const, 0, ""},
+ {"SIOCSARP", Const, 0, ""},
+ {"SIOCSDRVSPEC", Const, 0, ""},
+ {"SIOCSETKALIVE", Const, 1, ""},
+ {"SIOCSETLABEL", Const, 1, ""},
+ {"SIOCSETPFLOW", Const, 1, ""},
+ {"SIOCSETPFSYNC", Const, 1, ""},
+ {"SIOCSETVLAN", Const, 0, ""},
+ {"SIOCSHIWAT", Const, 0, ""},
+ {"SIOCSIFADDR", Const, 0, ""},
+ {"SIOCSIFADDRPREF", Const, 1, ""},
+ {"SIOCSIFALTMTU", Const, 0, ""},
+ {"SIOCSIFASYNCMAP", Const, 0, ""},
+ {"SIOCSIFBOND", Const, 0, ""},
+ {"SIOCSIFBR", Const, 0, ""},
+ {"SIOCSIFBRDADDR", Const, 0, ""},
+ {"SIOCSIFCAP", Const, 0, ""},
+ {"SIOCSIFDESCR", Const, 0, ""},
+ {"SIOCSIFDSTADDR", Const, 0, ""},
+ {"SIOCSIFENCAP", Const, 0, ""},
+ {"SIOCSIFFIB", Const, 1, ""},
+ {"SIOCSIFFLAGS", Const, 0, ""},
+ {"SIOCSIFGATTR", Const, 1, ""},
+ {"SIOCSIFGENERIC", Const, 0, ""},
+ {"SIOCSIFHWADDR", Const, 0, ""},
+ {"SIOCSIFHWBROADCAST", Const, 0, ""},
+ {"SIOCSIFKPI", Const, 0, ""},
+ {"SIOCSIFLINK", Const, 0, ""},
+ {"SIOCSIFLLADDR", Const, 0, ""},
+ {"SIOCSIFMAC", Const, 0, ""},
+ {"SIOCSIFMAP", Const, 0, ""},
+ {"SIOCSIFMEDIA", Const, 0, ""},
+ {"SIOCSIFMEM", Const, 0, ""},
+ {"SIOCSIFMETRIC", Const, 0, ""},
+ {"SIOCSIFMTU", Const, 0, ""},
+ {"SIOCSIFNAME", Const, 0, ""},
+ {"SIOCSIFNETMASK", Const, 0, ""},
+ {"SIOCSIFPFLAGS", Const, 0, ""},
+ {"SIOCSIFPHYADDR", Const, 0, ""},
+ {"SIOCSIFPHYS", Const, 0, ""},
+ {"SIOCSIFPRIORITY", Const, 1, ""},
+ {"SIOCSIFRDOMAIN", Const, 1, ""},
+ {"SIOCSIFRTLABEL", Const, 1, ""},
+ {"SIOCSIFRVNET", Const, 0, ""},
+ {"SIOCSIFSLAVE", Const, 0, ""},
+ {"SIOCSIFTIMESLOT", Const, 1, ""},
+ {"SIOCSIFTXQLEN", Const, 0, ""},
+ {"SIOCSIFVLAN", Const, 0, ""},
+ {"SIOCSIFVNET", Const, 0, ""},
+ {"SIOCSIFXFLAGS", Const, 1, ""},
+ {"SIOCSLIFPHYADDR", Const, 0, ""},
+ {"SIOCSLIFPHYRTABLE", Const, 1, ""},
+ {"SIOCSLIFPHYTTL", Const, 3, ""},
+ {"SIOCSLINKSTR", Const, 1, ""},
+ {"SIOCSLOWAT", Const, 0, ""},
+ {"SIOCSPGRP", Const, 0, ""},
+ {"SIOCSRARP", Const, 0, ""},
+ {"SIOCSSPPPPARAMS", Const, 3, ""},
+ {"SIOCSVH", Const, 1, ""},
+ {"SIOCSVNETID", Const, 3, ""},
+ {"SIOCZIFDATA", Const, 1, ""},
+ {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1, ""},
+ {"SIO_GET_INTERFACE_LIST", Const, 0, ""},
+ {"SIO_KEEPALIVE_VALS", Const, 3, ""},
+ {"SIO_UDP_CONNRESET", Const, 4, ""},
+ {"SOCK_CLOEXEC", Const, 0, ""},
+ {"SOCK_DCCP", Const, 0, ""},
+ {"SOCK_DGRAM", Const, 0, ""},
+ {"SOCK_FLAGS_MASK", Const, 1, ""},
+ {"SOCK_MAXADDRLEN", Const, 0, ""},
+ {"SOCK_NONBLOCK", Const, 0, ""},
+ {"SOCK_NOSIGPIPE", Const, 1, ""},
+ {"SOCK_PACKET", Const, 0, ""},
+ {"SOCK_RAW", Const, 0, ""},
+ {"SOCK_RDM", Const, 0, ""},
+ {"SOCK_SEQPACKET", Const, 0, ""},
+ {"SOCK_STREAM", Const, 0, ""},
+ {"SOL_AAL", Const, 0, ""},
+ {"SOL_ATM", Const, 0, ""},
+ {"SOL_DECNET", Const, 0, ""},
+ {"SOL_ICMPV6", Const, 0, ""},
+ {"SOL_IP", Const, 0, ""},
+ {"SOL_IPV6", Const, 0, ""},
+ {"SOL_IRDA", Const, 0, ""},
+ {"SOL_PACKET", Const, 0, ""},
+ {"SOL_RAW", Const, 0, ""},
+ {"SOL_SOCKET", Const, 0, ""},
+ {"SOL_TCP", Const, 0, ""},
+ {"SOL_X25", Const, 0, ""},
+ {"SOMAXCONN", Const, 0, ""},
+ {"SO_ACCEPTCONN", Const, 0, ""},
+ {"SO_ACCEPTFILTER", Const, 0, ""},
+ {"SO_ATTACH_FILTER", Const, 0, ""},
+ {"SO_BINDANY", Const, 1, ""},
+ {"SO_BINDTODEVICE", Const, 0, ""},
+ {"SO_BINTIME", Const, 0, ""},
+ {"SO_BROADCAST", Const, 0, ""},
+ {"SO_BSDCOMPAT", Const, 0, ""},
+ {"SO_DEBUG", Const, 0, ""},
+ {"SO_DETACH_FILTER", Const, 0, ""},
+ {"SO_DOMAIN", Const, 0, ""},
+ {"SO_DONTROUTE", Const, 0, ""},
+ {"SO_DONTTRUNC", Const, 0, ""},
+ {"SO_ERROR", Const, 0, ""},
+ {"SO_KEEPALIVE", Const, 0, ""},
+ {"SO_LABEL", Const, 0, ""},
+ {"SO_LINGER", Const, 0, ""},
+ {"SO_LINGER_SEC", Const, 0, ""},
+ {"SO_LISTENINCQLEN", Const, 0, ""},
+ {"SO_LISTENQLEN", Const, 0, ""},
+ {"SO_LISTENQLIMIT", Const, 0, ""},
+ {"SO_MARK", Const, 0, ""},
+ {"SO_NETPROC", Const, 1, ""},
+ {"SO_NKE", Const, 0, ""},
+ {"SO_NOADDRERR", Const, 0, ""},
+ {"SO_NOHEADER", Const, 1, ""},
+ {"SO_NOSIGPIPE", Const, 0, ""},
+ {"SO_NOTIFYCONFLICT", Const, 0, ""},
+ {"SO_NO_CHECK", Const, 0, ""},
+ {"SO_NO_DDP", Const, 0, ""},
+ {"SO_NO_OFFLOAD", Const, 0, ""},
+ {"SO_NP_EXTENSIONS", Const, 0, ""},
+ {"SO_NREAD", Const, 0, ""},
+ {"SO_NUMRCVPKT", Const, 16, ""},
+ {"SO_NWRITE", Const, 0, ""},
+ {"SO_OOBINLINE", Const, 0, ""},
+ {"SO_OVERFLOWED", Const, 1, ""},
+ {"SO_PASSCRED", Const, 0, ""},
+ {"SO_PASSSEC", Const, 0, ""},
+ {"SO_PEERCRED", Const, 0, ""},
+ {"SO_PEERLABEL", Const, 0, ""},
+ {"SO_PEERNAME", Const, 0, ""},
+ {"SO_PEERSEC", Const, 0, ""},
+ {"SO_PRIORITY", Const, 0, ""},
+ {"SO_PROTOCOL", Const, 0, ""},
+ {"SO_PROTOTYPE", Const, 1, ""},
+ {"SO_RANDOMPORT", Const, 0, ""},
+ {"SO_RCVBUF", Const, 0, ""},
+ {"SO_RCVBUFFORCE", Const, 0, ""},
+ {"SO_RCVLOWAT", Const, 0, ""},
+ {"SO_RCVTIMEO", Const, 0, ""},
+ {"SO_RESTRICTIONS", Const, 0, ""},
+ {"SO_RESTRICT_DENYIN", Const, 0, ""},
+ {"SO_RESTRICT_DENYOUT", Const, 0, ""},
+ {"SO_RESTRICT_DENYSET", Const, 0, ""},
+ {"SO_REUSEADDR", Const, 0, ""},
+ {"SO_REUSEPORT", Const, 0, ""},
+ {"SO_REUSESHAREUID", Const, 0, ""},
+ {"SO_RTABLE", Const, 1, ""},
+ {"SO_RXQ_OVFL", Const, 0, ""},
+ {"SO_SECURITY_AUTHENTICATION", Const, 0, ""},
+ {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0, ""},
+ {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0, ""},
+ {"SO_SETFIB", Const, 0, ""},
+ {"SO_SNDBUF", Const, 0, ""},
+ {"SO_SNDBUFFORCE", Const, 0, ""},
+ {"SO_SNDLOWAT", Const, 0, ""},
+ {"SO_SNDTIMEO", Const, 0, ""},
+ {"SO_SPLICE", Const, 1, ""},
+ {"SO_TIMESTAMP", Const, 0, ""},
+ {"SO_TIMESTAMPING", Const, 0, ""},
+ {"SO_TIMESTAMPNS", Const, 0, ""},
+ {"SO_TIMESTAMP_MONOTONIC", Const, 0, ""},
+ {"SO_TYPE", Const, 0, ""},
+ {"SO_UPCALLCLOSEWAIT", Const, 0, ""},
+ {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0, ""},
+ {"SO_UPDATE_CONNECT_CONTEXT", Const, 1, ""},
+ {"SO_USELOOPBACK", Const, 0, ""},
+ {"SO_USER_COOKIE", Const, 1, ""},
+ {"SO_VENDOR", Const, 3, ""},
+ {"SO_WANTMORE", Const, 0, ""},
+ {"SO_WANTOOBFLAG", Const, 0, ""},
+ {"SSLExtraCertChainPolicyPara", Type, 0, ""},
+ {"SSLExtraCertChainPolicyPara.AuthType", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.Checks", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.ServerName", Field, 0, ""},
+ {"SSLExtraCertChainPolicyPara.Size", Field, 0, ""},
+ {"STANDARD_RIGHTS_ALL", Const, 0, ""},
+ {"STANDARD_RIGHTS_EXECUTE", Const, 0, ""},
+ {"STANDARD_RIGHTS_READ", Const, 0, ""},
+ {"STANDARD_RIGHTS_REQUIRED", Const, 0, ""},
+ {"STANDARD_RIGHTS_WRITE", Const, 0, ""},
+ {"STARTF_USESHOWWINDOW", Const, 0, ""},
+ {"STARTF_USESTDHANDLES", Const, 0, ""},
+ {"STD_ERROR_HANDLE", Const, 0, ""},
+ {"STD_INPUT_HANDLE", Const, 0, ""},
+ {"STD_OUTPUT_HANDLE", Const, 0, ""},
+ {"SUBLANG_ENGLISH_US", Const, 0, ""},
+ {"SW_FORCEMINIMIZE", Const, 0, ""},
+ {"SW_HIDE", Const, 0, ""},
+ {"SW_MAXIMIZE", Const, 0, ""},
+ {"SW_MINIMIZE", Const, 0, ""},
+ {"SW_NORMAL", Const, 0, ""},
+ {"SW_RESTORE", Const, 0, ""},
+ {"SW_SHOW", Const, 0, ""},
+ {"SW_SHOWDEFAULT", Const, 0, ""},
+ {"SW_SHOWMAXIMIZED", Const, 0, ""},
+ {"SW_SHOWMINIMIZED", Const, 0, ""},
+ {"SW_SHOWMINNOACTIVE", Const, 0, ""},
+ {"SW_SHOWNA", Const, 0, ""},
+ {"SW_SHOWNOACTIVATE", Const, 0, ""},
+ {"SW_SHOWNORMAL", Const, 0, ""},
+ {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4, ""},
+ {"SYNCHRONIZE", Const, 0, ""},
+ {"SYSCTL_VERSION", Const, 1, ""},
+ {"SYSCTL_VERS_0", Const, 1, ""},
+ {"SYSCTL_VERS_1", Const, 1, ""},
+ {"SYSCTL_VERS_MASK", Const, 1, ""},
+ {"SYS_ABORT2", Const, 0, ""},
+ {"SYS_ACCEPT", Const, 0, ""},
+ {"SYS_ACCEPT4", Const, 0, ""},
+ {"SYS_ACCEPT_NOCANCEL", Const, 0, ""},
+ {"SYS_ACCESS", Const, 0, ""},
+ {"SYS_ACCESS_EXTENDED", Const, 0, ""},
+ {"SYS_ACCT", Const, 0, ""},
+ {"SYS_ADD_KEY", Const, 0, ""},
+ {"SYS_ADD_PROFIL", Const, 0, ""},
+ {"SYS_ADJFREQ", Const, 1, ""},
+ {"SYS_ADJTIME", Const, 0, ""},
+ {"SYS_ADJTIMEX", Const, 0, ""},
+ {"SYS_AFS_SYSCALL", Const, 0, ""},
+ {"SYS_AIO_CANCEL", Const, 0, ""},
+ {"SYS_AIO_ERROR", Const, 0, ""},
+ {"SYS_AIO_FSYNC", Const, 0, ""},
+ {"SYS_AIO_MLOCK", Const, 14, ""},
+ {"SYS_AIO_READ", Const, 0, ""},
+ {"SYS_AIO_RETURN", Const, 0, ""},
+ {"SYS_AIO_SUSPEND", Const, 0, ""},
+ {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0, ""},
+ {"SYS_AIO_WAITCOMPLETE", Const, 14, ""},
+ {"SYS_AIO_WRITE", Const, 0, ""},
+ {"SYS_ALARM", Const, 0, ""},
+ {"SYS_ARCH_PRCTL", Const, 0, ""},
+ {"SYS_ARM_FADVISE64_64", Const, 0, ""},
+ {"SYS_ARM_SYNC_FILE_RANGE", Const, 0, ""},
+ {"SYS_ATGETMSG", Const, 0, ""},
+ {"SYS_ATPGETREQ", Const, 0, ""},
+ {"SYS_ATPGETRSP", Const, 0, ""},
+ {"SYS_ATPSNDREQ", Const, 0, ""},
+ {"SYS_ATPSNDRSP", Const, 0, ""},
+ {"SYS_ATPUTMSG", Const, 0, ""},
+ {"SYS_ATSOCKET", Const, 0, ""},
+ {"SYS_AUDIT", Const, 0, ""},
+ {"SYS_AUDITCTL", Const, 0, ""},
+ {"SYS_AUDITON", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_JOIN", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_PORT", Const, 0, ""},
+ {"SYS_AUDIT_SESSION_SELF", Const, 0, ""},
+ {"SYS_BDFLUSH", Const, 0, ""},
+ {"SYS_BIND", Const, 0, ""},
+ {"SYS_BINDAT", Const, 3, ""},
+ {"SYS_BREAK", Const, 0, ""},
+ {"SYS_BRK", Const, 0, ""},
+ {"SYS_BSDTHREAD_CREATE", Const, 0, ""},
+ {"SYS_BSDTHREAD_REGISTER", Const, 0, ""},
+ {"SYS_BSDTHREAD_TERMINATE", Const, 0, ""},
+ {"SYS_CAPGET", Const, 0, ""},
+ {"SYS_CAPSET", Const, 0, ""},
+ {"SYS_CAP_ENTER", Const, 0, ""},
+ {"SYS_CAP_FCNTLS_GET", Const, 1, ""},
+ {"SYS_CAP_FCNTLS_LIMIT", Const, 1, ""},
+ {"SYS_CAP_GETMODE", Const, 0, ""},
+ {"SYS_CAP_GETRIGHTS", Const, 0, ""},
+ {"SYS_CAP_IOCTLS_GET", Const, 1, ""},
+ {"SYS_CAP_IOCTLS_LIMIT", Const, 1, ""},
+ {"SYS_CAP_NEW", Const, 0, ""},
+ {"SYS_CAP_RIGHTS_GET", Const, 1, ""},
+ {"SYS_CAP_RIGHTS_LIMIT", Const, 1, ""},
+ {"SYS_CHDIR", Const, 0, ""},
+ {"SYS_CHFLAGS", Const, 0, ""},
+ {"SYS_CHFLAGSAT", Const, 3, ""},
+ {"SYS_CHMOD", Const, 0, ""},
+ {"SYS_CHMOD_EXTENDED", Const, 0, ""},
+ {"SYS_CHOWN", Const, 0, ""},
+ {"SYS_CHOWN32", Const, 0, ""},
+ {"SYS_CHROOT", Const, 0, ""},
+ {"SYS_CHUD", Const, 0, ""},
+ {"SYS_CLOCK_ADJTIME", Const, 0, ""},
+ {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1, ""},
+ {"SYS_CLOCK_GETRES", Const, 0, ""},
+ {"SYS_CLOCK_GETTIME", Const, 0, ""},
+ {"SYS_CLOCK_NANOSLEEP", Const, 0, ""},
+ {"SYS_CLOCK_SETTIME", Const, 0, ""},
+ {"SYS_CLONE", Const, 0, ""},
+ {"SYS_CLOSE", Const, 0, ""},
+ {"SYS_CLOSEFROM", Const, 0, ""},
+ {"SYS_CLOSE_NOCANCEL", Const, 0, ""},
+ {"SYS_CONNECT", Const, 0, ""},
+ {"SYS_CONNECTAT", Const, 3, ""},
+ {"SYS_CONNECT_NOCANCEL", Const, 0, ""},
+ {"SYS_COPYFILE", Const, 0, ""},
+ {"SYS_CPUSET", Const, 0, ""},
+ {"SYS_CPUSET_GETAFFINITY", Const, 0, ""},
+ {"SYS_CPUSET_GETID", Const, 0, ""},
+ {"SYS_CPUSET_SETAFFINITY", Const, 0, ""},
+ {"SYS_CPUSET_SETID", Const, 0, ""},
+ {"SYS_CREAT", Const, 0, ""},
+ {"SYS_CREATE_MODULE", Const, 0, ""},
+ {"SYS_CSOPS", Const, 0, ""},
+ {"SYS_CSOPS_AUDITTOKEN", Const, 16, ""},
+ {"SYS_DELETE", Const, 0, ""},
+ {"SYS_DELETE_MODULE", Const, 0, ""},
+ {"SYS_DUP", Const, 0, ""},
+ {"SYS_DUP2", Const, 0, ""},
+ {"SYS_DUP3", Const, 0, ""},
+ {"SYS_EACCESS", Const, 0, ""},
+ {"SYS_EPOLL_CREATE", Const, 0, ""},
+ {"SYS_EPOLL_CREATE1", Const, 0, ""},
+ {"SYS_EPOLL_CTL", Const, 0, ""},
+ {"SYS_EPOLL_CTL_OLD", Const, 0, ""},
+ {"SYS_EPOLL_PWAIT", Const, 0, ""},
+ {"SYS_EPOLL_WAIT", Const, 0, ""},
+ {"SYS_EPOLL_WAIT_OLD", Const, 0, ""},
+ {"SYS_EVENTFD", Const, 0, ""},
+ {"SYS_EVENTFD2", Const, 0, ""},
+ {"SYS_EXCHANGEDATA", Const, 0, ""},
+ {"SYS_EXECVE", Const, 0, ""},
+ {"SYS_EXIT", Const, 0, ""},
+ {"SYS_EXIT_GROUP", Const, 0, ""},
+ {"SYS_EXTATTRCTL", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_FD", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_DELETE_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_GET_FD", Const, 0, ""},
+ {"SYS_EXTATTR_GET_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_GET_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_FD", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_LIST_LINK", Const, 0, ""},
+ {"SYS_EXTATTR_SET_FD", Const, 0, ""},
+ {"SYS_EXTATTR_SET_FILE", Const, 0, ""},
+ {"SYS_EXTATTR_SET_LINK", Const, 0, ""},
+ {"SYS_FACCESSAT", Const, 0, ""},
+ {"SYS_FADVISE64", Const, 0, ""},
+ {"SYS_FADVISE64_64", Const, 0, ""},
+ {"SYS_FALLOCATE", Const, 0, ""},
+ {"SYS_FANOTIFY_INIT", Const, 0, ""},
+ {"SYS_FANOTIFY_MARK", Const, 0, ""},
+ {"SYS_FCHDIR", Const, 0, ""},
+ {"SYS_FCHFLAGS", Const, 0, ""},
+ {"SYS_FCHMOD", Const, 0, ""},
+ {"SYS_FCHMODAT", Const, 0, ""},
+ {"SYS_FCHMOD_EXTENDED", Const, 0, ""},
+ {"SYS_FCHOWN", Const, 0, ""},
+ {"SYS_FCHOWN32", Const, 0, ""},
+ {"SYS_FCHOWNAT", Const, 0, ""},
+ {"SYS_FCHROOT", Const, 1, ""},
+ {"SYS_FCNTL", Const, 0, ""},
+ {"SYS_FCNTL64", Const, 0, ""},
+ {"SYS_FCNTL_NOCANCEL", Const, 0, ""},
+ {"SYS_FDATASYNC", Const, 0, ""},
+ {"SYS_FEXECVE", Const, 0, ""},
+ {"SYS_FFCLOCK_GETCOUNTER", Const, 0, ""},
+ {"SYS_FFCLOCK_GETESTIMATE", Const, 0, ""},
+ {"SYS_FFCLOCK_SETESTIMATE", Const, 0, ""},
+ {"SYS_FFSCTL", Const, 0, ""},
+ {"SYS_FGETATTRLIST", Const, 0, ""},
+ {"SYS_FGETXATTR", Const, 0, ""},
+ {"SYS_FHOPEN", Const, 0, ""},
+ {"SYS_FHSTAT", Const, 0, ""},
+ {"SYS_FHSTATFS", Const, 0, ""},
+ {"SYS_FILEPORT_MAKEFD", Const, 0, ""},
+ {"SYS_FILEPORT_MAKEPORT", Const, 0, ""},
+ {"SYS_FKTRACE", Const, 1, ""},
+ {"SYS_FLISTXATTR", Const, 0, ""},
+ {"SYS_FLOCK", Const, 0, ""},
+ {"SYS_FORK", Const, 0, ""},
+ {"SYS_FPATHCONF", Const, 0, ""},
+ {"SYS_FREEBSD6_FTRUNCATE", Const, 0, ""},
+ {"SYS_FREEBSD6_LSEEK", Const, 0, ""},
+ {"SYS_FREEBSD6_MMAP", Const, 0, ""},
+ {"SYS_FREEBSD6_PREAD", Const, 0, ""},
+ {"SYS_FREEBSD6_PWRITE", Const, 0, ""},
+ {"SYS_FREEBSD6_TRUNCATE", Const, 0, ""},
+ {"SYS_FREMOVEXATTR", Const, 0, ""},
+ {"SYS_FSCTL", Const, 0, ""},
+ {"SYS_FSETATTRLIST", Const, 0, ""},
+ {"SYS_FSETXATTR", Const, 0, ""},
+ {"SYS_FSGETPATH", Const, 0, ""},
+ {"SYS_FSTAT", Const, 0, ""},
+ {"SYS_FSTAT64", Const, 0, ""},
+ {"SYS_FSTAT64_EXTENDED", Const, 0, ""},
+ {"SYS_FSTATAT", Const, 0, ""},
+ {"SYS_FSTATAT64", Const, 0, ""},
+ {"SYS_FSTATFS", Const, 0, ""},
+ {"SYS_FSTATFS64", Const, 0, ""},
+ {"SYS_FSTATV", Const, 0, ""},
+ {"SYS_FSTATVFS1", Const, 1, ""},
+ {"SYS_FSTAT_EXTENDED", Const, 0, ""},
+ {"SYS_FSYNC", Const, 0, ""},
+ {"SYS_FSYNC_NOCANCEL", Const, 0, ""},
+ {"SYS_FSYNC_RANGE", Const, 1, ""},
+ {"SYS_FTIME", Const, 0, ""},
+ {"SYS_FTRUNCATE", Const, 0, ""},
+ {"SYS_FTRUNCATE64", Const, 0, ""},
+ {"SYS_FUTEX", Const, 0, ""},
+ {"SYS_FUTIMENS", Const, 1, ""},
+ {"SYS_FUTIMES", Const, 0, ""},
+ {"SYS_FUTIMESAT", Const, 0, ""},
+ {"SYS_GETATTRLIST", Const, 0, ""},
+ {"SYS_GETAUDIT", Const, 0, ""},
+ {"SYS_GETAUDIT_ADDR", Const, 0, ""},
+ {"SYS_GETAUID", Const, 0, ""},
+ {"SYS_GETCONTEXT", Const, 0, ""},
+ {"SYS_GETCPU", Const, 0, ""},
+ {"SYS_GETCWD", Const, 0, ""},
+ {"SYS_GETDENTS", Const, 0, ""},
+ {"SYS_GETDENTS64", Const, 0, ""},
+ {"SYS_GETDIRENTRIES", Const, 0, ""},
+ {"SYS_GETDIRENTRIES64", Const, 0, ""},
+ {"SYS_GETDIRENTRIESATTR", Const, 0, ""},
+ {"SYS_GETDTABLECOUNT", Const, 1, ""},
+ {"SYS_GETDTABLESIZE", Const, 0, ""},
+ {"SYS_GETEGID", Const, 0, ""},
+ {"SYS_GETEGID32", Const, 0, ""},
+ {"SYS_GETEUID", Const, 0, ""},
+ {"SYS_GETEUID32", Const, 0, ""},
+ {"SYS_GETFH", Const, 0, ""},
+ {"SYS_GETFSSTAT", Const, 0, ""},
+ {"SYS_GETFSSTAT64", Const, 0, ""},
+ {"SYS_GETGID", Const, 0, ""},
+ {"SYS_GETGID32", Const, 0, ""},
+ {"SYS_GETGROUPS", Const, 0, ""},
+ {"SYS_GETGROUPS32", Const, 0, ""},
+ {"SYS_GETHOSTUUID", Const, 0, ""},
+ {"SYS_GETITIMER", Const, 0, ""},
+ {"SYS_GETLCID", Const, 0, ""},
+ {"SYS_GETLOGIN", Const, 0, ""},
+ {"SYS_GETLOGINCLASS", Const, 0, ""},
+ {"SYS_GETPEERNAME", Const, 0, ""},
+ {"SYS_GETPGID", Const, 0, ""},
+ {"SYS_GETPGRP", Const, 0, ""},
+ {"SYS_GETPID", Const, 0, ""},
+ {"SYS_GETPMSG", Const, 0, ""},
+ {"SYS_GETPPID", Const, 0, ""},
+ {"SYS_GETPRIORITY", Const, 0, ""},
+ {"SYS_GETRESGID", Const, 0, ""},
+ {"SYS_GETRESGID32", Const, 0, ""},
+ {"SYS_GETRESUID", Const, 0, ""},
+ {"SYS_GETRESUID32", Const, 0, ""},
+ {"SYS_GETRLIMIT", Const, 0, ""},
+ {"SYS_GETRTABLE", Const, 1, ""},
+ {"SYS_GETRUSAGE", Const, 0, ""},
+ {"SYS_GETSGROUPS", Const, 0, ""},
+ {"SYS_GETSID", Const, 0, ""},
+ {"SYS_GETSOCKNAME", Const, 0, ""},
+ {"SYS_GETSOCKOPT", Const, 0, ""},
+ {"SYS_GETTHRID", Const, 1, ""},
+ {"SYS_GETTID", Const, 0, ""},
+ {"SYS_GETTIMEOFDAY", Const, 0, ""},
+ {"SYS_GETUID", Const, 0, ""},
+ {"SYS_GETUID32", Const, 0, ""},
+ {"SYS_GETVFSSTAT", Const, 1, ""},
+ {"SYS_GETWGROUPS", Const, 0, ""},
+ {"SYS_GETXATTR", Const, 0, ""},
+ {"SYS_GET_KERNEL_SYMS", Const, 0, ""},
+ {"SYS_GET_MEMPOLICY", Const, 0, ""},
+ {"SYS_GET_ROBUST_LIST", Const, 0, ""},
+ {"SYS_GET_THREAD_AREA", Const, 0, ""},
+ {"SYS_GSSD_SYSCALL", Const, 14, ""},
+ {"SYS_GTTY", Const, 0, ""},
+ {"SYS_IDENTITYSVC", Const, 0, ""},
+ {"SYS_IDLE", Const, 0, ""},
+ {"SYS_INITGROUPS", Const, 0, ""},
+ {"SYS_INIT_MODULE", Const, 0, ""},
+ {"SYS_INOTIFY_ADD_WATCH", Const, 0, ""},
+ {"SYS_INOTIFY_INIT", Const, 0, ""},
+ {"SYS_INOTIFY_INIT1", Const, 0, ""},
+ {"SYS_INOTIFY_RM_WATCH", Const, 0, ""},
+ {"SYS_IOCTL", Const, 0, ""},
+ {"SYS_IOPERM", Const, 0, ""},
+ {"SYS_IOPL", Const, 0, ""},
+ {"SYS_IOPOLICYSYS", Const, 0, ""},
+ {"SYS_IOPRIO_GET", Const, 0, ""},
+ {"SYS_IOPRIO_SET", Const, 0, ""},
+ {"SYS_IO_CANCEL", Const, 0, ""},
+ {"SYS_IO_DESTROY", Const, 0, ""},
+ {"SYS_IO_GETEVENTS", Const, 0, ""},
+ {"SYS_IO_SETUP", Const, 0, ""},
+ {"SYS_IO_SUBMIT", Const, 0, ""},
+ {"SYS_IPC", Const, 0, ""},
+ {"SYS_ISSETUGID", Const, 0, ""},
+ {"SYS_JAIL", Const, 0, ""},
+ {"SYS_JAIL_ATTACH", Const, 0, ""},
+ {"SYS_JAIL_GET", Const, 0, ""},
+ {"SYS_JAIL_REMOVE", Const, 0, ""},
+ {"SYS_JAIL_SET", Const, 0, ""},
+ {"SYS_KAS_INFO", Const, 16, ""},
+ {"SYS_KDEBUG_TRACE", Const, 0, ""},
+ {"SYS_KENV", Const, 0, ""},
+ {"SYS_KEVENT", Const, 0, ""},
+ {"SYS_KEVENT64", Const, 0, ""},
+ {"SYS_KEXEC_LOAD", Const, 0, ""},
+ {"SYS_KEYCTL", Const, 0, ""},
+ {"SYS_KILL", Const, 0, ""},
+ {"SYS_KLDFIND", Const, 0, ""},
+ {"SYS_KLDFIRSTMOD", Const, 0, ""},
+ {"SYS_KLDLOAD", Const, 0, ""},
+ {"SYS_KLDNEXT", Const, 0, ""},
+ {"SYS_KLDSTAT", Const, 0, ""},
+ {"SYS_KLDSYM", Const, 0, ""},
+ {"SYS_KLDUNLOAD", Const, 0, ""},
+ {"SYS_KLDUNLOADF", Const, 0, ""},
+ {"SYS_KMQ_NOTIFY", Const, 14, ""},
+ {"SYS_KMQ_OPEN", Const, 14, ""},
+ {"SYS_KMQ_SETATTR", Const, 14, ""},
+ {"SYS_KMQ_TIMEDRECEIVE", Const, 14, ""},
+ {"SYS_KMQ_TIMEDSEND", Const, 14, ""},
+ {"SYS_KMQ_UNLINK", Const, 14, ""},
+ {"SYS_KQUEUE", Const, 0, ""},
+ {"SYS_KQUEUE1", Const, 1, ""},
+ {"SYS_KSEM_CLOSE", Const, 14, ""},
+ {"SYS_KSEM_DESTROY", Const, 14, ""},
+ {"SYS_KSEM_GETVALUE", Const, 14, ""},
+ {"SYS_KSEM_INIT", Const, 14, ""},
+ {"SYS_KSEM_OPEN", Const, 14, ""},
+ {"SYS_KSEM_POST", Const, 14, ""},
+ {"SYS_KSEM_TIMEDWAIT", Const, 14, ""},
+ {"SYS_KSEM_TRYWAIT", Const, 14, ""},
+ {"SYS_KSEM_UNLINK", Const, 14, ""},
+ {"SYS_KSEM_WAIT", Const, 14, ""},
+ {"SYS_KTIMER_CREATE", Const, 0, ""},
+ {"SYS_KTIMER_DELETE", Const, 0, ""},
+ {"SYS_KTIMER_GETOVERRUN", Const, 0, ""},
+ {"SYS_KTIMER_GETTIME", Const, 0, ""},
+ {"SYS_KTIMER_SETTIME", Const, 0, ""},
+ {"SYS_KTRACE", Const, 0, ""},
+ {"SYS_LCHFLAGS", Const, 0, ""},
+ {"SYS_LCHMOD", Const, 0, ""},
+ {"SYS_LCHOWN", Const, 0, ""},
+ {"SYS_LCHOWN32", Const, 0, ""},
+ {"SYS_LEDGER", Const, 16, ""},
+ {"SYS_LGETFH", Const, 0, ""},
+ {"SYS_LGETXATTR", Const, 0, ""},
+ {"SYS_LINK", Const, 0, ""},
+ {"SYS_LINKAT", Const, 0, ""},
+ {"SYS_LIO_LISTIO", Const, 0, ""},
+ {"SYS_LISTEN", Const, 0, ""},
+ {"SYS_LISTXATTR", Const, 0, ""},
+ {"SYS_LLISTXATTR", Const, 0, ""},
+ {"SYS_LOCK", Const, 0, ""},
+ {"SYS_LOOKUP_DCOOKIE", Const, 0, ""},
+ {"SYS_LPATHCONF", Const, 0, ""},
+ {"SYS_LREMOVEXATTR", Const, 0, ""},
+ {"SYS_LSEEK", Const, 0, ""},
+ {"SYS_LSETXATTR", Const, 0, ""},
+ {"SYS_LSTAT", Const, 0, ""},
+ {"SYS_LSTAT64", Const, 0, ""},
+ {"SYS_LSTAT64_EXTENDED", Const, 0, ""},
+ {"SYS_LSTATV", Const, 0, ""},
+ {"SYS_LSTAT_EXTENDED", Const, 0, ""},
+ {"SYS_LUTIMES", Const, 0, ""},
+ {"SYS_MAC_SYSCALL", Const, 0, ""},
+ {"SYS_MADVISE", Const, 0, ""},
+ {"SYS_MADVISE1", Const, 0, ""},
+ {"SYS_MAXSYSCALL", Const, 0, ""},
+ {"SYS_MBIND", Const, 0, ""},
+ {"SYS_MIGRATE_PAGES", Const, 0, ""},
+ {"SYS_MINCORE", Const, 0, ""},
+ {"SYS_MINHERIT", Const, 0, ""},
+ {"SYS_MKCOMPLEX", Const, 0, ""},
+ {"SYS_MKDIR", Const, 0, ""},
+ {"SYS_MKDIRAT", Const, 0, ""},
+ {"SYS_MKDIR_EXTENDED", Const, 0, ""},
+ {"SYS_MKFIFO", Const, 0, ""},
+ {"SYS_MKFIFOAT", Const, 0, ""},
+ {"SYS_MKFIFO_EXTENDED", Const, 0, ""},
+ {"SYS_MKNOD", Const, 0, ""},
+ {"SYS_MKNODAT", Const, 0, ""},
+ {"SYS_MLOCK", Const, 0, ""},
+ {"SYS_MLOCKALL", Const, 0, ""},
+ {"SYS_MMAP", Const, 0, ""},
+ {"SYS_MMAP2", Const, 0, ""},
+ {"SYS_MODCTL", Const, 1, ""},
+ {"SYS_MODFIND", Const, 0, ""},
+ {"SYS_MODFNEXT", Const, 0, ""},
+ {"SYS_MODIFY_LDT", Const, 0, ""},
+ {"SYS_MODNEXT", Const, 0, ""},
+ {"SYS_MODSTAT", Const, 0, ""},
+ {"SYS_MODWATCH", Const, 0, ""},
+ {"SYS_MOUNT", Const, 0, ""},
+ {"SYS_MOVE_PAGES", Const, 0, ""},
+ {"SYS_MPROTECT", Const, 0, ""},
+ {"SYS_MPX", Const, 0, ""},
+ {"SYS_MQUERY", Const, 1, ""},
+ {"SYS_MQ_GETSETATTR", Const, 0, ""},
+ {"SYS_MQ_NOTIFY", Const, 0, ""},
+ {"SYS_MQ_OPEN", Const, 0, ""},
+ {"SYS_MQ_TIMEDRECEIVE", Const, 0, ""},
+ {"SYS_MQ_TIMEDSEND", Const, 0, ""},
+ {"SYS_MQ_UNLINK", Const, 0, ""},
+ {"SYS_MREMAP", Const, 0, ""},
+ {"SYS_MSGCTL", Const, 0, ""},
+ {"SYS_MSGGET", Const, 0, ""},
+ {"SYS_MSGRCV", Const, 0, ""},
+ {"SYS_MSGRCV_NOCANCEL", Const, 0, ""},
+ {"SYS_MSGSND", Const, 0, ""},
+ {"SYS_MSGSND_NOCANCEL", Const, 0, ""},
+ {"SYS_MSGSYS", Const, 0, ""},
+ {"SYS_MSYNC", Const, 0, ""},
+ {"SYS_MSYNC_NOCANCEL", Const, 0, ""},
+ {"SYS_MUNLOCK", Const, 0, ""},
+ {"SYS_MUNLOCKALL", Const, 0, ""},
+ {"SYS_MUNMAP", Const, 0, ""},
+ {"SYS_NAME_TO_HANDLE_AT", Const, 0, ""},
+ {"SYS_NANOSLEEP", Const, 0, ""},
+ {"SYS_NEWFSTATAT", Const, 0, ""},
+ {"SYS_NFSCLNT", Const, 0, ""},
+ {"SYS_NFSSERVCTL", Const, 0, ""},
+ {"SYS_NFSSVC", Const, 0, ""},
+ {"SYS_NFSTAT", Const, 0, ""},
+ {"SYS_NICE", Const, 0, ""},
+ {"SYS_NLM_SYSCALL", Const, 14, ""},
+ {"SYS_NLSTAT", Const, 0, ""},
+ {"SYS_NMOUNT", Const, 0, ""},
+ {"SYS_NSTAT", Const, 0, ""},
+ {"SYS_NTP_ADJTIME", Const, 0, ""},
+ {"SYS_NTP_GETTIME", Const, 0, ""},
+ {"SYS_NUMA_GETAFFINITY", Const, 14, ""},
+ {"SYS_NUMA_SETAFFINITY", Const, 14, ""},
+ {"SYS_OABI_SYSCALL_BASE", Const, 0, ""},
+ {"SYS_OBREAK", Const, 0, ""},
+ {"SYS_OLDFSTAT", Const, 0, ""},
+ {"SYS_OLDLSTAT", Const, 0, ""},
+ {"SYS_OLDOLDUNAME", Const, 0, ""},
+ {"SYS_OLDSTAT", Const, 0, ""},
+ {"SYS_OLDUNAME", Const, 0, ""},
+ {"SYS_OPEN", Const, 0, ""},
+ {"SYS_OPENAT", Const, 0, ""},
+ {"SYS_OPENBSD_POLL", Const, 0, ""},
+ {"SYS_OPEN_BY_HANDLE_AT", Const, 0, ""},
+ {"SYS_OPEN_DPROTECTED_NP", Const, 16, ""},
+ {"SYS_OPEN_EXTENDED", Const, 0, ""},
+ {"SYS_OPEN_NOCANCEL", Const, 0, ""},
+ {"SYS_OVADVISE", Const, 0, ""},
+ {"SYS_PACCEPT", Const, 1, ""},
+ {"SYS_PATHCONF", Const, 0, ""},
+ {"SYS_PAUSE", Const, 0, ""},
+ {"SYS_PCICONFIG_IOBASE", Const, 0, ""},
+ {"SYS_PCICONFIG_READ", Const, 0, ""},
+ {"SYS_PCICONFIG_WRITE", Const, 0, ""},
+ {"SYS_PDFORK", Const, 0, ""},
+ {"SYS_PDGETPID", Const, 0, ""},
+ {"SYS_PDKILL", Const, 0, ""},
+ {"SYS_PERF_EVENT_OPEN", Const, 0, ""},
+ {"SYS_PERSONALITY", Const, 0, ""},
+ {"SYS_PID_HIBERNATE", Const, 0, ""},
+ {"SYS_PID_RESUME", Const, 0, ""},
+ {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0, ""},
+ {"SYS_PID_SUSPEND", Const, 0, ""},
+ {"SYS_PIPE", Const, 0, ""},
+ {"SYS_PIPE2", Const, 0, ""},
+ {"SYS_PIVOT_ROOT", Const, 0, ""},
+ {"SYS_PMC_CONTROL", Const, 1, ""},
+ {"SYS_PMC_GET_INFO", Const, 1, ""},
+ {"SYS_POLL", Const, 0, ""},
+ {"SYS_POLLTS", Const, 1, ""},
+ {"SYS_POLL_NOCANCEL", Const, 0, ""},
+ {"SYS_POSIX_FADVISE", Const, 0, ""},
+ {"SYS_POSIX_FALLOCATE", Const, 0, ""},
+ {"SYS_POSIX_OPENPT", Const, 0, ""},
+ {"SYS_POSIX_SPAWN", Const, 0, ""},
+ {"SYS_PPOLL", Const, 0, ""},
+ {"SYS_PRCTL", Const, 0, ""},
+ {"SYS_PREAD", Const, 0, ""},
+ {"SYS_PREAD64", Const, 0, ""},
+ {"SYS_PREADV", Const, 0, ""},
+ {"SYS_PREAD_NOCANCEL", Const, 0, ""},
+ {"SYS_PRLIMIT64", Const, 0, ""},
+ {"SYS_PROCCTL", Const, 3, ""},
+ {"SYS_PROCESS_POLICY", Const, 0, ""},
+ {"SYS_PROCESS_VM_READV", Const, 0, ""},
+ {"SYS_PROCESS_VM_WRITEV", Const, 0, ""},
+ {"SYS_PROC_INFO", Const, 0, ""},
+ {"SYS_PROF", Const, 0, ""},
+ {"SYS_PROFIL", Const, 0, ""},
+ {"SYS_PSELECT", Const, 0, ""},
+ {"SYS_PSELECT6", Const, 0, ""},
+ {"SYS_PSET_ASSIGN", Const, 1, ""},
+ {"SYS_PSET_CREATE", Const, 1, ""},
+ {"SYS_PSET_DESTROY", Const, 1, ""},
+ {"SYS_PSYNCH_CVBROAD", Const, 0, ""},
+ {"SYS_PSYNCH_CVCLRPREPOST", Const, 0, ""},
+ {"SYS_PSYNCH_CVSIGNAL", Const, 0, ""},
+ {"SYS_PSYNCH_CVWAIT", Const, 0, ""},
+ {"SYS_PSYNCH_MUTEXDROP", Const, 0, ""},
+ {"SYS_PSYNCH_MUTEXWAIT", Const, 0, ""},
+ {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0, ""},
+ {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_RDLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UNLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UNLOCK2", Const, 0, ""},
+ {"SYS_PSYNCH_RW_UPGRADE", Const, 0, ""},
+ {"SYS_PSYNCH_RW_WRLOCK", Const, 0, ""},
+ {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0, ""},
+ {"SYS_PTRACE", Const, 0, ""},
+ {"SYS_PUTPMSG", Const, 0, ""},
+ {"SYS_PWRITE", Const, 0, ""},
+ {"SYS_PWRITE64", Const, 0, ""},
+ {"SYS_PWRITEV", Const, 0, ""},
+ {"SYS_PWRITE_NOCANCEL", Const, 0, ""},
+ {"SYS_QUERY_MODULE", Const, 0, ""},
+ {"SYS_QUOTACTL", Const, 0, ""},
+ {"SYS_RASCTL", Const, 1, ""},
+ {"SYS_RCTL_ADD_RULE", Const, 0, ""},
+ {"SYS_RCTL_GET_LIMITS", Const, 0, ""},
+ {"SYS_RCTL_GET_RACCT", Const, 0, ""},
+ {"SYS_RCTL_GET_RULES", Const, 0, ""},
+ {"SYS_RCTL_REMOVE_RULE", Const, 0, ""},
+ {"SYS_READ", Const, 0, ""},
+ {"SYS_READAHEAD", Const, 0, ""},
+ {"SYS_READDIR", Const, 0, ""},
+ {"SYS_READLINK", Const, 0, ""},
+ {"SYS_READLINKAT", Const, 0, ""},
+ {"SYS_READV", Const, 0, ""},
+ {"SYS_READV_NOCANCEL", Const, 0, ""},
+ {"SYS_READ_NOCANCEL", Const, 0, ""},
+ {"SYS_REBOOT", Const, 0, ""},
+ {"SYS_RECV", Const, 0, ""},
+ {"SYS_RECVFROM", Const, 0, ""},
+ {"SYS_RECVFROM_NOCANCEL", Const, 0, ""},
+ {"SYS_RECVMMSG", Const, 0, ""},
+ {"SYS_RECVMSG", Const, 0, ""},
+ {"SYS_RECVMSG_NOCANCEL", Const, 0, ""},
+ {"SYS_REMAP_FILE_PAGES", Const, 0, ""},
+ {"SYS_REMOVEXATTR", Const, 0, ""},
+ {"SYS_RENAME", Const, 0, ""},
+ {"SYS_RENAMEAT", Const, 0, ""},
+ {"SYS_REQUEST_KEY", Const, 0, ""},
+ {"SYS_RESTART_SYSCALL", Const, 0, ""},
+ {"SYS_REVOKE", Const, 0, ""},
+ {"SYS_RFORK", Const, 0, ""},
+ {"SYS_RMDIR", Const, 0, ""},
+ {"SYS_RTPRIO", Const, 0, ""},
+ {"SYS_RTPRIO_THREAD", Const, 0, ""},
+ {"SYS_RT_SIGACTION", Const, 0, ""},
+ {"SYS_RT_SIGPENDING", Const, 0, ""},
+ {"SYS_RT_SIGPROCMASK", Const, 0, ""},
+ {"SYS_RT_SIGQUEUEINFO", Const, 0, ""},
+ {"SYS_RT_SIGRETURN", Const, 0, ""},
+ {"SYS_RT_SIGSUSPEND", Const, 0, ""},
+ {"SYS_RT_SIGTIMEDWAIT", Const, 0, ""},
+ {"SYS_RT_TGSIGQUEUEINFO", Const, 0, ""},
+ {"SYS_SBRK", Const, 0, ""},
+ {"SYS_SCHED_GETAFFINITY", Const, 0, ""},
+ {"SYS_SCHED_GETPARAM", Const, 0, ""},
+ {"SYS_SCHED_GETSCHEDULER", Const, 0, ""},
+ {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0, ""},
+ {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0, ""},
+ {"SYS_SCHED_RR_GET_INTERVAL", Const, 0, ""},
+ {"SYS_SCHED_SETAFFINITY", Const, 0, ""},
+ {"SYS_SCHED_SETPARAM", Const, 0, ""},
+ {"SYS_SCHED_SETSCHEDULER", Const, 0, ""},
+ {"SYS_SCHED_YIELD", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_RECVMSG", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_SENDMSG", Const, 0, ""},
+ {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0, ""},
+ {"SYS_SCTP_PEELOFF", Const, 0, ""},
+ {"SYS_SEARCHFS", Const, 0, ""},
+ {"SYS_SECURITY", Const, 0, ""},
+ {"SYS_SELECT", Const, 0, ""},
+ {"SYS_SELECT_NOCANCEL", Const, 0, ""},
+ {"SYS_SEMCONFIG", Const, 1, ""},
+ {"SYS_SEMCTL", Const, 0, ""},
+ {"SYS_SEMGET", Const, 0, ""},
+ {"SYS_SEMOP", Const, 0, ""},
+ {"SYS_SEMSYS", Const, 0, ""},
+ {"SYS_SEMTIMEDOP", Const, 0, ""},
+ {"SYS_SEM_CLOSE", Const, 0, ""},
+ {"SYS_SEM_DESTROY", Const, 0, ""},
+ {"SYS_SEM_GETVALUE", Const, 0, ""},
+ {"SYS_SEM_INIT", Const, 0, ""},
+ {"SYS_SEM_OPEN", Const, 0, ""},
+ {"SYS_SEM_POST", Const, 0, ""},
+ {"SYS_SEM_TRYWAIT", Const, 0, ""},
+ {"SYS_SEM_UNLINK", Const, 0, ""},
+ {"SYS_SEM_WAIT", Const, 0, ""},
+ {"SYS_SEM_WAIT_NOCANCEL", Const, 0, ""},
+ {"SYS_SEND", Const, 0, ""},
+ {"SYS_SENDFILE", Const, 0, ""},
+ {"SYS_SENDFILE64", Const, 0, ""},
+ {"SYS_SENDMMSG", Const, 0, ""},
+ {"SYS_SENDMSG", Const, 0, ""},
+ {"SYS_SENDMSG_NOCANCEL", Const, 0, ""},
+ {"SYS_SENDTO", Const, 0, ""},
+ {"SYS_SENDTO_NOCANCEL", Const, 0, ""},
+ {"SYS_SETATTRLIST", Const, 0, ""},
+ {"SYS_SETAUDIT", Const, 0, ""},
+ {"SYS_SETAUDIT_ADDR", Const, 0, ""},
+ {"SYS_SETAUID", Const, 0, ""},
+ {"SYS_SETCONTEXT", Const, 0, ""},
+ {"SYS_SETDOMAINNAME", Const, 0, ""},
+ {"SYS_SETEGID", Const, 0, ""},
+ {"SYS_SETEUID", Const, 0, ""},
+ {"SYS_SETFIB", Const, 0, ""},
+ {"SYS_SETFSGID", Const, 0, ""},
+ {"SYS_SETFSGID32", Const, 0, ""},
+ {"SYS_SETFSUID", Const, 0, ""},
+ {"SYS_SETFSUID32", Const, 0, ""},
+ {"SYS_SETGID", Const, 0, ""},
+ {"SYS_SETGID32", Const, 0, ""},
+ {"SYS_SETGROUPS", Const, 0, ""},
+ {"SYS_SETGROUPS32", Const, 0, ""},
+ {"SYS_SETHOSTNAME", Const, 0, ""},
+ {"SYS_SETITIMER", Const, 0, ""},
+ {"SYS_SETLCID", Const, 0, ""},
+ {"SYS_SETLOGIN", Const, 0, ""},
+ {"SYS_SETLOGINCLASS", Const, 0, ""},
+ {"SYS_SETNS", Const, 0, ""},
+ {"SYS_SETPGID", Const, 0, ""},
+ {"SYS_SETPRIORITY", Const, 0, ""},
+ {"SYS_SETPRIVEXEC", Const, 0, ""},
+ {"SYS_SETREGID", Const, 0, ""},
+ {"SYS_SETREGID32", Const, 0, ""},
+ {"SYS_SETRESGID", Const, 0, ""},
+ {"SYS_SETRESGID32", Const, 0, ""},
+ {"SYS_SETRESUID", Const, 0, ""},
+ {"SYS_SETRESUID32", Const, 0, ""},
+ {"SYS_SETREUID", Const, 0, ""},
+ {"SYS_SETREUID32", Const, 0, ""},
+ {"SYS_SETRLIMIT", Const, 0, ""},
+ {"SYS_SETRTABLE", Const, 1, ""},
+ {"SYS_SETSGROUPS", Const, 0, ""},
+ {"SYS_SETSID", Const, 0, ""},
+ {"SYS_SETSOCKOPT", Const, 0, ""},
+ {"SYS_SETTID", Const, 0, ""},
+ {"SYS_SETTID_WITH_PID", Const, 0, ""},
+ {"SYS_SETTIMEOFDAY", Const, 0, ""},
+ {"SYS_SETUID", Const, 0, ""},
+ {"SYS_SETUID32", Const, 0, ""},
+ {"SYS_SETWGROUPS", Const, 0, ""},
+ {"SYS_SETXATTR", Const, 0, ""},
+ {"SYS_SET_MEMPOLICY", Const, 0, ""},
+ {"SYS_SET_ROBUST_LIST", Const, 0, ""},
+ {"SYS_SET_THREAD_AREA", Const, 0, ""},
+ {"SYS_SET_TID_ADDRESS", Const, 0, ""},
+ {"SYS_SGETMASK", Const, 0, ""},
+ {"SYS_SHARED_REGION_CHECK_NP", Const, 0, ""},
+ {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0, ""},
+ {"SYS_SHMAT", Const, 0, ""},
+ {"SYS_SHMCTL", Const, 0, ""},
+ {"SYS_SHMDT", Const, 0, ""},
+ {"SYS_SHMGET", Const, 0, ""},
+ {"SYS_SHMSYS", Const, 0, ""},
+ {"SYS_SHM_OPEN", Const, 0, ""},
+ {"SYS_SHM_UNLINK", Const, 0, ""},
+ {"SYS_SHUTDOWN", Const, 0, ""},
+ {"SYS_SIGACTION", Const, 0, ""},
+ {"SYS_SIGALTSTACK", Const, 0, ""},
+ {"SYS_SIGNAL", Const, 0, ""},
+ {"SYS_SIGNALFD", Const, 0, ""},
+ {"SYS_SIGNALFD4", Const, 0, ""},
+ {"SYS_SIGPENDING", Const, 0, ""},
+ {"SYS_SIGPROCMASK", Const, 0, ""},
+ {"SYS_SIGQUEUE", Const, 0, ""},
+ {"SYS_SIGQUEUEINFO", Const, 1, ""},
+ {"SYS_SIGRETURN", Const, 0, ""},
+ {"SYS_SIGSUSPEND", Const, 0, ""},
+ {"SYS_SIGSUSPEND_NOCANCEL", Const, 0, ""},
+ {"SYS_SIGTIMEDWAIT", Const, 0, ""},
+ {"SYS_SIGWAIT", Const, 0, ""},
+ {"SYS_SIGWAITINFO", Const, 0, ""},
+ {"SYS_SOCKET", Const, 0, ""},
+ {"SYS_SOCKETCALL", Const, 0, ""},
+ {"SYS_SOCKETPAIR", Const, 0, ""},
+ {"SYS_SPLICE", Const, 0, ""},
+ {"SYS_SSETMASK", Const, 0, ""},
+ {"SYS_SSTK", Const, 0, ""},
+ {"SYS_STACK_SNAPSHOT", Const, 0, ""},
+ {"SYS_STAT", Const, 0, ""},
+ {"SYS_STAT64", Const, 0, ""},
+ {"SYS_STAT64_EXTENDED", Const, 0, ""},
+ {"SYS_STATFS", Const, 0, ""},
+ {"SYS_STATFS64", Const, 0, ""},
+ {"SYS_STATV", Const, 0, ""},
+ {"SYS_STATVFS1", Const, 1, ""},
+ {"SYS_STAT_EXTENDED", Const, 0, ""},
+ {"SYS_STIME", Const, 0, ""},
+ {"SYS_STTY", Const, 0, ""},
+ {"SYS_SWAPCONTEXT", Const, 0, ""},
+ {"SYS_SWAPCTL", Const, 1, ""},
+ {"SYS_SWAPOFF", Const, 0, ""},
+ {"SYS_SWAPON", Const, 0, ""},
+ {"SYS_SYMLINK", Const, 0, ""},
+ {"SYS_SYMLINKAT", Const, 0, ""},
+ {"SYS_SYNC", Const, 0, ""},
+ {"SYS_SYNCFS", Const, 0, ""},
+ {"SYS_SYNC_FILE_RANGE", Const, 0, ""},
+ {"SYS_SYSARCH", Const, 0, ""},
+ {"SYS_SYSCALL", Const, 0, ""},
+ {"SYS_SYSCALL_BASE", Const, 0, ""},
+ {"SYS_SYSFS", Const, 0, ""},
+ {"SYS_SYSINFO", Const, 0, ""},
+ {"SYS_SYSLOG", Const, 0, ""},
+ {"SYS_TEE", Const, 0, ""},
+ {"SYS_TGKILL", Const, 0, ""},
+ {"SYS_THREAD_SELFID", Const, 0, ""},
+ {"SYS_THR_CREATE", Const, 0, ""},
+ {"SYS_THR_EXIT", Const, 0, ""},
+ {"SYS_THR_KILL", Const, 0, ""},
+ {"SYS_THR_KILL2", Const, 0, ""},
+ {"SYS_THR_NEW", Const, 0, ""},
+ {"SYS_THR_SELF", Const, 0, ""},
+ {"SYS_THR_SET_NAME", Const, 0, ""},
+ {"SYS_THR_SUSPEND", Const, 0, ""},
+ {"SYS_THR_WAKE", Const, 0, ""},
+ {"SYS_TIME", Const, 0, ""},
+ {"SYS_TIMERFD_CREATE", Const, 0, ""},
+ {"SYS_TIMERFD_GETTIME", Const, 0, ""},
+ {"SYS_TIMERFD_SETTIME", Const, 0, ""},
+ {"SYS_TIMER_CREATE", Const, 0, ""},
+ {"SYS_TIMER_DELETE", Const, 0, ""},
+ {"SYS_TIMER_GETOVERRUN", Const, 0, ""},
+ {"SYS_TIMER_GETTIME", Const, 0, ""},
+ {"SYS_TIMER_SETTIME", Const, 0, ""},
+ {"SYS_TIMES", Const, 0, ""},
+ {"SYS_TKILL", Const, 0, ""},
+ {"SYS_TRUNCATE", Const, 0, ""},
+ {"SYS_TRUNCATE64", Const, 0, ""},
+ {"SYS_TUXCALL", Const, 0, ""},
+ {"SYS_UGETRLIMIT", Const, 0, ""},
+ {"SYS_ULIMIT", Const, 0, ""},
+ {"SYS_UMASK", Const, 0, ""},
+ {"SYS_UMASK_EXTENDED", Const, 0, ""},
+ {"SYS_UMOUNT", Const, 0, ""},
+ {"SYS_UMOUNT2", Const, 0, ""},
+ {"SYS_UNAME", Const, 0, ""},
+ {"SYS_UNDELETE", Const, 0, ""},
+ {"SYS_UNLINK", Const, 0, ""},
+ {"SYS_UNLINKAT", Const, 0, ""},
+ {"SYS_UNMOUNT", Const, 0, ""},
+ {"SYS_UNSHARE", Const, 0, ""},
+ {"SYS_USELIB", Const, 0, ""},
+ {"SYS_USTAT", Const, 0, ""},
+ {"SYS_UTIME", Const, 0, ""},
+ {"SYS_UTIMENSAT", Const, 0, ""},
+ {"SYS_UTIMES", Const, 0, ""},
+ {"SYS_UTRACE", Const, 0, ""},
+ {"SYS_UUIDGEN", Const, 0, ""},
+ {"SYS_VADVISE", Const, 1, ""},
+ {"SYS_VFORK", Const, 0, ""},
+ {"SYS_VHANGUP", Const, 0, ""},
+ {"SYS_VM86", Const, 0, ""},
+ {"SYS_VM86OLD", Const, 0, ""},
+ {"SYS_VMSPLICE", Const, 0, ""},
+ {"SYS_VM_PRESSURE_MONITOR", Const, 0, ""},
+ {"SYS_VSERVER", Const, 0, ""},
+ {"SYS_WAIT4", Const, 0, ""},
+ {"SYS_WAIT4_NOCANCEL", Const, 0, ""},
+ {"SYS_WAIT6", Const, 1, ""},
+ {"SYS_WAITEVENT", Const, 0, ""},
+ {"SYS_WAITID", Const, 0, ""},
+ {"SYS_WAITID_NOCANCEL", Const, 0, ""},
+ {"SYS_WAITPID", Const, 0, ""},
+ {"SYS_WATCHEVENT", Const, 0, ""},
+ {"SYS_WORKQ_KERNRETURN", Const, 0, ""},
+ {"SYS_WORKQ_OPEN", Const, 0, ""},
+ {"SYS_WRITE", Const, 0, ""},
+ {"SYS_WRITEV", Const, 0, ""},
+ {"SYS_WRITEV_NOCANCEL", Const, 0, ""},
+ {"SYS_WRITE_NOCANCEL", Const, 0, ""},
+ {"SYS_YIELD", Const, 0, ""},
+ {"SYS__LLSEEK", Const, 0, ""},
+ {"SYS__LWP_CONTINUE", Const, 1, ""},
+ {"SYS__LWP_CREATE", Const, 1, ""},
+ {"SYS__LWP_CTL", Const, 1, ""},
+ {"SYS__LWP_DETACH", Const, 1, ""},
+ {"SYS__LWP_EXIT", Const, 1, ""},
+ {"SYS__LWP_GETNAME", Const, 1, ""},
+ {"SYS__LWP_GETPRIVATE", Const, 1, ""},
+ {"SYS__LWP_KILL", Const, 1, ""},
+ {"SYS__LWP_PARK", Const, 1, ""},
+ {"SYS__LWP_SELF", Const, 1, ""},
+ {"SYS__LWP_SETNAME", Const, 1, ""},
+ {"SYS__LWP_SETPRIVATE", Const, 1, ""},
+ {"SYS__LWP_SUSPEND", Const, 1, ""},
+ {"SYS__LWP_UNPARK", Const, 1, ""},
+ {"SYS__LWP_UNPARK_ALL", Const, 1, ""},
+ {"SYS__LWP_WAIT", Const, 1, ""},
+ {"SYS__LWP_WAKEUP", Const, 1, ""},
+ {"SYS__NEWSELECT", Const, 0, ""},
+ {"SYS__PSET_BIND", Const, 1, ""},
+ {"SYS__SCHED_GETAFFINITY", Const, 1, ""},
+ {"SYS__SCHED_GETPARAM", Const, 1, ""},
+ {"SYS__SCHED_SETAFFINITY", Const, 1, ""},
+ {"SYS__SCHED_SETPARAM", Const, 1, ""},
+ {"SYS__SYSCTL", Const, 0, ""},
+ {"SYS__UMTX_LOCK", Const, 0, ""},
+ {"SYS__UMTX_OP", Const, 0, ""},
+ {"SYS__UMTX_UNLOCK", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_FD", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_FILE", Const, 0, ""},
+ {"SYS___ACL_ACLCHECK_LINK", Const, 0, ""},
+ {"SYS___ACL_DELETE_FD", Const, 0, ""},
+ {"SYS___ACL_DELETE_FILE", Const, 0, ""},
+ {"SYS___ACL_DELETE_LINK", Const, 0, ""},
+ {"SYS___ACL_GET_FD", Const, 0, ""},
+ {"SYS___ACL_GET_FILE", Const, 0, ""},
+ {"SYS___ACL_GET_LINK", Const, 0, ""},
+ {"SYS___ACL_SET_FD", Const, 0, ""},
+ {"SYS___ACL_SET_FILE", Const, 0, ""},
+ {"SYS___ACL_SET_LINK", Const, 0, ""},
+ {"SYS___CAP_RIGHTS_GET", Const, 14, ""},
+ {"SYS___CLONE", Const, 1, ""},
+ {"SYS___DISABLE_THREADSIGNAL", Const, 0, ""},
+ {"SYS___GETCWD", Const, 0, ""},
+ {"SYS___GETLOGIN", Const, 1, ""},
+ {"SYS___GET_TCB", Const, 1, ""},
+ {"SYS___MAC_EXECVE", Const, 0, ""},
+ {"SYS___MAC_GETFSSTAT", Const, 0, ""},
+ {"SYS___MAC_GET_FD", Const, 0, ""},
+ {"SYS___MAC_GET_FILE", Const, 0, ""},
+ {"SYS___MAC_GET_LCID", Const, 0, ""},
+ {"SYS___MAC_GET_LCTX", Const, 0, ""},
+ {"SYS___MAC_GET_LINK", Const, 0, ""},
+ {"SYS___MAC_GET_MOUNT", Const, 0, ""},
+ {"SYS___MAC_GET_PID", Const, 0, ""},
+ {"SYS___MAC_GET_PROC", Const, 0, ""},
+ {"SYS___MAC_MOUNT", Const, 0, ""},
+ {"SYS___MAC_SET_FD", Const, 0, ""},
+ {"SYS___MAC_SET_FILE", Const, 0, ""},
+ {"SYS___MAC_SET_LCTX", Const, 0, ""},
+ {"SYS___MAC_SET_LINK", Const, 0, ""},
+ {"SYS___MAC_SET_PROC", Const, 0, ""},
+ {"SYS___MAC_SYSCALL", Const, 0, ""},
+ {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0, ""},
+ {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+ {"SYS___POSIX_CHOWN", Const, 1, ""},
+ {"SYS___POSIX_FCHOWN", Const, 1, ""},
+ {"SYS___POSIX_LCHOWN", Const, 1, ""},
+ {"SYS___POSIX_RENAME", Const, 1, ""},
+ {"SYS___PTHREAD_CANCELED", Const, 0, ""},
+ {"SYS___PTHREAD_CHDIR", Const, 0, ""},
+ {"SYS___PTHREAD_FCHDIR", Const, 0, ""},
+ {"SYS___PTHREAD_KILL", Const, 0, ""},
+ {"SYS___PTHREAD_MARKCANCEL", Const, 0, ""},
+ {"SYS___PTHREAD_SIGMASK", Const, 0, ""},
+ {"SYS___QUOTACTL", Const, 1, ""},
+ {"SYS___SEMCTL", Const, 1, ""},
+ {"SYS___SEMWAIT_SIGNAL", Const, 0, ""},
+ {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0, ""},
+ {"SYS___SETLOGIN", Const, 1, ""},
+ {"SYS___SETUGID", Const, 0, ""},
+ {"SYS___SET_TCB", Const, 1, ""},
+ {"SYS___SIGACTION_SIGTRAMP", Const, 1, ""},
+ {"SYS___SIGTIMEDWAIT", Const, 1, ""},
+ {"SYS___SIGWAIT", Const, 0, ""},
+ {"SYS___SIGWAIT_NOCANCEL", Const, 0, ""},
+ {"SYS___SYSCTL", Const, 0, ""},
+ {"SYS___TFORK", Const, 1, ""},
+ {"SYS___THREXIT", Const, 1, ""},
+ {"SYS___THRSIGDIVERT", Const, 1, ""},
+ {"SYS___THRSLEEP", Const, 1, ""},
+ {"SYS___THRWAKEUP", Const, 1, ""},
+ {"S_ARCH1", Const, 1, ""},
+ {"S_ARCH2", Const, 1, ""},
+ {"S_BLKSIZE", Const, 0, ""},
+ {"S_IEXEC", Const, 0, ""},
+ {"S_IFBLK", Const, 0, ""},
+ {"S_IFCHR", Const, 0, ""},
+ {"S_IFDIR", Const, 0, ""},
+ {"S_IFIFO", Const, 0, ""},
+ {"S_IFLNK", Const, 0, ""},
+ {"S_IFMT", Const, 0, ""},
+ {"S_IFREG", Const, 0, ""},
+ {"S_IFSOCK", Const, 0, ""},
+ {"S_IFWHT", Const, 0, ""},
+ {"S_IREAD", Const, 0, ""},
+ {"S_IRGRP", Const, 0, ""},
+ {"S_IROTH", Const, 0, ""},
+ {"S_IRUSR", Const, 0, ""},
+ {"S_IRWXG", Const, 0, ""},
+ {"S_IRWXO", Const, 0, ""},
+ {"S_IRWXU", Const, 0, ""},
+ {"S_ISGID", Const, 0, ""},
+ {"S_ISTXT", Const, 0, ""},
+ {"S_ISUID", Const, 0, ""},
+ {"S_ISVTX", Const, 0, ""},
+ {"S_IWGRP", Const, 0, ""},
+ {"S_IWOTH", Const, 0, ""},
+ {"S_IWRITE", Const, 0, ""},
+ {"S_IWUSR", Const, 0, ""},
+ {"S_IXGRP", Const, 0, ""},
+ {"S_IXOTH", Const, 0, ""},
+ {"S_IXUSR", Const, 0, ""},
+ {"S_LOGIN_SET", Const, 1, ""},
+ {"SecurityAttributes", Type, 0, ""},
+ {"SecurityAttributes.InheritHandle", Field, 0, ""},
+ {"SecurityAttributes.Length", Field, 0, ""},
+ {"SecurityAttributes.SecurityDescriptor", Field, 0, ""},
+ {"Seek", Func, 0, "func(fd int, offset int64, whence int) (off int64, err error)"},
+ {"Select", Func, 0, "func(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)"},
+ {"Sendfile", Func, 0, "func(outfd int, infd int, offset *int64, count int) (written int, err error)"},
+ {"Sendmsg", Func, 0, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (err error)"},
+ {"SendmsgN", Func, 3, "func(fd int, p []byte, oob []byte, to Sockaddr, flags int) (n int, err error)"},
+ {"Sendto", Func, 0, "func(fd int, p []byte, flags int, to Sockaddr) (err error)"},
+ {"Servent", Type, 0, ""},
+ {"Servent.Aliases", Field, 0, ""},
+ {"Servent.Name", Field, 0, ""},
+ {"Servent.Port", Field, 0, ""},
+ {"Servent.Proto", Field, 0, ""},
+ {"SetBpf", Func, 0, ""},
+ {"SetBpfBuflen", Func, 0, ""},
+ {"SetBpfDatalink", Func, 0, ""},
+ {"SetBpfHeadercmpl", Func, 0, ""},
+ {"SetBpfImmediate", Func, 0, ""},
+ {"SetBpfInterface", Func, 0, ""},
+ {"SetBpfPromisc", Func, 0, ""},
+ {"SetBpfTimeout", Func, 0, ""},
+ {"SetCurrentDirectory", Func, 0, ""},
+ {"SetEndOfFile", Func, 0, ""},
+ {"SetEnvironmentVariable", Func, 0, ""},
+ {"SetFileAttributes", Func, 0, ""},
+ {"SetFileCompletionNotificationModes", Func, 2, ""},
+ {"SetFilePointer", Func, 0, ""},
+ {"SetFileTime", Func, 0, ""},
+ {"SetHandleInformation", Func, 0, ""},
+ {"SetKevent", Func, 0, ""},
+ {"SetLsfPromisc", Func, 0, "func(name string, m bool) error"},
+ {"SetNonblock", Func, 0, "func(fd int, nonblocking bool) (err error)"},
+ {"Setdomainname", Func, 0, "func(p []byte) (err error)"},
+ {"Setegid", Func, 0, "func(egid int) (err error)"},
+ {"Setenv", Func, 0, "func(key string, value string) error"},
+ {"Seteuid", Func, 0, "func(euid int) (err error)"},
+ {"Setfsgid", Func, 0, "func(gid int) (err error)"},
+ {"Setfsuid", Func, 0, "func(uid int) (err error)"},
+ {"Setgid", Func, 0, "func(gid int) (err error)"},
+ {"Setgroups", Func, 0, "func(gids []int) (err error)"},
+ {"Sethostname", Func, 0, "func(p []byte) (err error)"},
+ {"Setlogin", Func, 0, ""},
+ {"Setpgid", Func, 0, "func(pid int, pgid int) (err error)"},
+ {"Setpriority", Func, 0, "func(which int, who int, prio int) (err error)"},
+ {"Setprivexec", Func, 0, ""},
+ {"Setregid", Func, 0, "func(rgid int, egid int) (err error)"},
+ {"Setresgid", Func, 0, "func(rgid int, egid int, sgid int) (err error)"},
+ {"Setresuid", Func, 0, "func(ruid int, euid int, suid int) (err error)"},
+ {"Setreuid", Func, 0, "func(ruid int, euid int) (err error)"},
+ {"Setrlimit", Func, 0, "func(resource int, rlim *Rlimit) error"},
+ {"Setsid", Func, 0, "func() (pid int, err error)"},
+ {"Setsockopt", Func, 0, ""},
+ {"SetsockoptByte", Func, 0, "func(fd int, level int, opt int, value byte) (err error)"},
+ {"SetsockoptICMPv6Filter", Func, 2, "func(fd int, level int, opt int, filter *ICMPv6Filter) error"},
+ {"SetsockoptIPMreq", Func, 0, "func(fd int, level int, opt int, mreq *IPMreq) (err error)"},
+ {"SetsockoptIPMreqn", Func, 0, "func(fd int, level int, opt int, mreq *IPMreqn) (err error)"},
+ {"SetsockoptIPv6Mreq", Func, 0, "func(fd int, level int, opt int, mreq *IPv6Mreq) (err error)"},
+ {"SetsockoptInet4Addr", Func, 0, "func(fd int, level int, opt int, value [4]byte) (err error)"},
+ {"SetsockoptInt", Func, 0, "func(fd int, level int, opt int, value int) (err error)"},
+ {"SetsockoptLinger", Func, 0, "func(fd int, level int, opt int, l *Linger) (err error)"},
+ {"SetsockoptString", Func, 0, "func(fd int, level int, opt int, s string) (err error)"},
+ {"SetsockoptTimeval", Func, 0, "func(fd int, level int, opt int, tv *Timeval) (err error)"},
+ {"Settimeofday", Func, 0, "func(tv *Timeval) (err error)"},
+ {"Setuid", Func, 0, "func(uid int) (err error)"},
+ {"Setxattr", Func, 1, "func(path string, attr string, data []byte, flags int) (err error)"},
+ {"Shutdown", Func, 0, "func(fd int, how int) (err error)"},
+ {"SidTypeAlias", Const, 0, ""},
+ {"SidTypeComputer", Const, 0, ""},
+ {"SidTypeDeletedAccount", Const, 0, ""},
+ {"SidTypeDomain", Const, 0, ""},
+ {"SidTypeGroup", Const, 0, ""},
+ {"SidTypeInvalid", Const, 0, ""},
+ {"SidTypeLabel", Const, 0, ""},
+ {"SidTypeUnknown", Const, 0, ""},
+ {"SidTypeUser", Const, 0, ""},
+ {"SidTypeWellKnownGroup", Const, 0, ""},
+ {"Signal", Type, 0, ""},
+ {"SizeofBpfHdr", Const, 0, ""},
+ {"SizeofBpfInsn", Const, 0, ""},
+ {"SizeofBpfProgram", Const, 0, ""},
+ {"SizeofBpfStat", Const, 0, ""},
+ {"SizeofBpfVersion", Const, 0, ""},
+ {"SizeofBpfZbuf", Const, 0, ""},
+ {"SizeofBpfZbufHeader", Const, 0, ""},
+ {"SizeofCmsghdr", Const, 0, ""},
+ {"SizeofICMPv6Filter", Const, 2, ""},
+ {"SizeofIPMreq", Const, 0, ""},
+ {"SizeofIPMreqn", Const, 0, ""},
+ {"SizeofIPv6MTUInfo", Const, 2, ""},
+ {"SizeofIPv6Mreq", Const, 0, ""},
+ {"SizeofIfAddrmsg", Const, 0, ""},
+ {"SizeofIfAnnounceMsghdr", Const, 1, ""},
+ {"SizeofIfData", Const, 0, ""},
+ {"SizeofIfInfomsg", Const, 0, ""},
+ {"SizeofIfMsghdr", Const, 0, ""},
+ {"SizeofIfaMsghdr", Const, 0, ""},
+ {"SizeofIfmaMsghdr", Const, 0, ""},
+ {"SizeofIfmaMsghdr2", Const, 0, ""},
+ {"SizeofInet4Pktinfo", Const, 0, ""},
+ {"SizeofInet6Pktinfo", Const, 0, ""},
+ {"SizeofInotifyEvent", Const, 0, ""},
+ {"SizeofLinger", Const, 0, ""},
+ {"SizeofMsghdr", Const, 0, ""},
+ {"SizeofNlAttr", Const, 0, ""},
+ {"SizeofNlMsgerr", Const, 0, ""},
+ {"SizeofNlMsghdr", Const, 0, ""},
+ {"SizeofRtAttr", Const, 0, ""},
+ {"SizeofRtGenmsg", Const, 0, ""},
+ {"SizeofRtMetrics", Const, 0, ""},
+ {"SizeofRtMsg", Const, 0, ""},
+ {"SizeofRtMsghdr", Const, 0, ""},
+ {"SizeofRtNexthop", Const, 0, ""},
+ {"SizeofSockFilter", Const, 0, ""},
+ {"SizeofSockFprog", Const, 0, ""},
+ {"SizeofSockaddrAny", Const, 0, ""},
+ {"SizeofSockaddrDatalink", Const, 0, ""},
+ {"SizeofSockaddrInet4", Const, 0, ""},
+ {"SizeofSockaddrInet6", Const, 0, ""},
+ {"SizeofSockaddrLinklayer", Const, 0, ""},
+ {"SizeofSockaddrNetlink", Const, 0, ""},
+ {"SizeofSockaddrUnix", Const, 0, ""},
+ {"SizeofTCPInfo", Const, 1, ""},
+ {"SizeofUcred", Const, 0, ""},
+ {"SlicePtrFromStrings", Func, 1, "func(ss []string) ([]*byte, error)"},
+ {"SockFilter", Type, 0, ""},
+ {"SockFilter.Code", Field, 0, ""},
+ {"SockFilter.Jf", Field, 0, ""},
+ {"SockFilter.Jt", Field, 0, ""},
+ {"SockFilter.K", Field, 0, ""},
+ {"SockFprog", Type, 0, ""},
+ {"SockFprog.Filter", Field, 0, ""},
+ {"SockFprog.Len", Field, 0, ""},
+ {"SockFprog.Pad_cgo_0", Field, 0, ""},
+ {"Sockaddr", Type, 0, ""},
+ {"SockaddrDatalink", Type, 0, ""},
+ {"SockaddrDatalink.Alen", Field, 0, ""},
+ {"SockaddrDatalink.Data", Field, 0, ""},
+ {"SockaddrDatalink.Family", Field, 0, ""},
+ {"SockaddrDatalink.Index", Field, 0, ""},
+ {"SockaddrDatalink.Len", Field, 0, ""},
+ {"SockaddrDatalink.Nlen", Field, 0, ""},
+ {"SockaddrDatalink.Slen", Field, 0, ""},
+ {"SockaddrDatalink.Type", Field, 0, ""},
+ {"SockaddrGen", Type, 0, ""},
+ {"SockaddrInet4", Type, 0, ""},
+ {"SockaddrInet4.Addr", Field, 0, ""},
+ {"SockaddrInet4.Port", Field, 0, ""},
+ {"SockaddrInet6", Type, 0, ""},
+ {"SockaddrInet6.Addr", Field, 0, ""},
+ {"SockaddrInet6.Port", Field, 0, ""},
+ {"SockaddrInet6.ZoneId", Field, 0, ""},
+ {"SockaddrLinklayer", Type, 0, ""},
+ {"SockaddrLinklayer.Addr", Field, 0, ""},
+ {"SockaddrLinklayer.Halen", Field, 0, ""},
+ {"SockaddrLinklayer.Hatype", Field, 0, ""},
+ {"SockaddrLinklayer.Ifindex", Field, 0, ""},
+ {"SockaddrLinklayer.Pkttype", Field, 0, ""},
+ {"SockaddrLinklayer.Protocol", Field, 0, ""},
+ {"SockaddrNetlink", Type, 0, ""},
+ {"SockaddrNetlink.Family", Field, 0, ""},
+ {"SockaddrNetlink.Groups", Field, 0, ""},
+ {"SockaddrNetlink.Pad", Field, 0, ""},
+ {"SockaddrNetlink.Pid", Field, 0, ""},
+ {"SockaddrUnix", Type, 0, ""},
+ {"SockaddrUnix.Name", Field, 0, ""},
+ {"Socket", Func, 0, "func(domain int, typ int, proto int) (fd int, err error)"},
+ {"SocketControlMessage", Type, 0, ""},
+ {"SocketControlMessage.Data", Field, 0, ""},
+ {"SocketControlMessage.Header", Field, 0, ""},
+ {"SocketDisableIPv6", Var, 0, ""},
+ {"Socketpair", Func, 0, "func(domain int, typ int, proto int) (fd [2]int, err error)"},
+ {"Splice", Func, 0, "func(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)"},
+ {"StartProcess", Func, 0, "func(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error)"},
+ {"StartupInfo", Type, 0, ""},
+ {"StartupInfo.Cb", Field, 0, ""},
+ {"StartupInfo.Desktop", Field, 0, ""},
+ {"StartupInfo.FillAttribute", Field, 0, ""},
+ {"StartupInfo.Flags", Field, 0, ""},
+ {"StartupInfo.ShowWindow", Field, 0, ""},
+ {"StartupInfo.StdErr", Field, 0, ""},
+ {"StartupInfo.StdInput", Field, 0, ""},
+ {"StartupInfo.StdOutput", Field, 0, ""},
+ {"StartupInfo.Title", Field, 0, ""},
+ {"StartupInfo.X", Field, 0, ""},
+ {"StartupInfo.XCountChars", Field, 0, ""},
+ {"StartupInfo.XSize", Field, 0, ""},
+ {"StartupInfo.Y", Field, 0, ""},
+ {"StartupInfo.YCountChars", Field, 0, ""},
+ {"StartupInfo.YSize", Field, 0, ""},
+ {"Stat", Func, 0, "func(path string, stat *Stat_t) (err error)"},
+ {"Stat_t", Type, 0, ""},
+ {"Stat_t.Atim", Field, 0, ""},
+ {"Stat_t.Atim_ext", Field, 12, ""},
+ {"Stat_t.Atimespec", Field, 0, ""},
+ {"Stat_t.Birthtimespec", Field, 0, ""},
+ {"Stat_t.Blksize", Field, 0, ""},
+ {"Stat_t.Blocks", Field, 0, ""},
+ {"Stat_t.Btim_ext", Field, 12, ""},
+ {"Stat_t.Ctim", Field, 0, ""},
+ {"Stat_t.Ctim_ext", Field, 12, ""},
+ {"Stat_t.Ctimespec", Field, 0, ""},
+ {"Stat_t.Dev", Field, 0, ""},
+ {"Stat_t.Flags", Field, 0, ""},
+ {"Stat_t.Gen", Field, 0, ""},
+ {"Stat_t.Gid", Field, 0, ""},
+ {"Stat_t.Ino", Field, 0, ""},
+ {"Stat_t.Lspare", Field, 0, ""},
+ {"Stat_t.Lspare0", Field, 2, ""},
+ {"Stat_t.Lspare1", Field, 2, ""},
+ {"Stat_t.Mode", Field, 0, ""},
+ {"Stat_t.Mtim", Field, 0, ""},
+ {"Stat_t.Mtim_ext", Field, 12, ""},
+ {"Stat_t.Mtimespec", Field, 0, ""},
+ {"Stat_t.Nlink", Field, 0, ""},
+ {"Stat_t.Pad_cgo_0", Field, 0, ""},
+ {"Stat_t.Pad_cgo_1", Field, 0, ""},
+ {"Stat_t.Pad_cgo_2", Field, 0, ""},
+ {"Stat_t.Padding0", Field, 12, ""},
+ {"Stat_t.Padding1", Field, 12, ""},
+ {"Stat_t.Qspare", Field, 0, ""},
+ {"Stat_t.Rdev", Field, 0, ""},
+ {"Stat_t.Size", Field, 0, ""},
+ {"Stat_t.Spare", Field, 2, ""},
+ {"Stat_t.Uid", Field, 0, ""},
+ {"Stat_t.X__pad0", Field, 0, ""},
+ {"Stat_t.X__pad1", Field, 0, ""},
+ {"Stat_t.X__pad2", Field, 0, ""},
+ {"Stat_t.X__st_birthtim", Field, 2, ""},
+ {"Stat_t.X__st_ino", Field, 0, ""},
+ {"Stat_t.X__unused", Field, 0, ""},
+ {"Statfs", Func, 0, "func(path string, buf *Statfs_t) (err error)"},
+ {"Statfs_t", Type, 0, ""},
+ {"Statfs_t.Asyncreads", Field, 0, ""},
+ {"Statfs_t.Asyncwrites", Field, 0, ""},
+ {"Statfs_t.Bavail", Field, 0, ""},
+ {"Statfs_t.Bfree", Field, 0, ""},
+ {"Statfs_t.Blocks", Field, 0, ""},
+ {"Statfs_t.Bsize", Field, 0, ""},
+ {"Statfs_t.Charspare", Field, 0, ""},
+ {"Statfs_t.F_asyncreads", Field, 2, ""},
+ {"Statfs_t.F_asyncwrites", Field, 2, ""},
+ {"Statfs_t.F_bavail", Field, 2, ""},
+ {"Statfs_t.F_bfree", Field, 2, ""},
+ {"Statfs_t.F_blocks", Field, 2, ""},
+ {"Statfs_t.F_bsize", Field, 2, ""},
+ {"Statfs_t.F_ctime", Field, 2, ""},
+ {"Statfs_t.F_favail", Field, 2, ""},
+ {"Statfs_t.F_ffree", Field, 2, ""},
+ {"Statfs_t.F_files", Field, 2, ""},
+ {"Statfs_t.F_flags", Field, 2, ""},
+ {"Statfs_t.F_fsid", Field, 2, ""},
+ {"Statfs_t.F_fstypename", Field, 2, ""},
+ {"Statfs_t.F_iosize", Field, 2, ""},
+ {"Statfs_t.F_mntfromname", Field, 2, ""},
+ {"Statfs_t.F_mntfromspec", Field, 3, ""},
+ {"Statfs_t.F_mntonname", Field, 2, ""},
+ {"Statfs_t.F_namemax", Field, 2, ""},
+ {"Statfs_t.F_owner", Field, 2, ""},
+ {"Statfs_t.F_spare", Field, 2, ""},
+ {"Statfs_t.F_syncreads", Field, 2, ""},
+ {"Statfs_t.F_syncwrites", Field, 2, ""},
+ {"Statfs_t.Ffree", Field, 0, ""},
+ {"Statfs_t.Files", Field, 0, ""},
+ {"Statfs_t.Flags", Field, 0, ""},
+ {"Statfs_t.Frsize", Field, 0, ""},
+ {"Statfs_t.Fsid", Field, 0, ""},
+ {"Statfs_t.Fssubtype", Field, 0, ""},
+ {"Statfs_t.Fstypename", Field, 0, ""},
+ {"Statfs_t.Iosize", Field, 0, ""},
+ {"Statfs_t.Mntfromname", Field, 0, ""},
+ {"Statfs_t.Mntonname", Field, 0, ""},
+ {"Statfs_t.Mount_info", Field, 2, ""},
+ {"Statfs_t.Namelen", Field, 0, ""},
+ {"Statfs_t.Namemax", Field, 0, ""},
+ {"Statfs_t.Owner", Field, 0, ""},
+ {"Statfs_t.Pad_cgo_0", Field, 0, ""},
+ {"Statfs_t.Pad_cgo_1", Field, 2, ""},
+ {"Statfs_t.Reserved", Field, 0, ""},
+ {"Statfs_t.Spare", Field, 0, ""},
+ {"Statfs_t.Syncreads", Field, 0, ""},
+ {"Statfs_t.Syncwrites", Field, 0, ""},
+ {"Statfs_t.Type", Field, 0, ""},
+ {"Statfs_t.Version", Field, 0, ""},
+ {"Stderr", Var, 0, ""},
+ {"Stdin", Var, 0, ""},
+ {"Stdout", Var, 0, ""},
+ {"StringBytePtr", Func, 0, "func(s string) *byte"},
+ {"StringByteSlice", Func, 0, "func(s string) []byte"},
+ {"StringSlicePtr", Func, 0, "func(ss []string) []*byte"},
+ {"StringToSid", Func, 0, ""},
+ {"StringToUTF16", Func, 0, ""},
+ {"StringToUTF16Ptr", Func, 0, ""},
+ {"Symlink", Func, 0, "func(oldpath string, newpath string) (err error)"},
+ {"Sync", Func, 0, "func()"},
+ {"SyncFileRange", Func, 0, "func(fd int, off int64, n int64, flags int) (err error)"},
+ {"SysProcAttr", Type, 0, ""},
+ {"SysProcAttr.AdditionalInheritedHandles", Field, 17, ""},
+ {"SysProcAttr.AmbientCaps", Field, 9, ""},
+ {"SysProcAttr.CgroupFD", Field, 20, ""},
+ {"SysProcAttr.Chroot", Field, 0, ""},
+ {"SysProcAttr.Cloneflags", Field, 2, ""},
+ {"SysProcAttr.CmdLine", Field, 0, ""},
+ {"SysProcAttr.CreationFlags", Field, 1, ""},
+ {"SysProcAttr.Credential", Field, 0, ""},
+ {"SysProcAttr.Ctty", Field, 1, ""},
+ {"SysProcAttr.Foreground", Field, 5, ""},
+ {"SysProcAttr.GidMappings", Field, 4, ""},
+ {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5, ""},
+ {"SysProcAttr.HideWindow", Field, 0, ""},
+ {"SysProcAttr.Jail", Field, 21, ""},
+ {"SysProcAttr.NoInheritHandles", Field, 16, ""},
+ {"SysProcAttr.Noctty", Field, 0, ""},
+ {"SysProcAttr.ParentProcess", Field, 17, ""},
+ {"SysProcAttr.Pdeathsig", Field, 0, ""},
+ {"SysProcAttr.Pgid", Field, 5, ""},
+ {"SysProcAttr.PidFD", Field, 22, ""},
+ {"SysProcAttr.ProcessAttributes", Field, 13, ""},
+ {"SysProcAttr.Ptrace", Field, 0, ""},
+ {"SysProcAttr.Setctty", Field, 0, ""},
+ {"SysProcAttr.Setpgid", Field, 0, ""},
+ {"SysProcAttr.Setsid", Field, 0, ""},
+ {"SysProcAttr.ThreadAttributes", Field, 13, ""},
+ {"SysProcAttr.Token", Field, 10, ""},
+ {"SysProcAttr.UidMappings", Field, 4, ""},
+ {"SysProcAttr.Unshareflags", Field, 7, ""},
+ {"SysProcAttr.UseCgroupFD", Field, 20, ""},
+ {"SysProcIDMap", Type, 4, ""},
+ {"SysProcIDMap.ContainerID", Field, 4, ""},
+ {"SysProcIDMap.HostID", Field, 4, ""},
+ {"SysProcIDMap.Size", Field, 4, ""},
+ {"Syscall", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Syscall12", Func, 0, ""},
+ {"Syscall15", Func, 0, ""},
+ {"Syscall18", Func, 12, ""},
+ {"Syscall6", Func, 0, "func(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err Errno)"},
+ {"Syscall9", Func, 0, ""},
+ {"SyscallN", Func, 18, ""},
+ {"Sysctl", Func, 0, ""},
+ {"SysctlUint32", Func, 0, ""},
+ {"Sysctlnode", Type, 2, ""},
+ {"Sysctlnode.Flags", Field, 2, ""},
+ {"Sysctlnode.Name", Field, 2, ""},
+ {"Sysctlnode.Num", Field, 2, ""},
+ {"Sysctlnode.Un", Field, 2, ""},
+ {"Sysctlnode.Ver", Field, 2, ""},
+ {"Sysctlnode.X__rsvd", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_desc", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_func", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_parent", Field, 2, ""},
+ {"Sysctlnode.X_sysctl_size", Field, 2, ""},
+ {"Sysinfo", Func, 0, "func(info *Sysinfo_t) (err error)"},
+ {"Sysinfo_t", Type, 0, ""},
+ {"Sysinfo_t.Bufferram", Field, 0, ""},
+ {"Sysinfo_t.Freehigh", Field, 0, ""},
+ {"Sysinfo_t.Freeram", Field, 0, ""},
+ {"Sysinfo_t.Freeswap", Field, 0, ""},
+ {"Sysinfo_t.Loads", Field, 0, ""},
+ {"Sysinfo_t.Pad", Field, 0, ""},
+ {"Sysinfo_t.Pad_cgo_0", Field, 0, ""},
+ {"Sysinfo_t.Pad_cgo_1", Field, 0, ""},
+ {"Sysinfo_t.Procs", Field, 0, ""},
+ {"Sysinfo_t.Sharedram", Field, 0, ""},
+ {"Sysinfo_t.Totalhigh", Field, 0, ""},
+ {"Sysinfo_t.Totalram", Field, 0, ""},
+ {"Sysinfo_t.Totalswap", Field, 0, ""},
+ {"Sysinfo_t.Unit", Field, 0, ""},
+ {"Sysinfo_t.Uptime", Field, 0, ""},
+ {"Sysinfo_t.X_f", Field, 0, ""},
+ {"Systemtime", Type, 0, ""},
+ {"Systemtime.Day", Field, 0, ""},
+ {"Systemtime.DayOfWeek", Field, 0, ""},
+ {"Systemtime.Hour", Field, 0, ""},
+ {"Systemtime.Milliseconds", Field, 0, ""},
+ {"Systemtime.Minute", Field, 0, ""},
+ {"Systemtime.Month", Field, 0, ""},
+ {"Systemtime.Second", Field, 0, ""},
+ {"Systemtime.Year", Field, 0, ""},
+ {"TCGETS", Const, 0, ""},
+ {"TCIFLUSH", Const, 1, ""},
+ {"TCIOFLUSH", Const, 1, ""},
+ {"TCOFLUSH", Const, 1, ""},
+ {"TCPInfo", Type, 1, ""},
+ {"TCPInfo.Advmss", Field, 1, ""},
+ {"TCPInfo.Ato", Field, 1, ""},
+ {"TCPInfo.Backoff", Field, 1, ""},
+ {"TCPInfo.Ca_state", Field, 1, ""},
+ {"TCPInfo.Fackets", Field, 1, ""},
+ {"TCPInfo.Last_ack_recv", Field, 1, ""},
+ {"TCPInfo.Last_ack_sent", Field, 1, ""},
+ {"TCPInfo.Last_data_recv", Field, 1, ""},
+ {"TCPInfo.Last_data_sent", Field, 1, ""},
+ {"TCPInfo.Lost", Field, 1, ""},
+ {"TCPInfo.Options", Field, 1, ""},
+ {"TCPInfo.Pad_cgo_0", Field, 1, ""},
+ {"TCPInfo.Pmtu", Field, 1, ""},
+ {"TCPInfo.Probes", Field, 1, ""},
+ {"TCPInfo.Rcv_mss", Field, 1, ""},
+ {"TCPInfo.Rcv_rtt", Field, 1, ""},
+ {"TCPInfo.Rcv_space", Field, 1, ""},
+ {"TCPInfo.Rcv_ssthresh", Field, 1, ""},
+ {"TCPInfo.Reordering", Field, 1, ""},
+ {"TCPInfo.Retrans", Field, 1, ""},
+ {"TCPInfo.Retransmits", Field, 1, ""},
+ {"TCPInfo.Rto", Field, 1, ""},
+ {"TCPInfo.Rtt", Field, 1, ""},
+ {"TCPInfo.Rttvar", Field, 1, ""},
+ {"TCPInfo.Sacked", Field, 1, ""},
+ {"TCPInfo.Snd_cwnd", Field, 1, ""},
+ {"TCPInfo.Snd_mss", Field, 1, ""},
+ {"TCPInfo.Snd_ssthresh", Field, 1, ""},
+ {"TCPInfo.State", Field, 1, ""},
+ {"TCPInfo.Total_retrans", Field, 1, ""},
+ {"TCPInfo.Unacked", Field, 1, ""},
+ {"TCPKeepalive", Type, 3, ""},
+ {"TCPKeepalive.Interval", Field, 3, ""},
+ {"TCPKeepalive.OnOff", Field, 3, ""},
+ {"TCPKeepalive.Time", Field, 3, ""},
+ {"TCP_CA_NAME_MAX", Const, 0, ""},
+ {"TCP_CONGCTL", Const, 1, ""},
+ {"TCP_CONGESTION", Const, 0, ""},
+ {"TCP_CONNECTIONTIMEOUT", Const, 0, ""},
+ {"TCP_CORK", Const, 0, ""},
+ {"TCP_DEFER_ACCEPT", Const, 0, ""},
+ {"TCP_ENABLE_ECN", Const, 16, ""},
+ {"TCP_INFO", Const, 0, ""},
+ {"TCP_KEEPALIVE", Const, 0, ""},
+ {"TCP_KEEPCNT", Const, 0, ""},
+ {"TCP_KEEPIDLE", Const, 0, ""},
+ {"TCP_KEEPINIT", Const, 1, ""},
+ {"TCP_KEEPINTVL", Const, 0, ""},
+ {"TCP_LINGER2", Const, 0, ""},
+ {"TCP_MAXBURST", Const, 0, ""},
+ {"TCP_MAXHLEN", Const, 0, ""},
+ {"TCP_MAXOLEN", Const, 0, ""},
+ {"TCP_MAXSEG", Const, 0, ""},
+ {"TCP_MAXWIN", Const, 0, ""},
+ {"TCP_MAX_SACK", Const, 0, ""},
+ {"TCP_MAX_WINSHIFT", Const, 0, ""},
+ {"TCP_MD5SIG", Const, 0, ""},
+ {"TCP_MD5SIG_MAXKEYLEN", Const, 0, ""},
+ {"TCP_MINMSS", Const, 0, ""},
+ {"TCP_MINMSSOVERLOAD", Const, 0, ""},
+ {"TCP_MSS", Const, 0, ""},
+ {"TCP_NODELAY", Const, 0, ""},
+ {"TCP_NOOPT", Const, 0, ""},
+ {"TCP_NOPUSH", Const, 0, ""},
+ {"TCP_NOTSENT_LOWAT", Const, 16, ""},
+ {"TCP_NSTATES", Const, 1, ""},
+ {"TCP_QUICKACK", Const, 0, ""},
+ {"TCP_RXT_CONNDROPTIME", Const, 0, ""},
+ {"TCP_RXT_FINDROP", Const, 0, ""},
+ {"TCP_SACK_ENABLE", Const, 1, ""},
+ {"TCP_SENDMOREACKS", Const, 16, ""},
+ {"TCP_SYNCNT", Const, 0, ""},
+ {"TCP_VENDOR", Const, 3, ""},
+ {"TCP_WINDOW_CLAMP", Const, 0, ""},
+ {"TCSAFLUSH", Const, 1, ""},
+ {"TCSETS", Const, 0, ""},
+ {"TF_DISCONNECT", Const, 0, ""},
+ {"TF_REUSE_SOCKET", Const, 0, ""},
+ {"TF_USE_DEFAULT_WORKER", Const, 0, ""},
+ {"TF_USE_KERNEL_APC", Const, 0, ""},
+ {"TF_USE_SYSTEM_THREAD", Const, 0, ""},
+ {"TF_WRITE_BEHIND", Const, 0, ""},
+ {"TH32CS_INHERIT", Const, 4, ""},
+ {"TH32CS_SNAPALL", Const, 4, ""},
+ {"TH32CS_SNAPHEAPLIST", Const, 4, ""},
+ {"TH32CS_SNAPMODULE", Const, 4, ""},
+ {"TH32CS_SNAPMODULE32", Const, 4, ""},
+ {"TH32CS_SNAPPROCESS", Const, 4, ""},
+ {"TH32CS_SNAPTHREAD", Const, 4, ""},
+ {"TIME_ZONE_ID_DAYLIGHT", Const, 0, ""},
+ {"TIME_ZONE_ID_STANDARD", Const, 0, ""},
+ {"TIME_ZONE_ID_UNKNOWN", Const, 0, ""},
+ {"TIOCCBRK", Const, 0, ""},
+ {"TIOCCDTR", Const, 0, ""},
+ {"TIOCCONS", Const, 0, ""},
+ {"TIOCDCDTIMESTAMP", Const, 0, ""},
+ {"TIOCDRAIN", Const, 0, ""},
+ {"TIOCDSIMICROCODE", Const, 0, ""},
+ {"TIOCEXCL", Const, 0, ""},
+ {"TIOCEXT", Const, 0, ""},
+ {"TIOCFLAG_CDTRCTS", Const, 1, ""},
+ {"TIOCFLAG_CLOCAL", Const, 1, ""},
+ {"TIOCFLAG_CRTSCTS", Const, 1, ""},
+ {"TIOCFLAG_MDMBUF", Const, 1, ""},
+ {"TIOCFLAG_PPS", Const, 1, ""},
+ {"TIOCFLAG_SOFTCAR", Const, 1, ""},
+ {"TIOCFLUSH", Const, 0, ""},
+ {"TIOCGDEV", Const, 0, ""},
+ {"TIOCGDRAINWAIT", Const, 0, ""},
+ {"TIOCGETA", Const, 0, ""},
+ {"TIOCGETD", Const, 0, ""},
+ {"TIOCGFLAGS", Const, 1, ""},
+ {"TIOCGICOUNT", Const, 0, ""},
+ {"TIOCGLCKTRMIOS", Const, 0, ""},
+ {"TIOCGLINED", Const, 1, ""},
+ {"TIOCGPGRP", Const, 0, ""},
+ {"TIOCGPTN", Const, 0, ""},
+ {"TIOCGQSIZE", Const, 1, ""},
+ {"TIOCGRANTPT", Const, 1, ""},
+ {"TIOCGRS485", Const, 0, ""},
+ {"TIOCGSERIAL", Const, 0, ""},
+ {"TIOCGSID", Const, 0, ""},
+ {"TIOCGSIZE", Const, 1, ""},
+ {"TIOCGSOFTCAR", Const, 0, ""},
+ {"TIOCGTSTAMP", Const, 1, ""},
+ {"TIOCGWINSZ", Const, 0, ""},
+ {"TIOCINQ", Const, 0, ""},
+ {"TIOCIXOFF", Const, 0, ""},
+ {"TIOCIXON", Const, 0, ""},
+ {"TIOCLINUX", Const, 0, ""},
+ {"TIOCMBIC", Const, 0, ""},
+ {"TIOCMBIS", Const, 0, ""},
+ {"TIOCMGDTRWAIT", Const, 0, ""},
+ {"TIOCMGET", Const, 0, ""},
+ {"TIOCMIWAIT", Const, 0, ""},
+ {"TIOCMODG", Const, 0, ""},
+ {"TIOCMODS", Const, 0, ""},
+ {"TIOCMSDTRWAIT", Const, 0, ""},
+ {"TIOCMSET", Const, 0, ""},
+ {"TIOCM_CAR", Const, 0, ""},
+ {"TIOCM_CD", Const, 0, ""},
+ {"TIOCM_CTS", Const, 0, ""},
+ {"TIOCM_DCD", Const, 0, ""},
+ {"TIOCM_DSR", Const, 0, ""},
+ {"TIOCM_DTR", Const, 0, ""},
+ {"TIOCM_LE", Const, 0, ""},
+ {"TIOCM_RI", Const, 0, ""},
+ {"TIOCM_RNG", Const, 0, ""},
+ {"TIOCM_RTS", Const, 0, ""},
+ {"TIOCM_SR", Const, 0, ""},
+ {"TIOCM_ST", Const, 0, ""},
+ {"TIOCNOTTY", Const, 0, ""},
+ {"TIOCNXCL", Const, 0, ""},
+ {"TIOCOUTQ", Const, 0, ""},
+ {"TIOCPKT", Const, 0, ""},
+ {"TIOCPKT_DATA", Const, 0, ""},
+ {"TIOCPKT_DOSTOP", Const, 0, ""},
+ {"TIOCPKT_FLUSHREAD", Const, 0, ""},
+ {"TIOCPKT_FLUSHWRITE", Const, 0, ""},
+ {"TIOCPKT_IOCTL", Const, 0, ""},
+ {"TIOCPKT_NOSTOP", Const, 0, ""},
+ {"TIOCPKT_START", Const, 0, ""},
+ {"TIOCPKT_STOP", Const, 0, ""},
+ {"TIOCPTMASTER", Const, 0, ""},
+ {"TIOCPTMGET", Const, 1, ""},
+ {"TIOCPTSNAME", Const, 1, ""},
+ {"TIOCPTYGNAME", Const, 0, ""},
+ {"TIOCPTYGRANT", Const, 0, ""},
+ {"TIOCPTYUNLK", Const, 0, ""},
+ {"TIOCRCVFRAME", Const, 1, ""},
+ {"TIOCREMOTE", Const, 0, ""},
+ {"TIOCSBRK", Const, 0, ""},
+ {"TIOCSCONS", Const, 0, ""},
+ {"TIOCSCTTY", Const, 0, ""},
+ {"TIOCSDRAINWAIT", Const, 0, ""},
+ {"TIOCSDTR", Const, 0, ""},
+ {"TIOCSERCONFIG", Const, 0, ""},
+ {"TIOCSERGETLSR", Const, 0, ""},
+ {"TIOCSERGETMULTI", Const, 0, ""},
+ {"TIOCSERGSTRUCT", Const, 0, ""},
+ {"TIOCSERGWILD", Const, 0, ""},
+ {"TIOCSERSETMULTI", Const, 0, ""},
+ {"TIOCSERSWILD", Const, 0, ""},
+ {"TIOCSER_TEMT", Const, 0, ""},
+ {"TIOCSETA", Const, 0, ""},
+ {"TIOCSETAF", Const, 0, ""},
+ {"TIOCSETAW", Const, 0, ""},
+ {"TIOCSETD", Const, 0, ""},
+ {"TIOCSFLAGS", Const, 1, ""},
+ {"TIOCSIG", Const, 0, ""},
+ {"TIOCSLCKTRMIOS", Const, 0, ""},
+ {"TIOCSLINED", Const, 1, ""},
+ {"TIOCSPGRP", Const, 0, ""},
+ {"TIOCSPTLCK", Const, 0, ""},
+ {"TIOCSQSIZE", Const, 1, ""},
+ {"TIOCSRS485", Const, 0, ""},
+ {"TIOCSSERIAL", Const, 0, ""},
+ {"TIOCSSIZE", Const, 1, ""},
+ {"TIOCSSOFTCAR", Const, 0, ""},
+ {"TIOCSTART", Const, 0, ""},
+ {"TIOCSTAT", Const, 0, ""},
+ {"TIOCSTI", Const, 0, ""},
+ {"TIOCSTOP", Const, 0, ""},
+ {"TIOCSTSTAMP", Const, 1, ""},
+ {"TIOCSWINSZ", Const, 0, ""},
+ {"TIOCTIMESTAMP", Const, 0, ""},
+ {"TIOCUCNTL", Const, 0, ""},
+ {"TIOCVHANGUP", Const, 0, ""},
+ {"TIOCXMTFRAME", Const, 1, ""},
+ {"TOKEN_ADJUST_DEFAULT", Const, 0, ""},
+ {"TOKEN_ADJUST_GROUPS", Const, 0, ""},
+ {"TOKEN_ADJUST_PRIVILEGES", Const, 0, ""},
+ {"TOKEN_ADJUST_SESSIONID", Const, 11, ""},
+ {"TOKEN_ALL_ACCESS", Const, 0, ""},
+ {"TOKEN_ASSIGN_PRIMARY", Const, 0, ""},
+ {"TOKEN_DUPLICATE", Const, 0, ""},
+ {"TOKEN_EXECUTE", Const, 0, ""},
+ {"TOKEN_IMPERSONATE", Const, 0, ""},
+ {"TOKEN_QUERY", Const, 0, ""},
+ {"TOKEN_QUERY_SOURCE", Const, 0, ""},
+ {"TOKEN_READ", Const, 0, ""},
+ {"TOKEN_WRITE", Const, 0, ""},
+ {"TOSTOP", Const, 0, ""},
+ {"TRUNCATE_EXISTING", Const, 0, ""},
+ {"TUNATTACHFILTER", Const, 0, ""},
+ {"TUNDETACHFILTER", Const, 0, ""},
+ {"TUNGETFEATURES", Const, 0, ""},
+ {"TUNGETIFF", Const, 0, ""},
+ {"TUNGETSNDBUF", Const, 0, ""},
+ {"TUNGETVNETHDRSZ", Const, 0, ""},
+ {"TUNSETDEBUG", Const, 0, ""},
+ {"TUNSETGROUP", Const, 0, ""},
+ {"TUNSETIFF", Const, 0, ""},
+ {"TUNSETLINK", Const, 0, ""},
+ {"TUNSETNOCSUM", Const, 0, ""},
+ {"TUNSETOFFLOAD", Const, 0, ""},
+ {"TUNSETOWNER", Const, 0, ""},
+ {"TUNSETPERSIST", Const, 0, ""},
+ {"TUNSETSNDBUF", Const, 0, ""},
+ {"TUNSETTXFILTER", Const, 0, ""},
+ {"TUNSETVNETHDRSZ", Const, 0, ""},
+ {"Tee", Func, 0, "func(rfd int, wfd int, len int, flags int) (n int64, err error)"},
+ {"TerminateProcess", Func, 0, ""},
+ {"Termios", Type, 0, ""},
+ {"Termios.Cc", Field, 0, ""},
+ {"Termios.Cflag", Field, 0, ""},
+ {"Termios.Iflag", Field, 0, ""},
+ {"Termios.Ispeed", Field, 0, ""},
+ {"Termios.Lflag", Field, 0, ""},
+ {"Termios.Line", Field, 0, ""},
+ {"Termios.Oflag", Field, 0, ""},
+ {"Termios.Ospeed", Field, 0, ""},
+ {"Termios.Pad_cgo_0", Field, 0, ""},
+ {"Tgkill", Func, 0, "func(tgid int, tid int, sig Signal) (err error)"},
+ {"Time", Func, 0, "func(t *Time_t) (tt Time_t, err error)"},
+ {"Time_t", Type, 0, ""},
+ {"Times", Func, 0, "func(tms *Tms) (ticks uintptr, err error)"},
+ {"Timespec", Type, 0, ""},
+ {"Timespec.Nsec", Field, 0, ""},
+ {"Timespec.Pad_cgo_0", Field, 2, ""},
+ {"Timespec.Sec", Field, 0, ""},
+ {"TimespecToNsec", Func, 0, "func(ts Timespec) int64"},
+ {"Timeval", Type, 0, ""},
+ {"Timeval.Pad_cgo_0", Field, 0, ""},
+ {"Timeval.Sec", Field, 0, ""},
+ {"Timeval.Usec", Field, 0, ""},
+ {"Timeval32", Type, 0, ""},
+ {"Timeval32.Sec", Field, 0, ""},
+ {"Timeval32.Usec", Field, 0, ""},
+ {"TimevalToNsec", Func, 0, "func(tv Timeval) int64"},
+ {"Timex", Type, 0, ""},
+ {"Timex.Calcnt", Field, 0, ""},
+ {"Timex.Constant", Field, 0, ""},
+ {"Timex.Errcnt", Field, 0, ""},
+ {"Timex.Esterror", Field, 0, ""},
+ {"Timex.Freq", Field, 0, ""},
+ {"Timex.Jitcnt", Field, 0, ""},
+ {"Timex.Jitter", Field, 0, ""},
+ {"Timex.Maxerror", Field, 0, ""},
+ {"Timex.Modes", Field, 0, ""},
+ {"Timex.Offset", Field, 0, ""},
+ {"Timex.Pad_cgo_0", Field, 0, ""},
+ {"Timex.Pad_cgo_1", Field, 0, ""},
+ {"Timex.Pad_cgo_2", Field, 0, ""},
+ {"Timex.Pad_cgo_3", Field, 0, ""},
+ {"Timex.Ppsfreq", Field, 0, ""},
+ {"Timex.Precision", Field, 0, ""},
+ {"Timex.Shift", Field, 0, ""},
+ {"Timex.Stabil", Field, 0, ""},
+ {"Timex.Status", Field, 0, ""},
+ {"Timex.Stbcnt", Field, 0, ""},
+ {"Timex.Tai", Field, 0, ""},
+ {"Timex.Tick", Field, 0, ""},
+ {"Timex.Time", Field, 0, ""},
+ {"Timex.Tolerance", Field, 0, ""},
+ {"Timezoneinformation", Type, 0, ""},
+ {"Timezoneinformation.Bias", Field, 0, ""},
+ {"Timezoneinformation.DaylightBias", Field, 0, ""},
+ {"Timezoneinformation.DaylightDate", Field, 0, ""},
+ {"Timezoneinformation.DaylightName", Field, 0, ""},
+ {"Timezoneinformation.StandardBias", Field, 0, ""},
+ {"Timezoneinformation.StandardDate", Field, 0, ""},
+ {"Timezoneinformation.StandardName", Field, 0, ""},
+ {"Tms", Type, 0, ""},
+ {"Tms.Cstime", Field, 0, ""},
+ {"Tms.Cutime", Field, 0, ""},
+ {"Tms.Stime", Field, 0, ""},
+ {"Tms.Utime", Field, 0, ""},
+ {"Token", Type, 0, ""},
+ {"TokenAccessInformation", Const, 0, ""},
+ {"TokenAuditPolicy", Const, 0, ""},
+ {"TokenDefaultDacl", Const, 0, ""},
+ {"TokenElevation", Const, 0, ""},
+ {"TokenElevationType", Const, 0, ""},
+ {"TokenGroups", Const, 0, ""},
+ {"TokenGroupsAndPrivileges", Const, 0, ""},
+ {"TokenHasRestrictions", Const, 0, ""},
+ {"TokenImpersonationLevel", Const, 0, ""},
+ {"TokenIntegrityLevel", Const, 0, ""},
+ {"TokenLinkedToken", Const, 0, ""},
+ {"TokenLogonSid", Const, 0, ""},
+ {"TokenMandatoryPolicy", Const, 0, ""},
+ {"TokenOrigin", Const, 0, ""},
+ {"TokenOwner", Const, 0, ""},
+ {"TokenPrimaryGroup", Const, 0, ""},
+ {"TokenPrivileges", Const, 0, ""},
+ {"TokenRestrictedSids", Const, 0, ""},
+ {"TokenSandBoxInert", Const, 0, ""},
+ {"TokenSessionId", Const, 0, ""},
+ {"TokenSessionReference", Const, 0, ""},
+ {"TokenSource", Const, 0, ""},
+ {"TokenStatistics", Const, 0, ""},
+ {"TokenType", Const, 0, ""},
+ {"TokenUIAccess", Const, 0, ""},
+ {"TokenUser", Const, 0, ""},
+ {"TokenVirtualizationAllowed", Const, 0, ""},
+ {"TokenVirtualizationEnabled", Const, 0, ""},
+ {"Tokenprimarygroup", Type, 0, ""},
+ {"Tokenprimarygroup.PrimaryGroup", Field, 0, ""},
+ {"Tokenuser", Type, 0, ""},
+ {"Tokenuser.User", Field, 0, ""},
+ {"TranslateAccountName", Func, 0, ""},
+ {"TranslateName", Func, 0, ""},
+ {"TransmitFile", Func, 0, ""},
+ {"TransmitFileBuffers", Type, 0, ""},
+ {"TransmitFileBuffers.Head", Field, 0, ""},
+ {"TransmitFileBuffers.HeadLength", Field, 0, ""},
+ {"TransmitFileBuffers.Tail", Field, 0, ""},
+ {"TransmitFileBuffers.TailLength", Field, 0, ""},
+ {"Truncate", Func, 0, "func(path string, length int64) (err error)"},
+ {"UNIX_PATH_MAX", Const, 12, ""},
+ {"USAGE_MATCH_TYPE_AND", Const, 0, ""},
+ {"USAGE_MATCH_TYPE_OR", Const, 0, ""},
+ {"UTF16FromString", Func, 1, ""},
+ {"UTF16PtrFromString", Func, 1, ""},
+ {"UTF16ToString", Func, 0, ""},
+ {"Ucred", Type, 0, ""},
+ {"Ucred.Gid", Field, 0, ""},
+ {"Ucred.Pid", Field, 0, ""},
+ {"Ucred.Uid", Field, 0, ""},
+ {"Umask", Func, 0, "func(mask int) (oldmask int)"},
+ {"Uname", Func, 0, "func(buf *Utsname) (err error)"},
+ {"Undelete", Func, 0, ""},
+ {"UnixCredentials", Func, 0, "func(ucred *Ucred) []byte"},
+ {"UnixRights", Func, 0, "func(fds ...int) []byte"},
+ {"Unlink", Func, 0, "func(path string) error"},
+ {"Unlinkat", Func, 0, "func(dirfd int, path string) error"},
+ {"UnmapViewOfFile", Func, 0, ""},
+ {"Unmount", Func, 0, "func(target string, flags int) (err error)"},
+ {"Unsetenv", Func, 4, "func(key string) error"},
+ {"Unshare", Func, 0, "func(flags int) (err error)"},
+ {"UserInfo10", Type, 0, ""},
+ {"UserInfo10.Comment", Field, 0, ""},
+ {"UserInfo10.FullName", Field, 0, ""},
+ {"UserInfo10.Name", Field, 0, ""},
+ {"UserInfo10.UsrComment", Field, 0, ""},
+ {"Ustat", Func, 0, "func(dev int, ubuf *Ustat_t) (err error)"},
+ {"Ustat_t", Type, 0, ""},
+ {"Ustat_t.Fname", Field, 0, ""},
+ {"Ustat_t.Fpack", Field, 0, ""},
+ {"Ustat_t.Pad_cgo_0", Field, 0, ""},
+ {"Ustat_t.Pad_cgo_1", Field, 0, ""},
+ {"Ustat_t.Tfree", Field, 0, ""},
+ {"Ustat_t.Tinode", Field, 0, ""},
+ {"Utimbuf", Type, 0, ""},
+ {"Utimbuf.Actime", Field, 0, ""},
+ {"Utimbuf.Modtime", Field, 0, ""},
+ {"Utime", Func, 0, "func(path string, buf *Utimbuf) (err error)"},
+ {"Utimes", Func, 0, "func(path string, tv []Timeval) (err error)"},
+ {"UtimesNano", Func, 1, "func(path string, ts []Timespec) (err error)"},
+ {"Utsname", Type, 0, ""},
+ {"Utsname.Domainname", Field, 0, ""},
+ {"Utsname.Machine", Field, 0, ""},
+ {"Utsname.Nodename", Field, 0, ""},
+ {"Utsname.Release", Field, 0, ""},
+ {"Utsname.Sysname", Field, 0, ""},
+ {"Utsname.Version", Field, 0, ""},
+ {"VDISCARD", Const, 0, ""},
+ {"VDSUSP", Const, 1, ""},
+ {"VEOF", Const, 0, ""},
+ {"VEOL", Const, 0, ""},
+ {"VEOL2", Const, 0, ""},
+ {"VERASE", Const, 0, ""},
+ {"VERASE2", Const, 1, ""},
+ {"VINTR", Const, 0, ""},
+ {"VKILL", Const, 0, ""},
+ {"VLNEXT", Const, 0, ""},
+ {"VMIN", Const, 0, ""},
+ {"VQUIT", Const, 0, ""},
+ {"VREPRINT", Const, 0, ""},
+ {"VSTART", Const, 0, ""},
+ {"VSTATUS", Const, 1, ""},
+ {"VSTOP", Const, 0, ""},
+ {"VSUSP", Const, 0, ""},
+ {"VSWTC", Const, 0, ""},
+ {"VT0", Const, 1, ""},
+ {"VT1", Const, 1, ""},
+ {"VTDLY", Const, 1, ""},
+ {"VTIME", Const, 0, ""},
+ {"VWERASE", Const, 0, ""},
+ {"VirtualLock", Func, 0, ""},
+ {"VirtualUnlock", Func, 0, ""},
+ {"WAIT_ABANDONED", Const, 0, ""},
+ {"WAIT_FAILED", Const, 0, ""},
+ {"WAIT_OBJECT_0", Const, 0, ""},
+ {"WAIT_TIMEOUT", Const, 0, ""},
+ {"WALL", Const, 0, ""},
+ {"WALLSIG", Const, 1, ""},
+ {"WALTSIG", Const, 1, ""},
+ {"WCLONE", Const, 0, ""},
+ {"WCONTINUED", Const, 0, ""},
+ {"WCOREFLAG", Const, 0, ""},
+ {"WEXITED", Const, 0, ""},
+ {"WLINUXCLONE", Const, 0, ""},
+ {"WNOHANG", Const, 0, ""},
+ {"WNOTHREAD", Const, 0, ""},
+ {"WNOWAIT", Const, 0, ""},
+ {"WNOZOMBIE", Const, 1, ""},
+ {"WOPTSCHECKED", Const, 1, ""},
+ {"WORDSIZE", Const, 0, ""},
+ {"WSABuf", Type, 0, ""},
+ {"WSABuf.Buf", Field, 0, ""},
+ {"WSABuf.Len", Field, 0, ""},
+ {"WSACleanup", Func, 0, ""},
+ {"WSADESCRIPTION_LEN", Const, 0, ""},
+ {"WSAData", Type, 0, ""},
+ {"WSAData.Description", Field, 0, ""},
+ {"WSAData.HighVersion", Field, 0, ""},
+ {"WSAData.MaxSockets", Field, 0, ""},
+ {"WSAData.MaxUdpDg", Field, 0, ""},
+ {"WSAData.SystemStatus", Field, 0, ""},
+ {"WSAData.VendorInfo", Field, 0, ""},
+ {"WSAData.Version", Field, 0, ""},
+ {"WSAEACCES", Const, 2, ""},
+ {"WSAECONNABORTED", Const, 9, ""},
+ {"WSAECONNRESET", Const, 3, ""},
+ {"WSAENOPROTOOPT", Const, 23, ""},
+ {"WSAEnumProtocols", Func, 2, ""},
+ {"WSAID_CONNECTEX", Var, 1, ""},
+ {"WSAIoctl", Func, 0, ""},
+ {"WSAPROTOCOL_LEN", Const, 2, ""},
+ {"WSAProtocolChain", Type, 2, ""},
+ {"WSAProtocolChain.ChainEntries", Field, 2, ""},
+ {"WSAProtocolChain.ChainLen", Field, 2, ""},
+ {"WSAProtocolInfo", Type, 2, ""},
+ {"WSAProtocolInfo.AddressFamily", Field, 2, ""},
+ {"WSAProtocolInfo.CatalogEntryId", Field, 2, ""},
+ {"WSAProtocolInfo.MaxSockAddr", Field, 2, ""},
+ {"WSAProtocolInfo.MessageSize", Field, 2, ""},
+ {"WSAProtocolInfo.MinSockAddr", Field, 2, ""},
+ {"WSAProtocolInfo.NetworkByteOrder", Field, 2, ""},
+ {"WSAProtocolInfo.Protocol", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolChain", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2, ""},
+ {"WSAProtocolInfo.ProtocolName", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderFlags", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderId", Field, 2, ""},
+ {"WSAProtocolInfo.ProviderReserved", Field, 2, ""},
+ {"WSAProtocolInfo.SecurityScheme", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags1", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags2", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags3", Field, 2, ""},
+ {"WSAProtocolInfo.ServiceFlags4", Field, 2, ""},
+ {"WSAProtocolInfo.SocketType", Field, 2, ""},
+ {"WSAProtocolInfo.Version", Field, 2, ""},
+ {"WSARecv", Func, 0, ""},
+ {"WSARecvFrom", Func, 0, ""},
+ {"WSASYS_STATUS_LEN", Const, 0, ""},
+ {"WSASend", Func, 0, ""},
+ {"WSASendTo", Func, 0, ""},
+ {"WSASendto", Func, 0, ""},
+ {"WSAStartup", Func, 0, ""},
+ {"WSTOPPED", Const, 0, ""},
+ {"WTRAPPED", Const, 1, ""},
+ {"WUNTRACED", Const, 0, ""},
+ {"Wait4", Func, 0, "func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)"},
+ {"WaitForSingleObject", Func, 0, ""},
+ {"WaitStatus", Type, 0, ""},
+ {"WaitStatus.ExitCode", Field, 0, ""},
+ {"Win32FileAttributeData", Type, 0, ""},
+ {"Win32FileAttributeData.CreationTime", Field, 0, ""},
+ {"Win32FileAttributeData.FileAttributes", Field, 0, ""},
+ {"Win32FileAttributeData.FileSizeHigh", Field, 0, ""},
+ {"Win32FileAttributeData.FileSizeLow", Field, 0, ""},
+ {"Win32FileAttributeData.LastAccessTime", Field, 0, ""},
+ {"Win32FileAttributeData.LastWriteTime", Field, 0, ""},
+ {"Win32finddata", Type, 0, ""},
+ {"Win32finddata.AlternateFileName", Field, 0, ""},
+ {"Win32finddata.CreationTime", Field, 0, ""},
+ {"Win32finddata.FileAttributes", Field, 0, ""},
+ {"Win32finddata.FileName", Field, 0, ""},
+ {"Win32finddata.FileSizeHigh", Field, 0, ""},
+ {"Win32finddata.FileSizeLow", Field, 0, ""},
+ {"Win32finddata.LastAccessTime", Field, 0, ""},
+ {"Win32finddata.LastWriteTime", Field, 0, ""},
+ {"Win32finddata.Reserved0", Field, 0, ""},
+ {"Win32finddata.Reserved1", Field, 0, ""},
+ {"Write", Func, 0, "func(fd int, p []byte) (n int, err error)"},
+ {"WriteConsole", Func, 1, ""},
+ {"WriteFile", Func, 0, ""},
+ {"X509_ASN_ENCODING", Const, 0, ""},
+ {"XCASE", Const, 0, ""},
+ {"XP1_CONNECTIONLESS", Const, 2, ""},
+ {"XP1_CONNECT_DATA", Const, 2, ""},
+ {"XP1_DISCONNECT_DATA", Const, 2, ""},
+ {"XP1_EXPEDITED_DATA", Const, 2, ""},
+ {"XP1_GRACEFUL_CLOSE", Const, 2, ""},
+ {"XP1_GUARANTEED_DELIVERY", Const, 2, ""},
+ {"XP1_GUARANTEED_ORDER", Const, 2, ""},
+ {"XP1_IFS_HANDLES", Const, 2, ""},
+ {"XP1_MESSAGE_ORIENTED", Const, 2, ""},
+ {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2, ""},
+ {"XP1_MULTIPOINT_DATA_PLANE", Const, 2, ""},
+ {"XP1_PARTIAL_MESSAGE", Const, 2, ""},
+ {"XP1_PSEUDO_STREAM", Const, 2, ""},
+ {"XP1_QOS_SUPPORTED", Const, 2, ""},
+ {"XP1_SAN_SUPPORT_SDP", Const, 2, ""},
+ {"XP1_SUPPORT_BROADCAST", Const, 2, ""},
+ {"XP1_SUPPORT_MULTIPOINT", Const, 2, ""},
+ {"XP1_UNI_RECV", Const, 2, ""},
+ {"XP1_UNI_SEND", Const, 2, ""},
+ },
+ "syscall/js": {
+ {"CopyBytesToGo", Func, 0, ""},
+ {"CopyBytesToJS", Func, 0, ""},
+ {"Error", Type, 0, ""},
+ {"Func", Type, 0, ""},
+ {"FuncOf", Func, 0, ""},
+ {"Global", Func, 0, ""},
+ {"Null", Func, 0, ""},
+ {"Type", Type, 0, ""},
+ {"TypeBoolean", Const, 0, ""},
+ {"TypeFunction", Const, 0, ""},
+ {"TypeNull", Const, 0, ""},
+ {"TypeNumber", Const, 0, ""},
+ {"TypeObject", Const, 0, ""},
+ {"TypeString", Const, 0, ""},
+ {"TypeSymbol", Const, 0, ""},
+ {"TypeUndefined", Const, 0, ""},
+ {"Undefined", Func, 0, ""},
+ {"Value", Type, 0, ""},
+ {"ValueError", Type, 0, ""},
+ {"ValueOf", Func, 0, ""},
+ },
+ "testing": {
+ {"(*B).Attr", Method, 25, ""},
+ {"(*B).Chdir", Method, 24, ""},
+ {"(*B).Cleanup", Method, 14, ""},
+ {"(*B).Context", Method, 24, ""},
+ {"(*B).Elapsed", Method, 20, ""},
+ {"(*B).Error", Method, 0, ""},
+ {"(*B).Errorf", Method, 0, ""},
+ {"(*B).Fail", Method, 0, ""},
+ {"(*B).FailNow", Method, 0, ""},
+ {"(*B).Failed", Method, 0, ""},
+ {"(*B).Fatal", Method, 0, ""},
+ {"(*B).Fatalf", Method, 0, ""},
+ {"(*B).Helper", Method, 9, ""},
+ {"(*B).Log", Method, 0, ""},
+ {"(*B).Logf", Method, 0, ""},
+ {"(*B).Loop", Method, 24, ""},
+ {"(*B).Name", Method, 8, ""},
+ {"(*B).Output", Method, 25, ""},
+ {"(*B).ReportAllocs", Method, 1, ""},
+ {"(*B).ReportMetric", Method, 13, ""},
+ {"(*B).ResetTimer", Method, 0, ""},
+ {"(*B).Run", Method, 7, ""},
+ {"(*B).RunParallel", Method, 3, ""},
+ {"(*B).SetBytes", Method, 0, ""},
+ {"(*B).SetParallelism", Method, 3, ""},
+ {"(*B).Setenv", Method, 17, ""},
+ {"(*B).Skip", Method, 1, ""},
+ {"(*B).SkipNow", Method, 1, ""},
+ {"(*B).Skipf", Method, 1, ""},
+ {"(*B).Skipped", Method, 1, ""},
+ {"(*B).StartTimer", Method, 0, ""},
+ {"(*B).StopTimer", Method, 0, ""},
+ {"(*B).TempDir", Method, 15, ""},
+ {"(*F).Add", Method, 18, ""},
+ {"(*F).Attr", Method, 25, ""},
+ {"(*F).Chdir", Method, 24, ""},
+ {"(*F).Cleanup", Method, 18, ""},
+ {"(*F).Context", Method, 24, ""},
+ {"(*F).Error", Method, 18, ""},
+ {"(*F).Errorf", Method, 18, ""},
+ {"(*F).Fail", Method, 18, ""},
+ {"(*F).FailNow", Method, 18, ""},
+ {"(*F).Failed", Method, 18, ""},
+ {"(*F).Fatal", Method, 18, ""},
+ {"(*F).Fatalf", Method, 18, ""},
+ {"(*F).Fuzz", Method, 18, ""},
+ {"(*F).Helper", Method, 18, ""},
+ {"(*F).Log", Method, 18, ""},
+ {"(*F).Logf", Method, 18, ""},
+ {"(*F).Name", Method, 18, ""},
+ {"(*F).Output", Method, 25, ""},
+ {"(*F).Setenv", Method, 18, ""},
+ {"(*F).Skip", Method, 18, ""},
+ {"(*F).SkipNow", Method, 18, ""},
+ {"(*F).Skipf", Method, 18, ""},
+ {"(*F).Skipped", Method, 18, ""},
+ {"(*F).TempDir", Method, 18, ""},
+ {"(*M).Run", Method, 4, ""},
+ {"(*PB).Next", Method, 3, ""},
+ {"(*T).Attr", Method, 25, ""},
+ {"(*T).Chdir", Method, 24, ""},
+ {"(*T).Cleanup", Method, 14, ""},
+ {"(*T).Context", Method, 24, ""},
+ {"(*T).Deadline", Method, 15, ""},
+ {"(*T).Error", Method, 0, ""},
+ {"(*T).Errorf", Method, 0, ""},
+ {"(*T).Fail", Method, 0, ""},
+ {"(*T).FailNow", Method, 0, ""},
+ {"(*T).Failed", Method, 0, ""},
+ {"(*T).Fatal", Method, 0, ""},
+ {"(*T).Fatalf", Method, 0, ""},
+ {"(*T).Helper", Method, 9, ""},
+ {"(*T).Log", Method, 0, ""},
+ {"(*T).Logf", Method, 0, ""},
+ {"(*T).Name", Method, 8, ""},
+ {"(*T).Output", Method, 25, ""},
+ {"(*T).Parallel", Method, 0, ""},
+ {"(*T).Run", Method, 7, ""},
+ {"(*T).Setenv", Method, 17, ""},
+ {"(*T).Skip", Method, 1, ""},
+ {"(*T).SkipNow", Method, 1, ""},
+ {"(*T).Skipf", Method, 1, ""},
+ {"(*T).Skipped", Method, 1, ""},
+ {"(*T).TempDir", Method, 15, ""},
+ {"(BenchmarkResult).AllocedBytesPerOp", Method, 1, ""},
+ {"(BenchmarkResult).AllocsPerOp", Method, 1, ""},
+ {"(BenchmarkResult).MemString", Method, 1, ""},
+ {"(BenchmarkResult).NsPerOp", Method, 0, ""},
+ {"(BenchmarkResult).String", Method, 0, ""},
+ {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"},
+ {"B", Type, 0, ""},
+ {"B.N", Field, 0, ""},
+ {"Benchmark", Func, 0, "func(f func(b *B)) BenchmarkResult"},
+ {"BenchmarkResult", Type, 0, ""},
+ {"BenchmarkResult.Bytes", Field, 0, ""},
+ {"BenchmarkResult.Extra", Field, 13, ""},
+ {"BenchmarkResult.MemAllocs", Field, 1, ""},
+ {"BenchmarkResult.MemBytes", Field, 1, ""},
+ {"BenchmarkResult.N", Field, 0, ""},
+ {"BenchmarkResult.T", Field, 0, ""},
+ {"Cover", Type, 2, ""},
+ {"Cover.Blocks", Field, 2, ""},
+ {"Cover.Counters", Field, 2, ""},
+ {"Cover.CoveredPackages", Field, 2, ""},
+ {"Cover.Mode", Field, 2, ""},
+ {"CoverBlock", Type, 2, ""},
+ {"CoverBlock.Col0", Field, 2, ""},
+ {"CoverBlock.Col1", Field, 2, ""},
+ {"CoverBlock.Line0", Field, 2, ""},
+ {"CoverBlock.Line1", Field, 2, ""},
+ {"CoverBlock.Stmts", Field, 2, ""},
+ {"CoverMode", Func, 8, "func() string"},
+ {"Coverage", Func, 4, "func() float64"},
+ {"F", Type, 18, ""},
+ {"Init", Func, 13, "func()"},
+ {"InternalBenchmark", Type, 0, ""},
+ {"InternalBenchmark.F", Field, 0, ""},
+ {"InternalBenchmark.Name", Field, 0, ""},
+ {"InternalExample", Type, 0, ""},
+ {"InternalExample.F", Field, 0, ""},
+ {"InternalExample.Name", Field, 0, ""},
+ {"InternalExample.Output", Field, 0, ""},
+ {"InternalExample.Unordered", Field, 7, ""},
+ {"InternalFuzzTarget", Type, 18, ""},
+ {"InternalFuzzTarget.Fn", Field, 18, ""},
+ {"InternalFuzzTarget.Name", Field, 18, ""},
+ {"InternalTest", Type, 0, ""},
+ {"InternalTest.F", Field, 0, ""},
+ {"InternalTest.Name", Field, 0, ""},
+ {"M", Type, 4, ""},
+ {"Main", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample)"},
+ {"MainStart", Func, 4, "func(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, fuzzTargets []InternalFuzzTarget, examples []InternalExample) *M"},
+ {"PB", Type, 3, ""},
+ {"RegisterCover", Func, 2, "func(c Cover)"},
+ {"RunBenchmarks", Func, 0, "func(matchString func(pat string, str string) (bool, error), benchmarks []InternalBenchmark)"},
+ {"RunExamples", Func, 0, "func(matchString func(pat string, str string) (bool, error), examples []InternalExample) (ok bool)"},
+ {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"},
+ {"Short", Func, 0, "func() bool"},
+ {"T", Type, 0, ""},
+ {"TB", Type, 2, ""},
+ {"Testing", Func, 21, "func() bool"},
+ {"Verbose", Func, 1, "func() bool"},
+ },
+ "testing/fstest": {
+ {"(MapFS).Glob", Method, 16, ""},
+ {"(MapFS).Lstat", Method, 25, ""},
+ {"(MapFS).Open", Method, 16, ""},
+ {"(MapFS).ReadDir", Method, 16, ""},
+ {"(MapFS).ReadFile", Method, 16, ""},
+ {"(MapFS).ReadLink", Method, 25, ""},
+ {"(MapFS).Stat", Method, 16, ""},
+ {"(MapFS).Sub", Method, 16, ""},
+ {"MapFS", Type, 16, ""},
+ {"MapFile", Type, 16, ""},
+ {"MapFile.Data", Field, 16, ""},
+ {"MapFile.ModTime", Field, 16, ""},
+ {"MapFile.Mode", Field, 16, ""},
+ {"MapFile.Sys", Field, 16, ""},
+ {"TestFS", Func, 16, "func(fsys fs.FS, expected ...string) error"},
+ },
+ "testing/iotest": {
+ {"DataErrReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"ErrReader", Func, 16, "func(err error) io.Reader"},
+ {"ErrTimeout", Var, 0, ""},
+ {"HalfReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"NewReadLogger", Func, 0, "func(prefix string, r io.Reader) io.Reader"},
+ {"NewWriteLogger", Func, 0, "func(prefix string, w io.Writer) io.Writer"},
+ {"OneByteReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"TestReader", Func, 16, "func(r io.Reader, content []byte) error"},
+ {"TimeoutReader", Func, 0, "func(r io.Reader) io.Reader"},
+ {"TruncateWriter", Func, 0, "func(w io.Writer, n int64) io.Writer"},
+ },
+ "testing/quick": {
+ {"(*CheckEqualError).Error", Method, 0, ""},
+ {"(*CheckError).Error", Method, 0, ""},
+ {"(SetupError).Error", Method, 0, ""},
+ {"Check", Func, 0, "func(f any, config *Config) error"},
+ {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"},
+ {"CheckEqualError", Type, 0, ""},
+ {"CheckEqualError.CheckError", Field, 0, ""},
+ {"CheckEqualError.Out1", Field, 0, ""},
+ {"CheckEqualError.Out2", Field, 0, ""},
+ {"CheckError", Type, 0, ""},
+ {"CheckError.Count", Field, 0, ""},
+ {"CheckError.In", Field, 0, ""},
+ {"Config", Type, 0, ""},
+ {"Config.MaxCount", Field, 0, ""},
+ {"Config.MaxCountScale", Field, 0, ""},
+ {"Config.Rand", Field, 0, ""},
+ {"Config.Values", Field, 0, ""},
+ {"Generator", Type, 0, ""},
+ {"SetupError", Type, 0, ""},
+ {"Value", Func, 0, "func(t reflect.Type, rand *rand.Rand) (value reflect.Value, ok bool)"},
+ },
+ "testing/slogtest": {
+ {"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"},
+ {"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"},
+ },
+ "testing/synctest": {
+ {"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"},
+ {"Wait", Func, 25, "func()"},
+ },
+ "text/scanner": {
+ {"(*Position).IsValid", Method, 0, ""},
+ {"(*Scanner).Init", Method, 0, ""},
+ {"(*Scanner).IsValid", Method, 0, ""},
+ {"(*Scanner).Next", Method, 0, ""},
+ {"(*Scanner).Peek", Method, 0, ""},
+ {"(*Scanner).Pos", Method, 0, ""},
+ {"(*Scanner).Scan", Method, 0, ""},
+ {"(*Scanner).TokenText", Method, 0, ""},
+ {"(Position).String", Method, 0, ""},
+ {"(Scanner).String", Method, 0, ""},
+ {"Char", Const, 0, ""},
+ {"Comment", Const, 0, ""},
+ {"EOF", Const, 0, ""},
+ {"Float", Const, 0, ""},
+ {"GoTokens", Const, 0, ""},
+ {"GoWhitespace", Const, 0, ""},
+ {"Ident", Const, 0, ""},
+ {"Int", Const, 0, ""},
+ {"Position", Type, 0, ""},
+ {"Position.Column", Field, 0, ""},
+ {"Position.Filename", Field, 0, ""},
+ {"Position.Line", Field, 0, ""},
+ {"Position.Offset", Field, 0, ""},
+ {"RawString", Const, 0, ""},
+ {"ScanChars", Const, 0, ""},
+ {"ScanComments", Const, 0, ""},
+ {"ScanFloats", Const, 0, ""},
+ {"ScanIdents", Const, 0, ""},
+ {"ScanInts", Const, 0, ""},
+ {"ScanRawStrings", Const, 0, ""},
+ {"ScanStrings", Const, 0, ""},
+ {"Scanner", Type, 0, ""},
+ {"Scanner.Error", Field, 0, ""},
+ {"Scanner.ErrorCount", Field, 0, ""},
+ {"Scanner.IsIdentRune", Field, 4, ""},
+ {"Scanner.Mode", Field, 0, ""},
+ {"Scanner.Position", Field, 0, ""},
+ {"Scanner.Whitespace", Field, 0, ""},
+ {"SkipComments", Const, 0, ""},
+ {"String", Const, 0, ""},
+ {"TokenString", Func, 0, "func(tok rune) string"},
+ },
+ "text/tabwriter": {
+ {"(*Writer).Flush", Method, 0, ""},
+ {"(*Writer).Init", Method, 0, ""},
+ {"(*Writer).Write", Method, 0, ""},
+ {"AlignRight", Const, 0, ""},
+ {"Debug", Const, 0, ""},
+ {"DiscardEmptyColumns", Const, 0, ""},
+ {"Escape", Const, 0, ""},
+ {"FilterHTML", Const, 0, ""},
+ {"NewWriter", Func, 0, "func(output io.Writer, minwidth int, tabwidth int, padding int, padchar byte, flags uint) *Writer"},
+ {"StripEscape", Const, 0, ""},
+ {"TabIndent", Const, 0, ""},
+ {"Writer", Type, 0, ""},
+ },
+ "text/template": {
+ {"(*Template).AddParseTree", Method, 0, ""},
+ {"(*Template).Clone", Method, 0, ""},
+ {"(*Template).DefinedTemplates", Method, 5, ""},
+ {"(*Template).Delims", Method, 0, ""},
+ {"(*Template).Execute", Method, 0, ""},
+ {"(*Template).ExecuteTemplate", Method, 0, ""},
+ {"(*Template).Funcs", Method, 0, ""},
+ {"(*Template).Lookup", Method, 0, ""},
+ {"(*Template).Name", Method, 0, ""},
+ {"(*Template).New", Method, 0, ""},
+ {"(*Template).Option", Method, 5, ""},
+ {"(*Template).Parse", Method, 0, ""},
+ {"(*Template).ParseFS", Method, 16, ""},
+ {"(*Template).ParseFiles", Method, 0, ""},
+ {"(*Template).ParseGlob", Method, 0, ""},
+ {"(*Template).Templates", Method, 0, ""},
+ {"(ExecError).Error", Method, 6, ""},
+ {"(ExecError).Unwrap", Method, 13, ""},
+ {"(Template).Copy", Method, 2, ""},
+ {"(Template).ErrorContext", Method, 1, ""},
+ {"ExecError", Type, 6, ""},
+ {"ExecError.Err", Field, 6, ""},
+ {"ExecError.Name", Field, 6, ""},
+ {"FuncMap", Type, 0, ""},
+ {"HTMLEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"HTMLEscapeString", Func, 0, "func(s string) string"},
+ {"HTMLEscaper", Func, 0, "func(args ...any) string"},
+ {"IsTrue", Func, 6, "func(val any) (truth bool, ok bool)"},
+ {"JSEscape", Func, 0, "func(w io.Writer, b []byte)"},
+ {"JSEscapeString", Func, 0, "func(s string) string"},
+ {"JSEscaper", Func, 0, "func(args ...any) string"},
+ {"Must", Func, 0, "func(t *Template, err error) *Template"},
+ {"New", Func, 0, "func(name string) *Template"},
+ {"ParseFS", Func, 16, "func(fsys fs.FS, patterns ...string) (*Template, error)"},
+ {"ParseFiles", Func, 0, "func(filenames ...string) (*Template, error)"},
+ {"ParseGlob", Func, 0, "func(pattern string) (*Template, error)"},
+ {"Template", Type, 0, ""},
+ {"Template.Tree", Field, 0, ""},
+ {"URLQueryEscaper", Func, 0, "func(args ...any) string"},
+ },
+ "text/template/parse": {
+ {"(*ActionNode).Copy", Method, 0, ""},
+ {"(*ActionNode).String", Method, 0, ""},
+ {"(*BoolNode).Copy", Method, 0, ""},
+ {"(*BoolNode).String", Method, 0, ""},
+ {"(*BranchNode).Copy", Method, 4, ""},
+ {"(*BranchNode).String", Method, 0, ""},
+ {"(*BreakNode).Copy", Method, 18, ""},
+ {"(*BreakNode).String", Method, 18, ""},
+ {"(*ChainNode).Add", Method, 1, ""},
+ {"(*ChainNode).Copy", Method, 1, ""},
+ {"(*ChainNode).String", Method, 1, ""},
+ {"(*CommandNode).Copy", Method, 0, ""},
+ {"(*CommandNode).String", Method, 0, ""},
+ {"(*CommentNode).Copy", Method, 16, ""},
+ {"(*CommentNode).String", Method, 16, ""},
+ {"(*ContinueNode).Copy", Method, 18, ""},
+ {"(*ContinueNode).String", Method, 18, ""},
+ {"(*DotNode).Copy", Method, 0, ""},
+ {"(*DotNode).String", Method, 0, ""},
+ {"(*DotNode).Type", Method, 0, ""},
+ {"(*FieldNode).Copy", Method, 0, ""},
+ {"(*FieldNode).String", Method, 0, ""},
+ {"(*IdentifierNode).Copy", Method, 0, ""},
+ {"(*IdentifierNode).SetPos", Method, 1, ""},
+ {"(*IdentifierNode).SetTree", Method, 4, ""},
+ {"(*IdentifierNode).String", Method, 0, ""},
+ {"(*IfNode).Copy", Method, 0, ""},
+ {"(*IfNode).String", Method, 0, ""},
+ {"(*ListNode).Copy", Method, 0, ""},
+ {"(*ListNode).CopyList", Method, 0, ""},
+ {"(*ListNode).String", Method, 0, ""},
+ {"(*NilNode).Copy", Method, 1, ""},
+ {"(*NilNode).String", Method, 1, ""},
+ {"(*NilNode).Type", Method, 1, ""},
+ {"(*NumberNode).Copy", Method, 0, ""},
+ {"(*NumberNode).String", Method, 0, ""},
+ {"(*PipeNode).Copy", Method, 0, ""},
+ {"(*PipeNode).CopyPipe", Method, 0, ""},
+ {"(*PipeNode).String", Method, 0, ""},
+ {"(*RangeNode).Copy", Method, 0, ""},
+ {"(*RangeNode).String", Method, 0, ""},
+ {"(*StringNode).Copy", Method, 0, ""},
+ {"(*StringNode).String", Method, 0, ""},
+ {"(*TemplateNode).Copy", Method, 0, ""},
+ {"(*TemplateNode).String", Method, 0, ""},
+ {"(*TextNode).Copy", Method, 0, ""},
+ {"(*TextNode).String", Method, 0, ""},
+ {"(*Tree).Copy", Method, 2, ""},
+ {"(*Tree).ErrorContext", Method, 1, ""},
+ {"(*Tree).Parse", Method, 0, ""},
+ {"(*VariableNode).Copy", Method, 0, ""},
+ {"(*VariableNode).String", Method, 0, ""},
+ {"(*WithNode).Copy", Method, 0, ""},
+ {"(*WithNode).String", Method, 0, ""},
+ {"(ActionNode).Position", Method, 1, ""},
+ {"(ActionNode).Type", Method, 0, ""},
+ {"(BoolNode).Position", Method, 1, ""},
+ {"(BoolNode).Type", Method, 0, ""},
+ {"(BranchNode).Position", Method, 1, ""},
+ {"(BranchNode).Type", Method, 0, ""},
+ {"(BreakNode).Position", Method, 18, ""},
+ {"(BreakNode).Type", Method, 18, ""},
+ {"(ChainNode).Position", Method, 1, ""},
+ {"(ChainNode).Type", Method, 1, ""},
+ {"(CommandNode).Position", Method, 1, ""},
+ {"(CommandNode).Type", Method, 0, ""},
+ {"(CommentNode).Position", Method, 16, ""},
+ {"(CommentNode).Type", Method, 16, ""},
+ {"(ContinueNode).Position", Method, 18, ""},
+ {"(ContinueNode).Type", Method, 18, ""},
+ {"(DotNode).Position", Method, 1, ""},
+ {"(FieldNode).Position", Method, 1, ""},
+ {"(FieldNode).Type", Method, 0, ""},
+ {"(IdentifierNode).Position", Method, 1, ""},
+ {"(IdentifierNode).Type", Method, 0, ""},
+ {"(IfNode).Position", Method, 1, ""},
+ {"(IfNode).Type", Method, 0, ""},
+ {"(ListNode).Position", Method, 1, ""},
+ {"(ListNode).Type", Method, 0, ""},
+ {"(NilNode).Position", Method, 1, ""},
+ {"(NodeType).Type", Method, 0, ""},
+ {"(NumberNode).Position", Method, 1, ""},
+ {"(NumberNode).Type", Method, 0, ""},
+ {"(PipeNode).Position", Method, 1, ""},
+ {"(PipeNode).Type", Method, 0, ""},
+ {"(Pos).Position", Method, 1, ""},
+ {"(RangeNode).Position", Method, 1, ""},
+ {"(RangeNode).Type", Method, 0, ""},
+ {"(StringNode).Position", Method, 1, ""},
+ {"(StringNode).Type", Method, 0, ""},
+ {"(TemplateNode).Position", Method, 1, ""},
+ {"(TemplateNode).Type", Method, 0, ""},
+ {"(TextNode).Position", Method, 1, ""},
+ {"(TextNode).Type", Method, 0, ""},
+ {"(VariableNode).Position", Method, 1, ""},
+ {"(VariableNode).Type", Method, 0, ""},
+ {"(WithNode).Position", Method, 1, ""},
+ {"(WithNode).Type", Method, 0, ""},
+ {"ActionNode", Type, 0, ""},
+ {"ActionNode.Line", Field, 0, ""},
+ {"ActionNode.NodeType", Field, 0, ""},
+ {"ActionNode.Pipe", Field, 0, ""},
+ {"ActionNode.Pos", Field, 1, ""},
+ {"BoolNode", Type, 0, ""},
+ {"BoolNode.NodeType", Field, 0, ""},
+ {"BoolNode.Pos", Field, 1, ""},
+ {"BoolNode.True", Field, 0, ""},
+ {"BranchNode", Type, 0, ""},
+ {"BranchNode.ElseList", Field, 0, ""},
+ {"BranchNode.Line", Field, 0, ""},
+ {"BranchNode.List", Field, 0, ""},
+ {"BranchNode.NodeType", Field, 0, ""},
+ {"BranchNode.Pipe", Field, 0, ""},
+ {"BranchNode.Pos", Field, 1, ""},
+ {"BreakNode", Type, 18, ""},
+ {"BreakNode.Line", Field, 18, ""},
+ {"BreakNode.NodeType", Field, 18, ""},
+ {"BreakNode.Pos", Field, 18, ""},
+ {"ChainNode", Type, 1, ""},
+ {"ChainNode.Field", Field, 1, ""},
+ {"ChainNode.Node", Field, 1, ""},
+ {"ChainNode.NodeType", Field, 1, ""},
+ {"ChainNode.Pos", Field, 1, ""},
+ {"CommandNode", Type, 0, ""},
+ {"CommandNode.Args", Field, 0, ""},
+ {"CommandNode.NodeType", Field, 0, ""},
+ {"CommandNode.Pos", Field, 1, ""},
+ {"CommentNode", Type, 16, ""},
+ {"CommentNode.NodeType", Field, 16, ""},
+ {"CommentNode.Pos", Field, 16, ""},
+ {"CommentNode.Text", Field, 16, ""},
+ {"ContinueNode", Type, 18, ""},
+ {"ContinueNode.Line", Field, 18, ""},
+ {"ContinueNode.NodeType", Field, 18, ""},
+ {"ContinueNode.Pos", Field, 18, ""},
+ {"DotNode", Type, 0, ""},
+ {"DotNode.NodeType", Field, 4, ""},
+ {"DotNode.Pos", Field, 1, ""},
+ {"FieldNode", Type, 0, ""},
+ {"FieldNode.Ident", Field, 0, ""},
+ {"FieldNode.NodeType", Field, 0, ""},
+ {"FieldNode.Pos", Field, 1, ""},
+ {"IdentifierNode", Type, 0, ""},
+ {"IdentifierNode.Ident", Field, 0, ""},
+ {"IdentifierNode.NodeType", Field, 0, ""},
+ {"IdentifierNode.Pos", Field, 1, ""},
+ {"IfNode", Type, 0, ""},
+ {"IfNode.BranchNode", Field, 0, ""},
+ {"IsEmptyTree", Func, 0, "func(n Node) bool"},
+ {"ListNode", Type, 0, ""},
+ {"ListNode.NodeType", Field, 0, ""},
+ {"ListNode.Nodes", Field, 0, ""},
+ {"ListNode.Pos", Field, 1, ""},
+ {"Mode", Type, 16, ""},
+ {"New", Func, 0, "func(name string, funcs ...map[string]any) *Tree"},
+ {"NewIdentifier", Func, 0, "func(ident string) *IdentifierNode"},
+ {"NilNode", Type, 1, ""},
+ {"NilNode.NodeType", Field, 4, ""},
+ {"NilNode.Pos", Field, 1, ""},
+ {"Node", Type, 0, ""},
+ {"NodeAction", Const, 0, ""},
+ {"NodeBool", Const, 0, ""},
+ {"NodeBreak", Const, 18, ""},
+ {"NodeChain", Const, 1, ""},
+ {"NodeCommand", Const, 0, ""},
+ {"NodeComment", Const, 16, ""},
+ {"NodeContinue", Const, 18, ""},
+ {"NodeDot", Const, 0, ""},
+ {"NodeField", Const, 0, ""},
+ {"NodeIdentifier", Const, 0, ""},
+ {"NodeIf", Const, 0, ""},
+ {"NodeList", Const, 0, ""},
+ {"NodeNil", Const, 1, ""},
+ {"NodeNumber", Const, 0, ""},
+ {"NodePipe", Const, 0, ""},
+ {"NodeRange", Const, 0, ""},
+ {"NodeString", Const, 0, ""},
+ {"NodeTemplate", Const, 0, ""},
+ {"NodeText", Const, 0, ""},
+ {"NodeType", Type, 0, ""},
+ {"NodeVariable", Const, 0, ""},
+ {"NodeWith", Const, 0, ""},
+ {"NumberNode", Type, 0, ""},
+ {"NumberNode.Complex128", Field, 0, ""},
+ {"NumberNode.Float64", Field, 0, ""},
+ {"NumberNode.Int64", Field, 0, ""},
+ {"NumberNode.IsComplex", Field, 0, ""},
+ {"NumberNode.IsFloat", Field, 0, ""},
+ {"NumberNode.IsInt", Field, 0, ""},
+ {"NumberNode.IsUint", Field, 0, ""},
+ {"NumberNode.NodeType", Field, 0, ""},
+ {"NumberNode.Pos", Field, 1, ""},
+ {"NumberNode.Text", Field, 0, ""},
+ {"NumberNode.Uint64", Field, 0, ""},
+ {"Parse", Func, 0, "func(name string, text string, leftDelim string, rightDelim string, funcs ...map[string]any) (map[string]*Tree, error)"},
+ {"ParseComments", Const, 16, ""},
+ {"PipeNode", Type, 0, ""},
+ {"PipeNode.Cmds", Field, 0, ""},
+ {"PipeNode.Decl", Field, 0, ""},
+ {"PipeNode.IsAssign", Field, 11, ""},
+ {"PipeNode.Line", Field, 0, ""},
+ {"PipeNode.NodeType", Field, 0, ""},
+ {"PipeNode.Pos", Field, 1, ""},
+ {"Pos", Type, 1, ""},
+ {"RangeNode", Type, 0, ""},
+ {"RangeNode.BranchNode", Field, 0, ""},
+ {"SkipFuncCheck", Const, 17, ""},
+ {"StringNode", Type, 0, ""},
+ {"StringNode.NodeType", Field, 0, ""},
+ {"StringNode.Pos", Field, 1, ""},
+ {"StringNode.Quoted", Field, 0, ""},
+ {"StringNode.Text", Field, 0, ""},
+ {"TemplateNode", Type, 0, ""},
+ {"TemplateNode.Line", Field, 0, ""},
+ {"TemplateNode.Name", Field, 0, ""},
+ {"TemplateNode.NodeType", Field, 0, ""},
+ {"TemplateNode.Pipe", Field, 0, ""},
+ {"TemplateNode.Pos", Field, 1, ""},
+ {"TextNode", Type, 0, ""},
+ {"TextNode.NodeType", Field, 0, ""},
+ {"TextNode.Pos", Field, 1, ""},
+ {"TextNode.Text", Field, 0, ""},
+ {"Tree", Type, 0, ""},
+ {"Tree.Mode", Field, 16, ""},
+ {"Tree.Name", Field, 0, ""},
+ {"Tree.ParseName", Field, 1, ""},
+ {"Tree.Root", Field, 0, ""},
+ {"VariableNode", Type, 0, ""},
+ {"VariableNode.Ident", Field, 0, ""},
+ {"VariableNode.NodeType", Field, 0, ""},
+ {"VariableNode.Pos", Field, 1, ""},
+ {"WithNode", Type, 0, ""},
+ {"WithNode.BranchNode", Field, 0, ""},
+ },
+ "time": {
+ {"(*Location).String", Method, 0, ""},
+ {"(*ParseError).Error", Method, 0, ""},
+ {"(*Ticker).Reset", Method, 15, ""},
+ {"(*Ticker).Stop", Method, 0, ""},
+ {"(*Time).GobDecode", Method, 0, ""},
+ {"(*Time).UnmarshalBinary", Method, 2, ""},
+ {"(*Time).UnmarshalJSON", Method, 0, ""},
+ {"(*Time).UnmarshalText", Method, 2, ""},
+ {"(*Timer).Reset", Method, 1, ""},
+ {"(*Timer).Stop", Method, 0, ""},
+ {"(Duration).Abs", Method, 19, ""},
+ {"(Duration).Hours", Method, 0, ""},
+ {"(Duration).Microseconds", Method, 13, ""},
+ {"(Duration).Milliseconds", Method, 13, ""},
+ {"(Duration).Minutes", Method, 0, ""},
+ {"(Duration).Nanoseconds", Method, 0, ""},
+ {"(Duration).Round", Method, 9, ""},
+ {"(Duration).Seconds", Method, 0, ""},
+ {"(Duration).String", Method, 0, ""},
+ {"(Duration).Truncate", Method, 9, ""},
+ {"(Month).String", Method, 0, ""},
+ {"(Time).Add", Method, 0, ""},
+ {"(Time).AddDate", Method, 0, ""},
+ {"(Time).After", Method, 0, ""},
+ {"(Time).AppendBinary", Method, 24, ""},
+ {"(Time).AppendFormat", Method, 5, ""},
+ {"(Time).AppendText", Method, 24, ""},
+ {"(Time).Before", Method, 0, ""},
+ {"(Time).Clock", Method, 0, ""},
+ {"(Time).Compare", Method, 20, ""},
+ {"(Time).Date", Method, 0, ""},
+ {"(Time).Day", Method, 0, ""},
+ {"(Time).Equal", Method, 0, ""},
+ {"(Time).Format", Method, 0, ""},
+ {"(Time).GoString", Method, 17, ""},
+ {"(Time).GobEncode", Method, 0, ""},
+ {"(Time).Hour", Method, 0, ""},
+ {"(Time).ISOWeek", Method, 0, ""},
+ {"(Time).In", Method, 0, ""},
+ {"(Time).IsDST", Method, 17, ""},
+ {"(Time).IsZero", Method, 0, ""},
+ {"(Time).Local", Method, 0, ""},
+ {"(Time).Location", Method, 0, ""},
+ {"(Time).MarshalBinary", Method, 2, ""},
+ {"(Time).MarshalJSON", Method, 0, ""},
+ {"(Time).MarshalText", Method, 2, ""},
+ {"(Time).Minute", Method, 0, ""},
+ {"(Time).Month", Method, 0, ""},
+ {"(Time).Nanosecond", Method, 0, ""},
+ {"(Time).Round", Method, 1, ""},
+ {"(Time).Second", Method, 0, ""},
+ {"(Time).String", Method, 0, ""},
+ {"(Time).Sub", Method, 0, ""},
+ {"(Time).Truncate", Method, 1, ""},
+ {"(Time).UTC", Method, 0, ""},
+ {"(Time).Unix", Method, 0, ""},
+ {"(Time).UnixMicro", Method, 17, ""},
+ {"(Time).UnixMilli", Method, 17, ""},
+ {"(Time).UnixNano", Method, 0, ""},
+ {"(Time).Weekday", Method, 0, ""},
+ {"(Time).Year", Method, 0, ""},
+ {"(Time).YearDay", Method, 1, ""},
+ {"(Time).Zone", Method, 0, ""},
+ {"(Time).ZoneBounds", Method, 19, ""},
+ {"(Weekday).String", Method, 0, ""},
+ {"ANSIC", Const, 0, ""},
+ {"After", Func, 0, "func(d Duration) <-chan Time"},
+ {"AfterFunc", Func, 0, "func(d Duration, f func()) *Timer"},
+ {"April", Const, 0, ""},
+ {"August", Const, 0, ""},
+ {"Date", Func, 0, "func(year int, month Month, day int, hour int, min int, sec int, nsec int, loc *Location) Time"},
+ {"DateOnly", Const, 20, ""},
+ {"DateTime", Const, 20, ""},
+ {"December", Const, 0, ""},
+ {"Duration", Type, 0, ""},
+ {"February", Const, 0, ""},
+ {"FixedZone", Func, 0, "func(name string, offset int) *Location"},
+ {"Friday", Const, 0, ""},
+ {"Hour", Const, 0, ""},
+ {"January", Const, 0, ""},
+ {"July", Const, 0, ""},
+ {"June", Const, 0, ""},
+ {"Kitchen", Const, 0, ""},
+ {"Layout", Const, 17, ""},
+ {"LoadLocation", Func, 0, "func(name string) (*Location, error)"},
+ {"LoadLocationFromTZData", Func, 10, "func(name string, data []byte) (*Location, error)"},
+ {"Local", Var, 0, ""},
+ {"Location", Type, 0, ""},
+ {"March", Const, 0, ""},
+ {"May", Const, 0, ""},
+ {"Microsecond", Const, 0, ""},
+ {"Millisecond", Const, 0, ""},
+ {"Minute", Const, 0, ""},
+ {"Monday", Const, 0, ""},
+ {"Month", Type, 0, ""},
+ {"Nanosecond", Const, 0, ""},
+ {"NewTicker", Func, 0, "func(d Duration) *Ticker"},
+ {"NewTimer", Func, 0, "func(d Duration) *Timer"},
+ {"November", Const, 0, ""},
+ {"Now", Func, 0, "func() Time"},
+ {"October", Const, 0, ""},
+ {"Parse", Func, 0, "func(layout string, value string) (Time, error)"},
+ {"ParseDuration", Func, 0, "func(s string) (Duration, error)"},
+ {"ParseError", Type, 0, ""},
+ {"ParseError.Layout", Field, 0, ""},
+ {"ParseError.LayoutElem", Field, 0, ""},
+ {"ParseError.Message", Field, 0, ""},
+ {"ParseError.Value", Field, 0, ""},
+ {"ParseError.ValueElem", Field, 0, ""},
+ {"ParseInLocation", Func, 1, "func(layout string, value string, loc *Location) (Time, error)"},
+ {"RFC1123", Const, 0, ""},
+ {"RFC1123Z", Const, 0, ""},
+ {"RFC3339", Const, 0, ""},
+ {"RFC3339Nano", Const, 0, ""},
+ {"RFC822", Const, 0, ""},
+ {"RFC822Z", Const, 0, ""},
+ {"RFC850", Const, 0, ""},
+ {"RubyDate", Const, 0, ""},
+ {"Saturday", Const, 0, ""},
+ {"Second", Const, 0, ""},
+ {"September", Const, 0, ""},
+ {"Since", Func, 0, "func(t Time) Duration"},
+ {"Sleep", Func, 0, "func(d Duration)"},
+ {"Stamp", Const, 0, ""},
+ {"StampMicro", Const, 0, ""},
+ {"StampMilli", Const, 0, ""},
+ {"StampNano", Const, 0, ""},
+ {"Sunday", Const, 0, ""},
+ {"Thursday", Const, 0, ""},
+ {"Tick", Func, 0, "func(d Duration) <-chan Time"},
+ {"Ticker", Type, 0, ""},
+ {"Ticker.C", Field, 0, ""},
+ {"Time", Type, 0, ""},
+ {"TimeOnly", Const, 20, ""},
+ {"Timer", Type, 0, ""},
+ {"Timer.C", Field, 0, ""},
+ {"Tuesday", Const, 0, ""},
+ {"UTC", Var, 0, ""},
+ {"Unix", Func, 0, "func(sec int64, nsec int64) Time"},
+ {"UnixDate", Const, 0, ""},
+ {"UnixMicro", Func, 17, "func(usec int64) Time"},
+ {"UnixMilli", Func, 17, "func(msec int64) Time"},
+ {"Until", Func, 8, "func(t Time) Duration"},
+ {"Wednesday", Const, 0, ""},
+ {"Weekday", Type, 0, ""},
+ },
+ "unicode": {
+ {"(SpecialCase).ToLower", Method, 0, ""},
+ {"(SpecialCase).ToTitle", Method, 0, ""},
+ {"(SpecialCase).ToUpper", Method, 0, ""},
+ {"ASCII_Hex_Digit", Var, 0, ""},
+ {"Adlam", Var, 7, ""},
+ {"Ahom", Var, 5, ""},
+ {"Anatolian_Hieroglyphs", Var, 5, ""},
+ {"Arabic", Var, 0, ""},
+ {"Armenian", Var, 0, ""},
+ {"Avestan", Var, 0, ""},
+ {"AzeriCase", Var, 0, ""},
+ {"Balinese", Var, 0, ""},
+ {"Bamum", Var, 0, ""},
+ {"Bassa_Vah", Var, 4, ""},
+ {"Batak", Var, 0, ""},
+ {"Bengali", Var, 0, ""},
+ {"Bhaiksuki", Var, 7, ""},
+ {"Bidi_Control", Var, 0, ""},
+ {"Bopomofo", Var, 0, ""},
+ {"Brahmi", Var, 0, ""},
+ {"Braille", Var, 0, ""},
+ {"Buginese", Var, 0, ""},
+ {"Buhid", Var, 0, ""},
+ {"C", Var, 0, ""},
+ {"Canadian_Aboriginal", Var, 0, ""},
+ {"Carian", Var, 0, ""},
+ {"CaseRange", Type, 0, ""},
+ {"CaseRange.Delta", Field, 0, ""},
+ {"CaseRange.Hi", Field, 0, ""},
+ {"CaseRange.Lo", Field, 0, ""},
+ {"CaseRanges", Var, 0, ""},
+ {"Categories", Var, 0, ""},
+ {"CategoryAliases", Var, 25, ""},
+ {"Caucasian_Albanian", Var, 4, ""},
+ {"Cc", Var, 0, ""},
+ {"Cf", Var, 0, ""},
+ {"Chakma", Var, 1, ""},
+ {"Cham", Var, 0, ""},
+ {"Cherokee", Var, 0, ""},
+ {"Chorasmian", Var, 16, ""},
+ {"Cn", Var, 25, ""},
+ {"Co", Var, 0, ""},
+ {"Common", Var, 0, ""},
+ {"Coptic", Var, 0, ""},
+ {"Cs", Var, 0, ""},
+ {"Cuneiform", Var, 0, ""},
+ {"Cypriot", Var, 0, ""},
+ {"Cypro_Minoan", Var, 21, ""},
+ {"Cyrillic", Var, 0, ""},
+ {"Dash", Var, 0, ""},
+ {"Deprecated", Var, 0, ""},
+ {"Deseret", Var, 0, ""},
+ {"Devanagari", Var, 0, ""},
+ {"Diacritic", Var, 0, ""},
+ {"Digit", Var, 0, ""},
+ {"Dives_Akuru", Var, 16, ""},
+ {"Dogra", Var, 13, ""},
+ {"Duployan", Var, 4, ""},
+ {"Egyptian_Hieroglyphs", Var, 0, ""},
+ {"Elbasan", Var, 4, ""},
+ {"Elymaic", Var, 14, ""},
+ {"Ethiopic", Var, 0, ""},
+ {"Extender", Var, 0, ""},
+ {"FoldCategory", Var, 0, ""},
+ {"FoldScript", Var, 0, ""},
+ {"Georgian", Var, 0, ""},
+ {"Glagolitic", Var, 0, ""},
+ {"Gothic", Var, 0, ""},
+ {"Grantha", Var, 4, ""},
+ {"GraphicRanges", Var, 0, ""},
+ {"Greek", Var, 0, ""},
+ {"Gujarati", Var, 0, ""},
+ {"Gunjala_Gondi", Var, 13, ""},
+ {"Gurmukhi", Var, 0, ""},
+ {"Han", Var, 0, ""},
+ {"Hangul", Var, 0, ""},
+ {"Hanifi_Rohingya", Var, 13, ""},
+ {"Hanunoo", Var, 0, ""},
+ {"Hatran", Var, 5, ""},
+ {"Hebrew", Var, 0, ""},
+ {"Hex_Digit", Var, 0, ""},
+ {"Hiragana", Var, 0, ""},
+ {"Hyphen", Var, 0, ""},
+ {"IDS_Binary_Operator", Var, 0, ""},
+ {"IDS_Trinary_Operator", Var, 0, ""},
+ {"Ideographic", Var, 0, ""},
+ {"Imperial_Aramaic", Var, 0, ""},
+ {"In", Func, 2, "func(r rune, ranges ...*RangeTable) bool"},
+ {"Inherited", Var, 0, ""},
+ {"Inscriptional_Pahlavi", Var, 0, ""},
+ {"Inscriptional_Parthian", Var, 0, ""},
+ {"Is", Func, 0, "func(rangeTab *RangeTable, r rune) bool"},
+ {"IsControl", Func, 0, "func(r rune) bool"},
+ {"IsDigit", Func, 0, "func(r rune) bool"},
+ {"IsGraphic", Func, 0, "func(r rune) bool"},
+ {"IsLetter", Func, 0, "func(r rune) bool"},
+ {"IsLower", Func, 0, "func(r rune) bool"},
+ {"IsMark", Func, 0, "func(r rune) bool"},
+ {"IsNumber", Func, 0, "func(r rune) bool"},
+ {"IsOneOf", Func, 0, "func(ranges []*RangeTable, r rune) bool"},
+ {"IsPrint", Func, 0, "func(r rune) bool"},
+ {"IsPunct", Func, 0, "func(r rune) bool"},
+ {"IsSpace", Func, 0, "func(r rune) bool"},
+ {"IsSymbol", Func, 0, "func(r rune) bool"},
+ {"IsTitle", Func, 0, "func(r rune) bool"},
+ {"IsUpper", Func, 0, "func(r rune) bool"},
+ {"Javanese", Var, 0, ""},
+ {"Join_Control", Var, 0, ""},
+ {"Kaithi", Var, 0, ""},
+ {"Kannada", Var, 0, ""},
+ {"Katakana", Var, 0, ""},
+ {"Kawi", Var, 21, ""},
+ {"Kayah_Li", Var, 0, ""},
+ {"Kharoshthi", Var, 0, ""},
+ {"Khitan_Small_Script", Var, 16, ""},
+ {"Khmer", Var, 0, ""},
+ {"Khojki", Var, 4, ""},
+ {"Khudawadi", Var, 4, ""},
+ {"L", Var, 0, ""},
+ {"LC", Var, 25, ""},
+ {"Lao", Var, 0, ""},
+ {"Latin", Var, 0, ""},
+ {"Lepcha", Var, 0, ""},
+ {"Letter", Var, 0, ""},
+ {"Limbu", Var, 0, ""},
+ {"Linear_A", Var, 4, ""},
+ {"Linear_B", Var, 0, ""},
+ {"Lisu", Var, 0, ""},
+ {"Ll", Var, 0, ""},
+ {"Lm", Var, 0, ""},
+ {"Lo", Var, 0, ""},
+ {"Logical_Order_Exception", Var, 0, ""},
+ {"Lower", Var, 0, ""},
+ {"LowerCase", Const, 0, ""},
+ {"Lt", Var, 0, ""},
+ {"Lu", Var, 0, ""},
+ {"Lycian", Var, 0, ""},
+ {"Lydian", Var, 0, ""},
+ {"M", Var, 0, ""},
+ {"Mahajani", Var, 4, ""},
+ {"Makasar", Var, 13, ""},
+ {"Malayalam", Var, 0, ""},
+ {"Mandaic", Var, 0, ""},
+ {"Manichaean", Var, 4, ""},
+ {"Marchen", Var, 7, ""},
+ {"Mark", Var, 0, ""},
+ {"Masaram_Gondi", Var, 10, ""},
+ {"MaxASCII", Const, 0, ""},
+ {"MaxCase", Const, 0, ""},
+ {"MaxLatin1", Const, 0, ""},
+ {"MaxRune", Const, 0, ""},
+ {"Mc", Var, 0, ""},
+ {"Me", Var, 0, ""},
+ {"Medefaidrin", Var, 13, ""},
+ {"Meetei_Mayek", Var, 0, ""},
+ {"Mende_Kikakui", Var, 4, ""},
+ {"Meroitic_Cursive", Var, 1, ""},
+ {"Meroitic_Hieroglyphs", Var, 1, ""},
+ {"Miao", Var, 1, ""},
+ {"Mn", Var, 0, ""},
+ {"Modi", Var, 4, ""},
+ {"Mongolian", Var, 0, ""},
+ {"Mro", Var, 4, ""},
+ {"Multani", Var, 5, ""},
+ {"Myanmar", Var, 0, ""},
+ {"N", Var, 0, ""},
+ {"Nabataean", Var, 4, ""},
+ {"Nag_Mundari", Var, 21, ""},
+ {"Nandinagari", Var, 14, ""},
+ {"Nd", Var, 0, ""},
+ {"New_Tai_Lue", Var, 0, ""},
+ {"Newa", Var, 7, ""},
+ {"Nko", Var, 0, ""},
+ {"Nl", Var, 0, ""},
+ {"No", Var, 0, ""},
+ {"Noncharacter_Code_Point", Var, 0, ""},
+ {"Number", Var, 0, ""},
+ {"Nushu", Var, 10, ""},
+ {"Nyiakeng_Puachue_Hmong", Var, 14, ""},
+ {"Ogham", Var, 0, ""},
+ {"Ol_Chiki", Var, 0, ""},
+ {"Old_Hungarian", Var, 5, ""},
+ {"Old_Italic", Var, 0, ""},
+ {"Old_North_Arabian", Var, 4, ""},
+ {"Old_Permic", Var, 4, ""},
+ {"Old_Persian", Var, 0, ""},
+ {"Old_Sogdian", Var, 13, ""},
+ {"Old_South_Arabian", Var, 0, ""},
+ {"Old_Turkic", Var, 0, ""},
+ {"Old_Uyghur", Var, 21, ""},
+ {"Oriya", Var, 0, ""},
+ {"Osage", Var, 7, ""},
+ {"Osmanya", Var, 0, ""},
+ {"Other", Var, 0, ""},
+ {"Other_Alphabetic", Var, 0, ""},
+ {"Other_Default_Ignorable_Code_Point", Var, 0, ""},
+ {"Other_Grapheme_Extend", Var, 0, ""},
+ {"Other_ID_Continue", Var, 0, ""},
+ {"Other_ID_Start", Var, 0, ""},
+ {"Other_Lowercase", Var, 0, ""},
+ {"Other_Math", Var, 0, ""},
+ {"Other_Uppercase", Var, 0, ""},
+ {"P", Var, 0, ""},
+ {"Pahawh_Hmong", Var, 4, ""},
+ {"Palmyrene", Var, 4, ""},
+ {"Pattern_Syntax", Var, 0, ""},
+ {"Pattern_White_Space", Var, 0, ""},
+ {"Pau_Cin_Hau", Var, 4, ""},
+ {"Pc", Var, 0, ""},
+ {"Pd", Var, 0, ""},
+ {"Pe", Var, 0, ""},
+ {"Pf", Var, 0, ""},
+ {"Phags_Pa", Var, 0, ""},
+ {"Phoenician", Var, 0, ""},
+ {"Pi", Var, 0, ""},
+ {"Po", Var, 0, ""},
+ {"Prepended_Concatenation_Mark", Var, 7, ""},
+ {"PrintRanges", Var, 0, ""},
+ {"Properties", Var, 0, ""},
+ {"Ps", Var, 0, ""},
+ {"Psalter_Pahlavi", Var, 4, ""},
+ {"Punct", Var, 0, ""},
+ {"Quotation_Mark", Var, 0, ""},
+ {"Radical", Var, 0, ""},
+ {"Range16", Type, 0, ""},
+ {"Range16.Hi", Field, 0, ""},
+ {"Range16.Lo", Field, 0, ""},
+ {"Range16.Stride", Field, 0, ""},
+ {"Range32", Type, 0, ""},
+ {"Range32.Hi", Field, 0, ""},
+ {"Range32.Lo", Field, 0, ""},
+ {"Range32.Stride", Field, 0, ""},
+ {"RangeTable", Type, 0, ""},
+ {"RangeTable.LatinOffset", Field, 1, ""},
+ {"RangeTable.R16", Field, 0, ""},
+ {"RangeTable.R32", Field, 0, ""},
+ {"Regional_Indicator", Var, 10, ""},
+ {"Rejang", Var, 0, ""},
+ {"ReplacementChar", Const, 0, ""},
+ {"Runic", Var, 0, ""},
+ {"S", Var, 0, ""},
+ {"STerm", Var, 0, ""},
+ {"Samaritan", Var, 0, ""},
+ {"Saurashtra", Var, 0, ""},
+ {"Sc", Var, 0, ""},
+ {"Scripts", Var, 0, ""},
+ {"Sentence_Terminal", Var, 7, ""},
+ {"Sharada", Var, 1, ""},
+ {"Shavian", Var, 0, ""},
+ {"Siddham", Var, 4, ""},
+ {"SignWriting", Var, 5, ""},
+ {"SimpleFold", Func, 0, "func(r rune) rune"},
+ {"Sinhala", Var, 0, ""},
+ {"Sk", Var, 0, ""},
+ {"Sm", Var, 0, ""},
+ {"So", Var, 0, ""},
+ {"Soft_Dotted", Var, 0, ""},
+ {"Sogdian", Var, 13, ""},
+ {"Sora_Sompeng", Var, 1, ""},
+ {"Soyombo", Var, 10, ""},
+ {"Space", Var, 0, ""},
+ {"SpecialCase", Type, 0, ""},
+ {"Sundanese", Var, 0, ""},
+ {"Syloti_Nagri", Var, 0, ""},
+ {"Symbol", Var, 0, ""},
+ {"Syriac", Var, 0, ""},
+ {"Tagalog", Var, 0, ""},
+ {"Tagbanwa", Var, 0, ""},
+ {"Tai_Le", Var, 0, ""},
+ {"Tai_Tham", Var, 0, ""},
+ {"Tai_Viet", Var, 0, ""},
+ {"Takri", Var, 1, ""},
+ {"Tamil", Var, 0, ""},
+ {"Tangsa", Var, 21, ""},
+ {"Tangut", Var, 7, ""},
+ {"Telugu", Var, 0, ""},
+ {"Terminal_Punctuation", Var, 0, ""},
+ {"Thaana", Var, 0, ""},
+ {"Thai", Var, 0, ""},
+ {"Tibetan", Var, 0, ""},
+ {"Tifinagh", Var, 0, ""},
+ {"Tirhuta", Var, 4, ""},
+ {"Title", Var, 0, ""},
+ {"TitleCase", Const, 0, ""},
+ {"To", Func, 0, "func(_case int, r rune) rune"},
+ {"ToLower", Func, 0, "func(r rune) rune"},
+ {"ToTitle", Func, 0, "func(r rune) rune"},
+ {"ToUpper", Func, 0, "func(r rune) rune"},
+ {"Toto", Var, 21, ""},
+ {"TurkishCase", Var, 0, ""},
+ {"Ugaritic", Var, 0, ""},
+ {"Unified_Ideograph", Var, 0, ""},
+ {"Upper", Var, 0, ""},
+ {"UpperCase", Const, 0, ""},
+ {"UpperLower", Const, 0, ""},
+ {"Vai", Var, 0, ""},
+ {"Variation_Selector", Var, 0, ""},
+ {"Version", Const, 0, ""},
+ {"Vithkuqi", Var, 21, ""},
+ {"Wancho", Var, 14, ""},
+ {"Warang_Citi", Var, 4, ""},
+ {"White_Space", Var, 0, ""},
+ {"Yezidi", Var, 16, ""},
+ {"Yi", Var, 0, ""},
+ {"Z", Var, 0, ""},
+ {"Zanabazar_Square", Var, 10, ""},
+ {"Zl", Var, 0, ""},
+ {"Zp", Var, 0, ""},
+ {"Zs", Var, 0, ""},
+ },
+ "unicode/utf16": {
+ {"AppendRune", Func, 20, "func(a []uint16, r rune) []uint16"},
+ {"Decode", Func, 0, "func(s []uint16) []rune"},
+ {"DecodeRune", Func, 0, "func(r1 rune, r2 rune) rune"},
+ {"Encode", Func, 0, "func(s []rune) []uint16"},
+ {"EncodeRune", Func, 0, "func(r rune) (r1 rune, r2 rune)"},
+ {"IsSurrogate", Func, 0, "func(r rune) bool"},
+ {"RuneLen", Func, 23, "func(r rune) int"},
+ },
+ "unicode/utf8": {
+ {"AppendRune", Func, 18, "func(p []byte, r rune) []byte"},
+ {"DecodeLastRune", Func, 0, "func(p []byte) (r rune, size int)"},
+ {"DecodeLastRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+ {"DecodeRune", Func, 0, "func(p []byte) (r rune, size int)"},
+ {"DecodeRuneInString", Func, 0, "func(s string) (r rune, size int)"},
+ {"EncodeRune", Func, 0, "func(p []byte, r rune) int"},
+ {"FullRune", Func, 0, "func(p []byte) bool"},
+ {"FullRuneInString", Func, 0, "func(s string) bool"},
+ {"MaxRune", Const, 0, ""},
+ {"RuneCount", Func, 0, "func(p []byte) int"},
+ {"RuneCountInString", Func, 0, "func(s string) (n int)"},
+ {"RuneError", Const, 0, ""},
+ {"RuneLen", Func, 0, "func(r rune) int"},
+ {"RuneSelf", Const, 0, ""},
+ {"RuneStart", Func, 0, "func(b byte) bool"},
+ {"UTFMax", Const, 0, ""},
+ {"Valid", Func, 0, "func(p []byte) bool"},
+ {"ValidRune", Func, 1, "func(r rune) bool"},
+ {"ValidString", Func, 0, "func(s string) bool"},
+ },
+ "unique": {
+ {"(Handle).Value", Method, 23, ""},
+ {"Handle", Type, 23, ""},
+ {"Make", Func, 23, "func[T comparable](value T) Handle[T]"},
+ },
+ "unsafe": {
+ {"Add", Func, 0, ""},
+ {"Alignof", Func, 0, ""},
+ {"Offsetof", Func, 0, ""},
+ {"Pointer", Type, 0, ""},
+ {"Sizeof", Func, 0, ""},
+ {"Slice", Func, 0, ""},
+ {"SliceData", Func, 0, ""},
+ {"String", Func, 0, ""},
+ {"StringData", Func, 0, ""},
+ },
+ "weak": {
+ {"(Pointer).Value", Method, 24, ""},
+ {"Make", Func, 24, "func[T any](ptr *T) Pointer[T]"},
+ {"Pointer", Type, 24, ""},
+ },
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/operator/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
new file mode 100644
index 00000000..e223e0f3
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -0,0 +1,105 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run generate.go
+
+// Package stdlib provides a table of all exported symbols in the
+// standard library, along with the version at which they first
+// appeared. It also provides the import graph of std packages.
+package stdlib
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Symbol struct {
+ Name string
+ Kind Kind
+ Version Version // Go version that first included the symbol
+ // Signature provides the type of a function (defined only for Kind=Func).
+ // Imported types are denoted as pkg.T; pkg is not fully qualified.
+ // TODO(adonovan): use an unambiguous encoding that is parseable.
+ //
+ // Examples:
+ // func[M ~map[K]V, K comparable, V any](m M) M
+ // func(fi fs.FileInfo, link string) (*Header, error)
+ Signature string // if Kind == stdlib.Func
+}
+
+// A Kind indicates the kind of a symbol:
+// function, variable, constant, type, and so on.
+type Kind int8
+
+const (
+ Invalid Kind = iota // Example name:
+ Type // "Buffer"
+ Func // "Println"
+ Var // "EOF"
+ Const // "Pi"
+ Field // "Point.X"
+ Method // "(*Buffer).Grow"
+)
+
+func (kind Kind) String() string {
+ return [...]string{
+ Invalid: "invalid",
+ Type: "type",
+ Func: "func",
+ Var: "var",
+ Const: "const",
+ Field: "field",
+ Method: "method",
+ }[kind]
+}
+
+// A Version represents a version of Go of the form "go1.%d".
+type Version int8
+
+// String returns a version string of the form "go1.23", without allocating.
+func (v Version) String() string { return versions[v] }
+
+var versions [30]string // (increase constant as needed)
+
+func init() {
+ for i := range versions {
+ versions[i] = fmt.Sprintf("go1.%d", i)
+ }
+}
+
+// HasPackage reports whether the specified package path is part of
+// the standard library's public API.
+func HasPackage(path string) bool {
+ _, ok := PackageSymbols[path]
+ return ok
+}
+
+// SplitField splits the field symbol name into type and field
+// components. It must be called only on Field symbols.
+//
+// Example: "File.Package" -> ("File", "Package")
+func (sym *Symbol) SplitField() (typename, name string) {
+ if sym.Kind != Field {
+ panic("not a field")
+ }
+ typename, name, _ = strings.Cut(sym.Name, ".")
+ return
+}
+
+// SplitMethod splits the method symbol name into pointer, receiver,
+// and method components. It must be called only on Method symbols.
+//
+// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow")
+func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) {
+ if sym.Kind != Method {
+ panic("not a method")
+ }
+ recv, name, _ = strings.Cut(sym.Name, ".")
+ recv = recv[len("(") : len(recv)-len(")")]
+ ptr = recv[0] == '*'
+ if ptr {
+ recv = recv[len("*"):]
+ }
+ return
+}
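
The generated symbol table earlier in this diff and the stdlib.go helpers above are vendored verbatim from golang.org/x/tools; together they let a tool ask when a standard-library symbol first appeared. A rough usage sketch (illustrative only and not part of the vendored files; internal/stdlib is an internal package, so code like this could only compile inside the x/tools module, and the chosen package and output format are assumptions of the sketch):

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	if !stdlib.HasPackage("time") {
		return // not part of the std public API
	}
	// List the methods of package time and the Go release that introduced each.
	for _, sym := range stdlib.PackageSymbols["time"] {
		if sym.Kind == stdlib.Method {
			ptr, recv, name := sym.SplitMethod()
			fmt.Printf("%s.%s (pointer receiver: %v) first appeared in %s\n",
				recv, name, ptr, sym.Version)
		}
	}
}
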
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/common.go b/operator/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 00000000..cdae2b8e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,68 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *ast.IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &ast.IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.TypeParam)
+ return ok
+}
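
UnpackIndexExpr and PackIndexExpr above normalize the two AST shapes Go uses for index and instantiation expressions. A minimal round-trip sketch (illustrative only; the parsed string is arbitrary, and the internal typeparams package is importable only from inside x/tools):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	// "m[K, V]" parses as an *ast.IndexListExpr; "m[K]" would be an *ast.IndexExpr.
	expr, err := parser.ParseExpr("m[K, V]")
	if err != nil {
		panic(err)
	}
	x, lbrack, indices, rbrack := typeparams.UnpackIndexExpr(expr)
	fmt.Println(x.(*ast.Ident).Name, len(indices)) // m 2

	// Packing two indices back together yields an *ast.IndexListExpr again.
	fmt.Printf("%T\n", typeparams.PackIndexExpr(x, lbrack, indices, rbrack))
}
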
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/operator/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 00000000..27a2b179
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types, the underlying type is the core type.
+ }
+
+ terms, err := NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+ // typeSetOf(T) == typeSetOf(Unalias(T))
+ typ := types.Unalias(T)
+ if named, ok := typ.(*types.Named); ok {
+ typ = named.Underlying()
+ }
+ switch typ := typ.(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, T)}, nil
+ }
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ panic(fmt.Sprintf("%v is not a pointer", t))
+}
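
A small sketch of CoreType and Deref on a hand-built named type (illustrative only; the type name is arbitrary and the internal import resolves only inside x/tools):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	// type IntSlice []int: for a defined non-interface type, the core type
	// is simply the underlying type.
	obj := types.NewTypeName(token.NoPos, nil, "IntSlice", nil)
	named := types.NewNamed(obj, types.NewSlice(types.Typ[types.Int]), nil)
	fmt.Println(typeparams.CoreType(named)) // []int

	// Deref peels off one pointer when the core type is a pointer.
	fmt.Println(typeparams.Deref(types.NewPointer(named))) // IntSlice
}
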
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/free.go b/operator/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 00000000..709d2fc1
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,131 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+ seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ if w.seen == nil {
+ w.seen = make(map[types.Type]bool)
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *types.Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *types.Alias:
+ if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() {
+ return true // This is an uninstantiated Alias.
+ }
+ // The expansion of an alias can have free type parameters,
+ // whether or not the alias itself has type parameters:
+ //
+ // func _[K comparable]() {
+ // type Set = map[K]bool // free(Set) = {K}
+ // type MapTo[V] = map[K]V // free(Map[foo]) = {V}
+ // }
+ //
+ // So, we must Unalias.
+ return w.Has(types.Unalias(t))
+
+ case *types.Array:
+ return w.Has(t.Elem())
+
+ case *types.Slice:
+ return w.Has(t.Elem())
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ if w.Has(t.Field(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Pointer:
+ return w.Has(t.Elem())
+
+ case *types.Tuple:
+ n := t.Len()
+ for i := range n {
+ if w.Has(t.At(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters, we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.Has(t.Params()) || w.Has(t.Results())
+
+ case *types.Interface:
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ if w.Has(t.Method(i).Type()) {
+ return true
+ }
+ }
+ terms, err := InterfaceTermSet(t)
+ if err != nil {
+ return false // ill typed
+ }
+ for _, term := range terms {
+ if w.Has(term.Type()) {
+ return true
+ }
+ }
+
+ case *types.Map:
+ return w.Has(t.Key()) || w.Has(t.Elem())
+
+ case *types.Chan:
+ return w.Has(t.Elem())
+
+ case *types.Named:
+ args := t.TypeArgs()
+ if params := t.TypeParams(); params.Len() > args.Len() {
+ return true // this is an uninstantiated named type.
+ }
+ for i, n := 0, args.Len(); i < n; i++ {
+ if w.Has(args.At(i)) {
+ return true
+ }
+ }
+ return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+ case *types.TypeParam:
+ return true
+
+ default:
+ panic(t) // unreachable
+ }
+
+ return false
+}
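
Free.Has answers "does this type mention an unbound type parameter anywhere?". A minimal sketch with a synthetic type parameter (names are arbitrary; the internal-import caveat above applies here too):

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	// A type parameter P constrained by any.
	anyType := types.Universe.Lookup("any").Type()
	p := types.NewTypeParam(types.NewTypeName(token.NoPos, nil, "P", nil), anyType)

	var free typeparams.Free
	fmt.Println(free.Has(types.NewSlice(p)))                    // true: []P mentions P
	fmt.Println(free.Has(types.NewSlice(types.Typ[types.Int]))) // false: []int is ground
}
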
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/operator/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 00000000..f49802b8
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...any) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
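
The normalization entry points are easiest to see on a hand-built constraint. A sketch that computes the term set of interface{ ~string | int } (illustrative only; same internal-import caveat as above):

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/internal/typeparams"
)

func main() {
	union := types.NewUnion([]*types.Term{
		types.NewTerm(true, types.Typ[types.String]), // ~string
		types.NewTerm(false, types.Typ[types.Int]),   // int
	})
	iface := types.NewInterfaceType(nil, []types.Type{union})
	iface.Complete()

	terms, err := typeparams.InterfaceTermSet(iface)
	if err != nil {
		panic(err)
	}
	for _, t := range terms {
		fmt.Println(t) // ~string and int (order is deterministic but unspecified)
	}
}
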
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/operator/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000..9bc29143
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,169 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/termlist.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "go/types"
+ "strings"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// termSep is the separator used between individual terms.
+const termSep = " | "
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf strings.Builder
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(termSep)
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
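
Because termlist and term are unexported, the normal-form arithmetic can only be exercised from inside the package. A rough sketch, written as if it sat in a _test.go file alongside this vendored source, with arbitrary example terms:

package typeparams

import (
	"go/types"
	"testing"
)

func TestTermlistSketch(t *testing.T) {
	xl := termlist{
		{tilde: true, typ: types.Typ[types.String]}, // ~string
		{tilde: false, typ: types.Typ[types.Int]},   // int
	}
	yl := termlist{
		{tilde: false, typ: types.Typ[types.String]}, // string
	}

	t.Log(xl.union(yl))     // ~string | int: the plain string term is absorbed by ~string
	t.Log(xl.intersect(yl)) // string
	t.Log(allTermlist.isAll(), xl.subsetOf(allTermlist)) // true true
}
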
diff --git a/operator/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/operator/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000..fa758cdc
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,172 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/typeterm.go
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
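
The four elementary sets and their algebra are likewise only reachable from inside the package. A minimal sketch with arbitrary values (illustrative only, not part of the vendored file):

package typeparams

import (
	"fmt"
	"go/types"
)

// termAlgebraSketch illustrates the term encodings; it is not part of the vendored file.
func termAlgebraSketch() {
	var (
		empty    *term                                           // ∅
		universe = &term{}                                       // 𝓤
		intT     = &term{typ: types.Typ[types.Int]}              // int
		tildeInt = &term{tilde: true, typ: types.Typ[types.Int]} // ~int
	)
	fmt.Println(empty.subsetOf(intT))     // true:  ∅ ⊆ int
	fmt.Println(intT.subsetOf(tildeInt))  // true:  int ⊆ ~int
	fmt.Println(tildeInt.subsetOf(intT))  // false: ~int is not a subset of int
	fmt.Println(intT.intersect(universe)) // int:   int ∩ 𝓤 == int
}
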
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
new file mode 100644
index 00000000..3db2a135
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -0,0 +1,137 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+ _ "unsafe"
+)
+
+// CallKind describes the function position of an [*ast.CallExpr].
+type CallKind int
+
+const (
+ CallStatic CallKind = iota // static call to known function
+ CallInterface // dynamic call through an interface method
+ CallDynamic // dynamic call of a func value
+ CallBuiltin // call to a builtin function
+ CallConversion // a conversion (not a call)
+)
+
+var callKindNames = []string{
+ "CallStatic",
+ "CallInterface",
+ "CallDynamic",
+ "CallBuiltin",
+ "CallConversion",
+}
+
+func (k CallKind) String() string {
+ if i := int(k); i >= 0 && i < len(callKindNames) {
+ return callKindNames[i]
+ }
+ return fmt.Sprintf("typeutil.CallKind(%d)", k)
+}
+
+// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
+// It distinguishes among true function calls, calls to builtins, and type conversions,
+// and further classifies function calls as static calls (where the function is known),
+// dynamic interface calls, and other dynamic calls.
+//
+// For the declarations:
+//
+// func f() {}
+// func g[T any]() {}
+// var v func()
+// var s []func()
+// type I interface { M() }
+// var i I
+//
+// ClassifyCall returns the following:
+//
+// f() CallStatic
+// g[int]() CallStatic
+// i.M() CallInterface
+// min(1, 2) CallBuiltin
+// v() CallDynamic
+// s[0]() CallDynamic
+// int(x) CallConversion
+// []byte("") CallConversion
+func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
+ if info.Types == nil {
+ panic("ClassifyCall: info.Types is nil")
+ }
+ tv := info.Types[call.Fun]
+ if tv.IsType() {
+ return CallConversion
+ }
+ if tv.IsBuiltin() {
+ return CallBuiltin
+ }
+ obj := info.Uses[UsedIdent(info, call.Fun)]
+ // Classify the call by the type of the object, if any.
+ switch obj := obj.(type) {
+ case *types.Func:
+ if interfaceMethod(obj) {
+ return CallInterface
+ }
+ return CallStatic
+ default:
+ return CallDynamic
+ }
+}
+
+// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
+// is the [types.Object] used by e, if any.
+//
+// If e is one of various forms of reference:
+//
+// f, c, v, T lexical reference
+// pkg.X qualified identifier
+// f[T] or pkg.F[K,V] instantiations of the above kinds
+// expr.f field or method value selector
+// T.f method expression selector
+//
+// UsedIdent returns the identifier whose associated value in [types.Info.Uses]
+// is the object to which it refers.
+//
+// For the declarations:
+//
+// func F[T any] {...}
+// type I interface { M() }
+// var (
+// x int
+// s struct { f int }
+// a []int
+// i I
+// )
+//
+// UsedIdent returns the following:
+//
+// Expr UsedIdent
+// x x
+// s.f f
+// F[int] F
+// i.M M
+// I.M M
+// min min
+// int int
+// 1 nil
+// a[0] nil
+// []byte nil
+//
+// Note: if e is an instantiated function or method, UsedIdent returns
+// the corresponding generic function or method on the generic type.
+func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ return usedIdent(info, e)
+}
+
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
+
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool
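
A compact sketch of ClassifyCall over a type-checked snippet (the source string and output formatting are made up for illustration, and the internal import resolves only inside x/tools):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

const src = `package p
func f() {}
var v func()
func _() { f(); v(); println(); _ = int(0) }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
		Uses:  make(map[*ast.Ident]types.Object),
	}
	var conf types.Config // no Importer needed: the snippet imports nothing
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			// Expected, in source order: CallStatic f, CallDynamic v,
			// CallBuiltin println, CallConversion int.
			fmt.Println(typesinternal.ClassifyCall(info, call), types.ExprString(call.Fun))
		}
		return true
	})
}
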
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/element.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 00000000..4957f021
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/types"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from its
+// type through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+ var visit func(T types.Type, skip bool)
+ visit = func(T types.Type, skip bool) {
+ if !skip {
+ if seen, _ := rtypes.Set(T, true).(bool); seen {
+ return // de-dup
+ }
+
+ f(T) // notify caller of new element type
+ }
+
+ // Recursion over signatures of each method.
+ tmset := msets.MethodSet(T)
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ // It is tempting to call visit(sig, false)
+ // but, as noted in golang.org/cl/65450043,
+ // the Signature.Recv field is ignored by
+ // types.Identical and typeutil.Map, which
+ // is confusing at best.
+ //
+ // More importantly, the true signature rtype
+ // reachable from a method using reflection
+ // has no receiver but an extra ordinary parameter.
+ // For the Read method of io.Reader we want:
+ // func(Reader, []byte) (int, error)
+ // but here sig is:
+ // func([]byte) (int, error)
+ // with .Recv = Reader (though it is hard to
+ // notice because it doesn't affect Signature.String
+ // or types.Identical).
+ //
+ // TODO(adonovan): construct and visit the correct
+ // non-method signature with an extra parameter
+ // (though since unnamed func types have no methods
+ // there is essentially no actual demand for this).
+ //
+ // TODO(adonovan): document whether or not it is
+ // safe to skip non-exported methods (as RTA does).
+ visit(sig.Params(), true) // skip the Tuple
+ visit(sig.Results(), true) // skip the Tuple
+ }
+
+ switch T := T.(type) {
+ case *types.Alias:
+ visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ visit(T.Elem(), false)
+
+ case *types.Slice:
+ visit(T.Elem(), false)
+
+ case *types.Chan:
+ visit(T.Elem(), false)
+
+ case *types.Map:
+ visit(T.Key(), false)
+ visit(T.Elem(), false)
+
+ case *types.Signature:
+ if T.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+ }
+ visit(T.Params(), true) // skip the Tuple
+ visit(T.Results(), true) // skip the Tuple
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ visit(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ visit(T.Underlying(), true) // skip the unnamed type
+
+ case *types.Array:
+ visit(T.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, T.NumFields(); i < n; i++ {
+ // TODO(adonovan): document whether or not
+ // it is safe to skip non-exported fields.
+ visit(T.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, T.Len(); i < n; i++ {
+ visit(T.At(i).Type(), false)
+ }
+
+ case *types.TypeParam, *types.Union:
+			// ForEachElement must not be called on parameterized types.
+ panic(T)
+
+ default:
+ panic(T)
+ }
+ }
+ visit(T, false)
+}
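
Illustration (not part of the vendored code): a minimal sketch of ForEachElement walking the types reachable from a hand-constructed struct type. The seed type is an arbitrary choice for the example.

package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	// Seed type: struct{ F []chan int }
	field := types.NewField(token.NoPos, nil, "F",
		types.NewSlice(types.NewChan(types.SendRecv, types.Typ[types.Int])), false)
	T := types.NewStruct([]*types.Var{field}, nil)

	var (
		rtypes typeutil.Map            // de-dup set, may be shared across calls
		msets  typeutil.MethodSetCache // method-set cache
	)
	typesinternal.ForEachElement(&rtypes, &msets, T, func(t types.Type) {
		fmt.Println(t) // struct{F []chan int}, []chan int, chan int, int
	})
}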
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
new file mode 100644
index 00000000..235a6def
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -0,0 +1,1560 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+//go:generate stringer -type=ErrorCode
+
+type ErrorCode int
+
+// This file defines the error codes that can be produced during type-checking.
+// Collectively, these codes provide an identifier that may be used to
+// implement special handling for certain types of errors.
+//
+// Error codes should be fine-grained enough that the exact nature of the error
+// can be easily determined, but coarse enough that they are not an
+// implementation detail of the type checking algorithm. As a rule-of-thumb,
+// errors should be considered equivalent if there is a theoretical refactoring
+// of the type checker in which they are emitted in exactly one place. For
+// example, the type checker emits different error messages for "too many
+// arguments" and "too few arguments", but one can imagine an alternative type
+// checker where this check instead just emits a single "wrong number of
+// arguments", so these errors should have the same code.
+//
+// Error code names should be as brief as possible while retaining accuracy and
+// distinctiveness. In most cases names should start with an adjective
+// describing the nature of the error (e.g. "invalid", "unused", "misplaced"),
+// and end with a noun identifying the relevant language object. For example,
+// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the
+// convention that "bad" implies a problem with syntax, and "invalid" implies a
+// problem with types.
+
+const (
+ // InvalidSyntaxTree occurs if an invalid syntax tree is provided
+ // to the type checker. It should never happen.
+ InvalidSyntaxTree ErrorCode = -1
+)
+
+const (
+ _ ErrorCode = iota
+
+ // Test is reserved for errors that only apply while in self-test mode.
+ Test
+
+ /* package names */
+
+ // BlankPkgName occurs when a package name is the blank identifier "_".
+ //
+ // Per the spec:
+ // "The PackageName must not be the blank identifier."
+ BlankPkgName
+
+ // MismatchedPkgName occurs when a file's package name doesn't match the
+ // package name already established by other files.
+ MismatchedPkgName
+
+ // InvalidPkgUse occurs when a package identifier is used outside of a
+ // selector expression.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // var _ = fmt
+ InvalidPkgUse
+
+ /* imports */
+
+ // BadImportPath occurs when an import path is not valid.
+ BadImportPath
+
+ // BrokenImport occurs when importing a package fails.
+ //
+ // Example:
+ // import "amissingpackage"
+ BrokenImport
+
+ // ImportCRenamed occurs when the special import "C" is renamed. "C" is a
+ // pseudo-package, and must not be renamed.
+ //
+ // Example:
+ // import _ "C"
+ ImportCRenamed
+
+ // UnusedImport occurs when an import is unused.
+ //
+ // Example:
+ // import "fmt"
+ //
+ // func main() {}
+ UnusedImport
+
+ /* initialization */
+
+ // InvalidInitCycle occurs when an invalid cycle is detected within the
+ // initialization graph.
+ //
+ // Example:
+ // var x int = f()
+ //
+ // func f() int { return x }
+ InvalidInitCycle
+
+ /* decls */
+
+ // DuplicateDecl occurs when an identifier is declared multiple times.
+ //
+ // Example:
+ // var x = 1
+ // var x = 2
+ DuplicateDecl
+
+ // InvalidDeclCycle occurs when a declaration cycle is not valid.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct {
+ // a [n]int
+ // }
+ //
+ // var n = unsafe.Sizeof(T{})
+ InvalidDeclCycle
+
+ // InvalidTypeCycle occurs when a cycle in type definitions results in a
+ // type that is not well-defined.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T [unsafe.Sizeof(T{})]int
+ InvalidTypeCycle
+
+ /* decls > const */
+
+ // InvalidConstInit occurs when a const declaration has a non-constant
+ // initializer.
+ //
+ // Example:
+ // var x int
+ // const _ = x
+ InvalidConstInit
+
+ // InvalidConstVal occurs when a const value cannot be converted to its
+ // target type.
+ //
+ // TODO(findleyr): this error code and example are not very clear. Consider
+ // removing it.
+ //
+ // Example:
+ // const _ = 1 << "hello"
+ InvalidConstVal
+
+ // InvalidConstType occurs when the underlying type in a const declaration
+ // is not a valid constant type.
+ //
+ // Example:
+ // const c *int = 4
+ InvalidConstType
+
+ /* decls > var (+ other variable assignment codes) */
+
+ // UntypedNilUse occurs when the predeclared (untyped) value nil is used to
+ // initialize a variable declared without an explicit type.
+ //
+ // Example:
+ // var x = nil
+ UntypedNilUse
+
+ // WrongAssignCount occurs when the number of values on the right-hand side
+ // of an assignment or initialization expression does not match the number
+ // of variables on the left-hand side.
+ //
+ // Example:
+ // var x = 1, 2
+ WrongAssignCount
+
+ // UnassignableOperand occurs when the left-hand side of an assignment is
+ // not assignable.
+ //
+ // Example:
+ // func f() {
+ // const c = 1
+ // c = 2
+ // }
+ UnassignableOperand
+
+ // NoNewVar occurs when a short variable declaration (':=') does not declare
+ // new variables.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // x := 2
+ // }
+ NoNewVar
+
+ // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does
+ // not have single-valued left-hand or right-hand side.
+ //
+ // Per the spec:
+ // "In assignment operations, both the left- and right-hand expression lists
+ // must contain exactly one single-valued expression"
+ //
+ // Example:
+ // func f() int {
+ // x, y := 1, 2
+ // x, y += 1
+ // return x + y
+ // }
+ MultiValAssignOp
+
+ // InvalidIfaceAssign occurs when a value of type T is used as an
+ // interface, but T does not implement a method of the expected interface.
+ //
+ // Example:
+ // type I interface {
+ // f()
+ // }
+ //
+ // type T int
+ //
+ // var x I = T(1)
+ InvalidIfaceAssign
+
+ // InvalidChanAssign occurs when a chan assignment is invalid.
+ //
+ // Per the spec, a value x is assignable to a channel type T if:
+ // "x is a bidirectional channel value, T is a channel type, x's type V and
+ // T have identical element types, and at least one of V or T is not a
+ // defined type."
+ //
+ // Example:
+ // type T1 chan int
+ // type T2 chan int
+ //
+ // var x T1
+ // // Invalid assignment because both types are named
+ // var _ T2 = x
+ InvalidChanAssign
+
+ // IncompatibleAssign occurs when the type of the right-hand side expression
+ // in an assignment cannot be assigned to the type of the variable being
+ // assigned.
+ //
+ // Example:
+ // var x []int
+ // var _ int = x
+ IncompatibleAssign
+
+ // UnaddressableFieldAssign occurs when trying to assign to a struct field
+ // in a map value.
+ //
+ // Example:
+ // func f() {
+ // m := make(map[string]struct{i int})
+ // m["foo"].i = 42
+ // }
+ UnaddressableFieldAssign
+
+ /* decls > type (+ other type expression codes) */
+
+ // NotAType occurs when the identifier used as the underlying type in a type
+ // declaration or the right-hand side of a type alias does not denote a type.
+ //
+ // Example:
+ // var S = 2
+ //
+ // type T S
+ NotAType
+
+ // InvalidArrayLen occurs when an array length is not a constant value.
+ //
+ // Example:
+ // var n = 3
+ // var _ = [n]int{}
+ InvalidArrayLen
+
+ // BlankIfaceMethod occurs when a method name is '_'.
+ //
+ // Per the spec:
+ // "The name of each explicitly specified method must be unique and not
+ // blank."
+ //
+ // Example:
+ // type T interface {
+ // _(int)
+ // }
+ BlankIfaceMethod
+
+ // IncomparableMapKey occurs when a map key type does not support the == and
+ // != operators.
+ //
+ // Per the spec:
+ // "The comparison operators == and != must be fully defined for operands of
+ // the key type; thus the key type must not be a function, map, or slice."
+ //
+ // Example:
+ // var x map[T]int
+ //
+ // type T []int
+ IncomparableMapKey
+
+ // InvalidIfaceEmbed occurs when a non-interface type is embedded in an
+ // interface.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (T) m()
+ //
+ // type I interface {
+ // T
+ // }
+ InvalidIfaceEmbed
+
+ // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T,
+	// and T is itself a pointer, an unsafe.Pointer, or an interface.
+ //
+ // Per the spec:
+ // "An embedded field must be specified as a type name T or as a pointer to
+ // a non-interface type name *T, and T itself may not be a pointer type."
+ //
+ // Example:
+ // type T *int
+ //
+ // type S struct {
+ // *T
+ // }
+ InvalidPtrEmbed
+
+ /* decls > func and method */
+
+ // BadRecv occurs when a method declaration does not have exactly one
+ // receiver parameter.
+ //
+ // Example:
+ // func () _() {}
+ BadRecv
+
+ // InvalidRecv occurs when a receiver type expression is not of the form T
+ // or *T, or T is a pointer type.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (**T) m() {}
+ InvalidRecv
+
+ // DuplicateFieldAndMethod occurs when an identifier appears as both a field
+ // and method name.
+ //
+ // Example:
+ // type T struct {
+ // m int
+ // }
+ //
+ // func (T) m() {}
+ DuplicateFieldAndMethod
+
+ // DuplicateMethod occurs when two methods on the same receiver type have
+ // the same name.
+ //
+ // Example:
+ // type T struct {}
+ // func (T) m() {}
+ // func (T) m(i int) int { return i }
+ DuplicateMethod
+
+ /* decls > special */
+
+ // InvalidBlank occurs when a blank identifier is used as a value or type.
+ //
+ // Per the spec:
+ // "The blank identifier may appear as an operand only on the left-hand side
+ // of an assignment."
+ //
+ // Example:
+ // var x = _
+ InvalidBlank
+
+ // InvalidIota occurs when the predeclared identifier iota is used outside
+ // of a constant declaration.
+ //
+ // Example:
+ // var x = iota
+ InvalidIota
+
+ // MissingInitBody occurs when an init function is missing its body.
+ //
+ // Example:
+ // func init()
+ MissingInitBody
+
+ // InvalidInitSig occurs when an init function declares parameters or
+ // results.
+ //
+ // Example:
+ // func init() int { return 1 }
+ InvalidInitSig
+
+ // InvalidInitDecl occurs when init is declared as anything other than a
+ // function.
+ //
+ // Example:
+ // var init = 1
+ InvalidInitDecl
+
+ // InvalidMainDecl occurs when main is declared as anything other than a
+ // function, in a main package.
+ InvalidMainDecl
+
+ /* exprs */
+
+ // TooManyValues occurs when a function returns too many values for the
+ // expression context in which it is used.
+ //
+ // Example:
+ // func ReturnTwo() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // var x = ReturnTwo()
+ TooManyValues
+
+ // NotAnExpr occurs when a type expression is used where a value expression
+ // is expected.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func f() {
+ // T
+ // }
+ NotAnExpr
+
+ /* exprs > const */
+
+ // TruncatedFloat occurs when a float constant is truncated to an integer
+ // value.
+ //
+ // Example:
+ // var _ int = 98.6
+ TruncatedFloat
+
+ // NumericOverflow occurs when a numeric constant overflows its target type.
+ //
+ // Example:
+ // var x int8 = 1000
+ NumericOverflow
+
+ /* exprs > operation */
+
+ // UndefinedOp occurs when an operator is not defined for the type(s) used
+ // in an operation.
+ //
+ // Example:
+ // var c = "a" - "b"
+ UndefinedOp
+
+ // MismatchedTypes occurs when operand types are incompatible in a binary
+ // operation.
+ //
+ // Example:
+ // var a = "hello"
+ // var b = 1
+ // var c = a - b
+ MismatchedTypes
+
+ // DivByZero occurs when a division operation is provable at compile
+ // time to be a division by zero.
+ //
+ // Example:
+ // const divisor = 0
+ // var x int = 1/divisor
+ DivByZero
+
+ // NonNumericIncDec occurs when an increment or decrement operator is
+ // applied to a non-numeric value.
+ //
+ // Example:
+ // func f() {
+ // var c = "c"
+ // c++
+ // }
+ NonNumericIncDec
+
+ /* exprs > ptr */
+
+ // UnaddressableOperand occurs when the & operator is applied to an
+ // unaddressable expression.
+ //
+ // Example:
+ // var x = &1
+ UnaddressableOperand
+
+ // InvalidIndirection occurs when a non-pointer value is indirected via the
+ // '*' operator.
+ //
+ // Example:
+ // var x int
+ // var y = *x
+ InvalidIndirection
+
+ /* exprs > [] */
+
+ // NonIndexableOperand occurs when an index operation is applied to a value
+ // that cannot be indexed.
+ //
+ // Example:
+ // var x = 1
+ // var y = x[1]
+ NonIndexableOperand
+
+ // InvalidIndex occurs when an index argument is not of integer type,
+ // negative, or out-of-bounds.
+ //
+ // Example:
+ // var s = [...]int{1,2,3}
+ // var x = s[5]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var _ = s[-1]
+ //
+ // Example:
+ // var s = []int{1,2,3}
+ // var i string
+ // var _ = s[i]
+ InvalidIndex
+
+ // SwappedSliceIndices occurs when constant indices in a slice expression
+ // are decreasing in value.
+ //
+ // Example:
+ // var _ = []int{1,2,3}[2:1]
+ SwappedSliceIndices
+
+ /* operators > slice */
+
+ // NonSliceableOperand occurs when a slice operation is applied to a value
+ // whose type is not sliceable, or is unaddressable.
+ //
+ // Example:
+ // var x = [...]int{1, 2, 3}[:1]
+ //
+ // Example:
+ // var x = 1
+ // var y = 1[:1]
+ NonSliceableOperand
+
+ // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is
+ // applied to a string.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s[1:2:3]
+ InvalidSliceExpr
+
+ /* exprs > shift */
+
+ // InvalidShiftCount occurs when the right-hand side of a shift operation is
+ // either non-integer, negative, or too large.
+ //
+ // Example:
+ // var (
+ // x string
+ // y int = 1 << x
+ // )
+ InvalidShiftCount
+
+ // InvalidShiftOperand occurs when the shifted operand is not an integer.
+ //
+ // Example:
+ // var s = "hello"
+ // var x = s << 2
+ InvalidShiftOperand
+
+ /* exprs > chan */
+
+ // InvalidReceive occurs when there is a channel receive from a value that
+ // is either not a channel, or is a send-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // <-x
+ // }
+ InvalidReceive
+
+ // InvalidSend occurs when there is a channel send to a value that is not a
+ // channel, or is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x = 1
+ // x <- "hello!"
+ // }
+ InvalidSend
+
+ /* exprs > literal */
+
+ // DuplicateLitKey occurs when an index is duplicated in a slice, array, or
+ // map literal.
+ //
+ // Example:
+ // var _ = []int{0:1, 0:2}
+ //
+ // Example:
+ // var _ = map[string]int{"a": 1, "a": 2}
+ DuplicateLitKey
+
+ // MissingLitKey occurs when a map literal is missing a key expression.
+ //
+ // Example:
+ // var _ = map[string]int{1}
+ MissingLitKey
+
+ // InvalidLitIndex occurs when the key in a key-value element of a slice or
+ // array literal is not an integer constant.
+ //
+ // Example:
+ // var i = 0
+ // var x = []string{i: "world"}
+ InvalidLitIndex
+
+ // OversizeArrayLit occurs when an array literal exceeds its length.
+ //
+ // Example:
+ // var _ = [2]int{1,2,3}
+ OversizeArrayLit
+
+ // MixedStructLit occurs when a struct literal contains a mix of positional
+ // and named elements.
+ //
+ // Example:
+ // var _ = struct{i, j int}{i: 1, 2}
+ MixedStructLit
+
+ // InvalidStructLit occurs when a positional struct literal has an incorrect
+ // number of values.
+ //
+ // Example:
+ // var _ = struct{i, j int}{1,2,3}
+ InvalidStructLit
+
+ // MissingLitField occurs when a struct literal refers to a field that does
+ // not exist on the struct type.
+ //
+ // Example:
+ // var _ = struct{i int}{j: 2}
+ MissingLitField
+
+ // DuplicateLitField occurs when a struct literal contains duplicated
+ // fields.
+ //
+ // Example:
+ // var _ = struct{i int}{i: 1, i: 2}
+ DuplicateLitField
+
+ // UnexportedLitField occurs when a positional struct literal implicitly
+ // assigns an unexported field of an imported type.
+ UnexportedLitField
+
+ // InvalidLitField occurs when a field name is not a valid identifier.
+ //
+ // Example:
+ // var _ = struct{i int}{1: 1}
+ InvalidLitField
+
+ // UntypedLit occurs when a composite literal omits a required type
+ // identifier.
+ //
+ // Example:
+ // type outer struct{
+ // inner struct { i int }
+ // }
+ //
+ // var _ = outer{inner: {1}}
+ UntypedLit
+
+ // InvalidLit occurs when a composite literal expression does not match its
+ // type.
+ //
+ // Example:
+ // type P *struct{
+ // x int
+ // }
+ // var _ = P {}
+ InvalidLit
+
+ /* exprs > selector */
+
+ // AmbiguousSelector occurs when a selector is ambiguous.
+ //
+ // Example:
+ // type E1 struct { i int }
+ // type E2 struct { i int }
+ // type T struct { E1; E2 }
+ //
+ // var x T
+ // var _ = x.i
+ AmbiguousSelector
+
+ // UndeclaredImportedName occurs when a package-qualified identifier is
+ // undeclared by the imported package.
+ //
+ // Example:
+ // import "go/types"
+ //
+ // var _ = types.NotAnActualIdentifier
+ UndeclaredImportedName
+
+ // UnexportedName occurs when a selector refers to an unexported identifier
+ // of an imported package.
+ //
+ // Example:
+ // import "reflect"
+ //
+ // type _ reflect.flag
+ UnexportedName
+
+ // UndeclaredName occurs when an identifier is not declared in the current
+ // scope.
+ //
+ // Example:
+ // var x T
+ UndeclaredName
+
+ // MissingFieldOrMethod occurs when a selector references a field or method
+ // that does not exist.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // var x = T{}.f
+ MissingFieldOrMethod
+
+ /* exprs > ... */
+
+ // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is
+ // not valid.
+ //
+ // Example:
+ // var _ = map[int][...]int{0: {}}
+ BadDotDotDotSyntax
+
+ // NonVariadicDotDotDot occurs when a "..." is used on the final argument to
+ // a non-variadic function.
+ //
+ // Example:
+ // func printArgs(s []string) {
+ // for _, a := range s {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // s := []string{"a", "b", "c"}
+ // printArgs(s...)
+ // }
+ NonVariadicDotDotDot
+
+ // MisplacedDotDotDot occurs when a "..." is used somewhere other than the
+ // final argument to a function call.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := []int{1,2,3}
+ // printArgs(0, a...)
+ // }
+ MisplacedDotDotDot
+
+ // InvalidDotDotDotOperand occurs when a "..." operator is applied to a
+ // single-valued operand.
+ //
+ // Example:
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func f() {
+ // a := 1
+ // printArgs(a...)
+ // }
+ //
+ // Example:
+ // func args() (int, int) {
+ // return 1, 2
+ // }
+ //
+ // func printArgs(args ...int) {
+ // for _, a := range args {
+ // println(a)
+ // }
+ // }
+ //
+ // func g() {
+ // printArgs(args()...)
+ // }
+ InvalidDotDotDotOperand
+
+ // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in
+ // function.
+ //
+ // Example:
+ // var s = []int{1, 2, 3}
+ // var l = len(s...)
+ InvalidDotDotDot
+
+ /* exprs > built-in */
+
+ // UncalledBuiltin occurs when a built-in function is used as a
+ // function-valued expression, instead of being called.
+ //
+ // Per the spec:
+ // "The built-in functions do not have standard Go types, so they can only
+ // appear in call expressions; they cannot be used as function values."
+ //
+ // Example:
+ // var _ = copy
+ UncalledBuiltin
+
+ // InvalidAppend occurs when append is called with a first argument that is
+ // not a slice.
+ //
+ // Example:
+ // var _ = append(1, 2)
+ InvalidAppend
+
+ // InvalidCap occurs when an argument to the cap built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = cap(s)
+ InvalidCap
+
+ // InvalidClose occurs when close(...) is called with an argument that is
+ // not of channel type, or that is a receive-only channel.
+ //
+ // Example:
+ // func f() {
+ // var x int
+ // close(x)
+ // }
+ InvalidClose
+
+ // InvalidCopy occurs when the arguments are not of slice type or do not
+ // have compatible type.
+ //
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
+ // information on the type requirements for the copy built-in.
+ //
+ // Example:
+ // func f() {
+ // var x []int
+ // y := []int64{1,2,3}
+ // copy(x, y)
+ // }
+ InvalidCopy
+
+ // InvalidComplex occurs when the complex built-in function is called with
+ // arguments with incompatible types.
+ //
+ // Example:
+ // var _ = complex(float32(1), float64(2))
+ InvalidComplex
+
+ // InvalidDelete occurs when the delete built-in function is called with a
+ // first argument that is not a map.
+ //
+ // Example:
+ // func f() {
+ // m := "hello"
+ // delete(m, "e")
+ // }
+ InvalidDelete
+
+ // InvalidImag occurs when the imag built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = imag(int(1))
+ InvalidImag
+
+ // InvalidLen occurs when an argument to the len built-in function is not of
+ // supported type.
+ //
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
+ // which underlying types are supported as arguments to cap and len.
+ //
+ // Example:
+ // var s = 2
+ // var x = len(s)
+ InvalidLen
+
+ // SwappedMakeArgs occurs when make is called with three arguments, and its
+ // length argument is larger than its capacity argument.
+ //
+ // Example:
+ // var x = make([]int, 3, 2)
+ SwappedMakeArgs
+
+ // InvalidMake occurs when make is called with an unsupported type argument.
+ //
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
+ // information on the types that may be created using make.
+ //
+ // Example:
+ // var x = make(int)
+ InvalidMake
+
+ // InvalidReal occurs when the real built-in function is called with an
+ // argument that does not have complex type.
+ //
+ // Example:
+ // var _ = real(int(1))
+ InvalidReal
+
+ /* exprs > assertion */
+
+ // InvalidAssert occurs when a type assertion is applied to a
+ // value that is not of interface type.
+ //
+ // Example:
+ // var x = 1
+ // var _ = x.(float64)
+ InvalidAssert
+
+ // ImpossibleAssert occurs for a type assertion x.(T) when the value x of
+ // interface cannot have dynamic type T, due to a missing or mismatching
+ // method on T.
+ //
+ // Example:
+ // type T int
+ //
+ // func (t *T) m() int { return int(*t) }
+ //
+ // type I interface { m() int }
+ //
+ // var x I
+ // var _ = x.(T)
+ ImpossibleAssert
+
+ /* exprs > conversion */
+
+ // InvalidConversion occurs when the argument type cannot be converted to the
+ // target.
+ //
+ // See https://golang.org/ref/spec#Conversions for the rules of
+ // convertibility.
+ //
+ // Example:
+ // var x float64
+ // var _ = string(x)
+ InvalidConversion
+
+ // InvalidUntypedConversion occurs when there is no valid implicit
+ // conversion from an untyped value satisfying the type constraints of the
+ // context in which it is used.
+ //
+ // Example:
+ // var _ = 1 + ""
+ InvalidUntypedConversion
+
+ /* offsetof */
+
+ // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument
+ // that is not a selector expression.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Offsetof(x)
+ BadOffsetofSyntax
+
+ // InvalidOffsetof occurs when unsafe.Offsetof is called with a method
+ // selector, rather than a field selector, or when the field is embedded via
+ // a pointer.
+ //
+ // Per the spec:
+ //
+ // "If f is an embedded field, it must be reachable without pointer
+ // indirections through fields of the struct. "
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type T struct { f int }
+ // type S struct { *T }
+ // var s S
+ // var _ = unsafe.Offsetof(s.f)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // type S struct{}
+ //
+ // func (S) m() {}
+ //
+ // var s S
+ // var _ = unsafe.Offsetof(s.m)
+ InvalidOffsetof
+
+ /* control flow > scope */
+
+ // UnusedExpr occurs when a side-effect free expression is used as a
+ // statement. Such a statement has no effect.
+ //
+ // Example:
+ // func f(i int) {
+ // i*i
+ // }
+ UnusedExpr
+
+ // UnusedVar occurs when a variable is declared but unused.
+ //
+ // Example:
+ // func f() {
+ // x := 1
+ // }
+ UnusedVar
+
+ // MissingReturn occurs when a function with results is missing a return
+ // statement.
+ //
+ // Example:
+ // func f() int {}
+ MissingReturn
+
+ // WrongResultCount occurs when a return statement returns an incorrect
+ // number of values.
+ //
+ // Example:
+ // func ReturnOne() int {
+ // return 1, 2
+ // }
+ WrongResultCount
+
+ // OutOfScopeResult occurs when the name of a value implicitly returned by
+ // an empty return statement is shadowed in a nested scope.
+ //
+ // Example:
+ // func factor(n int) (i int) {
+ // for i := 2; i < n; i++ {
+ // if n%i == 0 {
+ // return
+ // }
+ // }
+ // return 0
+ // }
+ OutOfScopeResult
+
+ /* control flow > if */
+
+ // InvalidCond occurs when an if condition is not a boolean expression.
+ //
+ // Example:
+ // func checkReturn(i int) {
+ // if i {
+ // panic("non-zero return")
+ // }
+ // }
+ InvalidCond
+
+ /* control flow > for */
+
+ // InvalidPostDecl occurs when there is a declaration in a for-loop post
+ // statement.
+ //
+ // Example:
+ // func f() {
+ // for i := 0; i < 10; j := 0 {}
+ // }
+ InvalidPostDecl
+
+	// InvalidChanRange occurs when a send-only channel is used in a range
+ // expression.
+ //
+ // Example:
+ // func sum(c chan<- int) {
+ // s := 0
+ // for i := range c {
+ // s += i
+ // }
+ // }
+ InvalidChanRange
+
+ // InvalidIterVar occurs when two iteration variables are used while ranging
+ // over a channel.
+ //
+ // Example:
+ // func f(c chan int) {
+ // for k, v := range c {
+ // println(k, v)
+ // }
+ // }
+ InvalidIterVar
+
+ // InvalidRangeExpr occurs when the type of a range expression is not array,
+ // slice, string, map, or channel.
+ //
+ // Example:
+ // func f(i int) {
+ // for j := range i {
+ // println(j)
+ // }
+ // }
+ InvalidRangeExpr
+
+ /* control flow > switch */
+
+ // MisplacedBreak occurs when a break statement is not within a for, switch,
+ // or select statement of the innermost function definition.
+ //
+ // Example:
+ // func f() {
+ // break
+ // }
+ MisplacedBreak
+
+ // MisplacedContinue occurs when a continue statement is not within a for
+ // loop of the innermost function definition.
+ //
+ // Example:
+ // func sumeven(n int) int {
+ // proceed := func() {
+ // continue
+ // }
+ // sum := 0
+ // for i := 1; i <= n; i++ {
+ // if i % 2 != 0 {
+ // proceed()
+ // }
+ // sum += i
+ // }
+ // return sum
+ // }
+ MisplacedContinue
+
+ // MisplacedFallthrough occurs when a fallthrough statement is not within an
+ // expression switch.
+ //
+ // Example:
+ // func typename(i interface{}) string {
+ // switch i.(type) {
+ // case int64:
+ // fallthrough
+ // case int:
+ // return "int"
+ // }
+ // return "unsupported"
+ // }
+ MisplacedFallthrough
+
+ // DuplicateCase occurs when a type or expression switch has duplicate
+ // cases.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // case 1:
+ // println("One")
+ // }
+ // }
+ DuplicateCase
+
+ // DuplicateDefault occurs when a type or expression switch has multiple
+ // default clauses.
+ //
+ // Example:
+ // func printInt(i int) {
+ // switch i {
+ // case 1:
+ // println("one")
+ // default:
+ // println("One")
+ // default:
+ // println("1")
+ // }
+ // }
+ DuplicateDefault
+
+ // BadTypeKeyword occurs when a .(type) expression is used anywhere other
+ // than a type switch.
+ //
+ // Example:
+ // type I interface {
+ // m()
+ // }
+ // var t I
+ // var _ = t.(type)
+ BadTypeKeyword
+
+ // InvalidTypeSwitch occurs when .(type) is used on an expression that is
+ // not of interface type.
+ //
+ // Example:
+ // func f(i int) {
+ // switch x := i.(type) {}
+ // }
+ InvalidTypeSwitch
+
+ // InvalidExprSwitch occurs when a switch expression is not comparable.
+ //
+ // Example:
+ // func _() {
+ // var a struct{ _ func() }
+ // switch a /* ERROR cannot switch on a */ {
+ // }
+ // }
+ InvalidExprSwitch
+
+ /* control flow > select */
+
+ // InvalidSelectCase occurs when a select case is not a channel send or
+ // receive.
+ //
+ // Example:
+ // func checkChan(c <-chan int) bool {
+ // select {
+ // case c:
+ // return true
+ // default:
+ // return false
+ // }
+ // }
+ InvalidSelectCase
+
+ /* control flow > labels and jumps */
+
+ // UndeclaredLabel occurs when an undeclared label is jumped to.
+ //
+ // Example:
+ // func f() {
+ // goto L
+ // }
+ UndeclaredLabel
+
+ // DuplicateLabel occurs when a label is declared more than once.
+ //
+ // Example:
+ // func f() int {
+ // L:
+ // L:
+ // return 1
+ // }
+ DuplicateLabel
+
+ // MisplacedLabel occurs when a break or continue label is not on a for,
+ // switch, or select statement.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // a := []int{1,2,3}
+ // for _, e := range a {
+ // if e > 10 {
+ // break L
+ // }
+ // println(a)
+ // }
+ // }
+ MisplacedLabel
+
+ // UnusedLabel occurs when a label is declared but not used.
+ //
+ // Example:
+ // func f() {
+ // L:
+ // }
+ UnusedLabel
+
+ // JumpOverDecl occurs when a label jumps over a variable declaration.
+ //
+ // Example:
+ // func f() int {
+ // goto L
+ // x := 2
+ // L:
+ // x++
+ // return x
+ // }
+ JumpOverDecl
+
+ // JumpIntoBlock occurs when a forward jump goes to a label inside a nested
+ // block.
+ //
+ // Example:
+ // func f(x int) {
+ // goto L
+ // if x > 0 {
+ // L:
+ // print("inside block")
+ // }
+ // }
+ JumpIntoBlock
+
+ /* control flow > calls */
+
+ // InvalidMethodExpr occurs when a pointer method is called but the argument
+ // is not addressable.
+ //
+ // Example:
+ // type T struct {}
+ //
+ // func (*T) m() int { return 1 }
+ //
+ // var _ = T.m(T{})
+ InvalidMethodExpr
+
+ // WrongArgCount occurs when too few or too many arguments are passed by a
+ // function call.
+ //
+ // Example:
+ // func f(i int) {}
+ // var x = f()
+ WrongArgCount
+
+ // InvalidCall occurs when an expression is called that is not of function
+ // type.
+ //
+ // Example:
+ // var x = "x"
+ // var y = x()
+ InvalidCall
+
+ /* control flow > suspended */
+
+ // UnusedResults occurs when a restricted expression-only built-in function
+ // is suspended via go or defer. Such a suspension discards the results of
+ // these side-effect free built-in functions, and therefore is ineffectual.
+ //
+ // Example:
+ // func f(a []int) int {
+ // defer len(a)
+ // return i
+ // }
+ UnusedResults
+
+ // InvalidDefer occurs when a deferred expression is not a function call,
+ // for example if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // defer int32(i)
+ // return i
+ // }
+ InvalidDefer
+
+ // InvalidGo occurs when a go expression is not a function call, for example
+ // if the expression is a type conversion.
+ //
+ // Example:
+ // func f(i int) int {
+ // go int32(i)
+ // return i
+ // }
+ InvalidGo
+
+ // All codes below were added in Go 1.17.
+
+ /* decl */
+
+ // BadDecl occurs when a declaration has invalid syntax.
+ BadDecl
+
+ // RepeatedDecl occurs when an identifier occurs more than once on the left
+ // hand side of a short variable declaration.
+ //
+ // Example:
+ // func _() {
+ // x, y, y := 1, 2, 3
+ // }
+ RepeatedDecl
+
+ /* unsafe */
+
+ // InvalidUnsafeAdd occurs when unsafe.Add is called with a
+ // length argument that is not of integer type.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var p unsafe.Pointer
+ // var _ = unsafe.Add(p, float64(1))
+ InvalidUnsafeAdd
+
+ // InvalidUnsafeSlice occurs when unsafe.Slice is called with a
+ // pointer argument that is not of pointer type or a length argument
+ // that is not of integer type, negative, or out of bounds.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(x, 1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, float64(1))
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, -1)
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.Slice(&x, uint64(1) << 63)
+ InvalidUnsafeSlice
+
+ // All codes below were added in Go 1.18.
+
+ /* features */
+
+ // UnsupportedFeature occurs when a language feature is used that is not
+ // supported at this Go version.
+ UnsupportedFeature
+
+ /* type params */
+
+ // NotAGenericType occurs when a non-generic type is used where a generic
+ // type is expected: in type or function instantiation.
+ //
+ // Example:
+ // type T int
+ //
+ // var _ T[int]
+ NotAGenericType
+
+ // WrongTypeArgCount occurs when a type or function is instantiated with an
+ // incorrect number of type arguments, including when a generic type or
+ // function is used without instantiation.
+ //
+ // Errors involving failed type inference are assigned other error codes.
+ //
+ // Example:
+ // type T[p any] int
+ //
+ // var _ T[int, string]
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // var x = f
+ WrongTypeArgCount
+
+ // CannotInferTypeArgs occurs when type or function type argument inference
+ // fails to infer all type arguments.
+ //
+ // Example:
+ // func f[T any]() {}
+ //
+ // func _() {
+ // f()
+ // }
+ //
+ // Example:
+ // type N[P, Q any] struct{}
+ //
+ // var _ N[int]
+ CannotInferTypeArgs
+
+ // InvalidTypeArg occurs when a type argument does not satisfy its
+ // corresponding type parameter constraints.
+ //
+ // Example:
+ // type T[P ~int] struct{}
+ //
+ // var _ T[string]
+ InvalidTypeArg // arguments? InferenceFailed
+
+ // InvalidInstanceCycle occurs when an invalid cycle is detected
+ // within the instantiation graph.
+ //
+ // Example:
+ // func f[T any]() { f[*T]() }
+ InvalidInstanceCycle
+
+ // InvalidUnion occurs when an embedded union or approximation element is
+ // not valid.
+ //
+ // Example:
+ // type _ interface {
+ // ~int | interface{ m() }
+ // }
+ InvalidUnion
+
+ // MisplacedConstraintIface occurs when a constraint-type interface is used
+ // outside of constraint position.
+ //
+ // Example:
+ // type I interface { ~int }
+ //
+ // var _ I
+ MisplacedConstraintIface
+
+ // InvalidMethodTypeParams occurs when methods have type parameters.
+ //
+ // It cannot be encountered with an AST parsed using go/parser.
+ InvalidMethodTypeParams
+
+ // MisplacedTypeParam occurs when a type parameter is used in a place where
+ // it is not permitted.
+ //
+ // Example:
+ // type T[P any] P
+ //
+ // Example:
+ // type T[P any] struct{ *P }
+ MisplacedTypeParam
+
+ // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with
+ // an argument that is not of slice type. It also occurs if it is used
+ // in a package compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var x int
+ // var _ = unsafe.SliceData(x)
+ InvalidUnsafeSliceData
+
+ // InvalidUnsafeString occurs when unsafe.String is called with
+ // a length argument that is not of integer type, negative, or
+ // out of bounds. It also occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ //
+ // Example:
+ // import "unsafe"
+ //
+ // var b [10]byte
+ // var _ = unsafe.String(&b[0], -1)
+ InvalidUnsafeString
+
+ // InvalidUnsafeStringData occurs if it is used in a package
+ // compiled for a language version before go1.20.
+ _ // not used anymore
+
+)
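
Illustration (not part of the vendored code): a sketch of how these codes can be recovered from a types.Error via ErrorCodeStartEnd, declared in types.go later in this diff. The exact code reported for the duplicate declaration (DuplicateDecl) is an expectation based on the numbering above, not something this example verifies.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	src := "package p\nvar x = 1\nvar x = 2\n" // duplicate declaration of x
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		Error: func(err error) {
			if terr, ok := err.(types.Error); ok {
				if code, _, _, ok := typesinternal.ErrorCodeStartEnd(terr); ok {
					// Expected output resembling "DuplicateDecl: x redeclared in this block".
					fmt.Printf("%v: %s\n", code, terr.Msg)
				}
			}
		},
	}
	conf.Check("p", fset, []*ast.File{f}, nil) // errors are delivered to conf.Error
}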
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
new file mode 100644
index 00000000..15ecf7c5
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go
@@ -0,0 +1,179 @@
+// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT.
+
+package typesinternal
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSyntaxTree - -1]
+ _ = x[Test-1]
+ _ = x[BlankPkgName-2]
+ _ = x[MismatchedPkgName-3]
+ _ = x[InvalidPkgUse-4]
+ _ = x[BadImportPath-5]
+ _ = x[BrokenImport-6]
+ _ = x[ImportCRenamed-7]
+ _ = x[UnusedImport-8]
+ _ = x[InvalidInitCycle-9]
+ _ = x[DuplicateDecl-10]
+ _ = x[InvalidDeclCycle-11]
+ _ = x[InvalidTypeCycle-12]
+ _ = x[InvalidConstInit-13]
+ _ = x[InvalidConstVal-14]
+ _ = x[InvalidConstType-15]
+ _ = x[UntypedNilUse-16]
+ _ = x[WrongAssignCount-17]
+ _ = x[UnassignableOperand-18]
+ _ = x[NoNewVar-19]
+ _ = x[MultiValAssignOp-20]
+ _ = x[InvalidIfaceAssign-21]
+ _ = x[InvalidChanAssign-22]
+ _ = x[IncompatibleAssign-23]
+ _ = x[UnaddressableFieldAssign-24]
+ _ = x[NotAType-25]
+ _ = x[InvalidArrayLen-26]
+ _ = x[BlankIfaceMethod-27]
+ _ = x[IncomparableMapKey-28]
+ _ = x[InvalidIfaceEmbed-29]
+ _ = x[InvalidPtrEmbed-30]
+ _ = x[BadRecv-31]
+ _ = x[InvalidRecv-32]
+ _ = x[DuplicateFieldAndMethod-33]
+ _ = x[DuplicateMethod-34]
+ _ = x[InvalidBlank-35]
+ _ = x[InvalidIota-36]
+ _ = x[MissingInitBody-37]
+ _ = x[InvalidInitSig-38]
+ _ = x[InvalidInitDecl-39]
+ _ = x[InvalidMainDecl-40]
+ _ = x[TooManyValues-41]
+ _ = x[NotAnExpr-42]
+ _ = x[TruncatedFloat-43]
+ _ = x[NumericOverflow-44]
+ _ = x[UndefinedOp-45]
+ _ = x[MismatchedTypes-46]
+ _ = x[DivByZero-47]
+ _ = x[NonNumericIncDec-48]
+ _ = x[UnaddressableOperand-49]
+ _ = x[InvalidIndirection-50]
+ _ = x[NonIndexableOperand-51]
+ _ = x[InvalidIndex-52]
+ _ = x[SwappedSliceIndices-53]
+ _ = x[NonSliceableOperand-54]
+ _ = x[InvalidSliceExpr-55]
+ _ = x[InvalidShiftCount-56]
+ _ = x[InvalidShiftOperand-57]
+ _ = x[InvalidReceive-58]
+ _ = x[InvalidSend-59]
+ _ = x[DuplicateLitKey-60]
+ _ = x[MissingLitKey-61]
+ _ = x[InvalidLitIndex-62]
+ _ = x[OversizeArrayLit-63]
+ _ = x[MixedStructLit-64]
+ _ = x[InvalidStructLit-65]
+ _ = x[MissingLitField-66]
+ _ = x[DuplicateLitField-67]
+ _ = x[UnexportedLitField-68]
+ _ = x[InvalidLitField-69]
+ _ = x[UntypedLit-70]
+ _ = x[InvalidLit-71]
+ _ = x[AmbiguousSelector-72]
+ _ = x[UndeclaredImportedName-73]
+ _ = x[UnexportedName-74]
+ _ = x[UndeclaredName-75]
+ _ = x[MissingFieldOrMethod-76]
+ _ = x[BadDotDotDotSyntax-77]
+ _ = x[NonVariadicDotDotDot-78]
+ _ = x[MisplacedDotDotDot-79]
+ _ = x[InvalidDotDotDotOperand-80]
+ _ = x[InvalidDotDotDot-81]
+ _ = x[UncalledBuiltin-82]
+ _ = x[InvalidAppend-83]
+ _ = x[InvalidCap-84]
+ _ = x[InvalidClose-85]
+ _ = x[InvalidCopy-86]
+ _ = x[InvalidComplex-87]
+ _ = x[InvalidDelete-88]
+ _ = x[InvalidImag-89]
+ _ = x[InvalidLen-90]
+ _ = x[SwappedMakeArgs-91]
+ _ = x[InvalidMake-92]
+ _ = x[InvalidReal-93]
+ _ = x[InvalidAssert-94]
+ _ = x[ImpossibleAssert-95]
+ _ = x[InvalidConversion-96]
+ _ = x[InvalidUntypedConversion-97]
+ _ = x[BadOffsetofSyntax-98]
+ _ = x[InvalidOffsetof-99]
+ _ = x[UnusedExpr-100]
+ _ = x[UnusedVar-101]
+ _ = x[MissingReturn-102]
+ _ = x[WrongResultCount-103]
+ _ = x[OutOfScopeResult-104]
+ _ = x[InvalidCond-105]
+ _ = x[InvalidPostDecl-106]
+ _ = x[InvalidChanRange-107]
+ _ = x[InvalidIterVar-108]
+ _ = x[InvalidRangeExpr-109]
+ _ = x[MisplacedBreak-110]
+ _ = x[MisplacedContinue-111]
+ _ = x[MisplacedFallthrough-112]
+ _ = x[DuplicateCase-113]
+ _ = x[DuplicateDefault-114]
+ _ = x[BadTypeKeyword-115]
+ _ = x[InvalidTypeSwitch-116]
+ _ = x[InvalidExprSwitch-117]
+ _ = x[InvalidSelectCase-118]
+ _ = x[UndeclaredLabel-119]
+ _ = x[DuplicateLabel-120]
+ _ = x[MisplacedLabel-121]
+ _ = x[UnusedLabel-122]
+ _ = x[JumpOverDecl-123]
+ _ = x[JumpIntoBlock-124]
+ _ = x[InvalidMethodExpr-125]
+ _ = x[WrongArgCount-126]
+ _ = x[InvalidCall-127]
+ _ = x[UnusedResults-128]
+ _ = x[InvalidDefer-129]
+ _ = x[InvalidGo-130]
+ _ = x[BadDecl-131]
+ _ = x[RepeatedDecl-132]
+ _ = x[InvalidUnsafeAdd-133]
+ _ = x[InvalidUnsafeSlice-134]
+ _ = x[UnsupportedFeature-135]
+ _ = x[NotAGenericType-136]
+ _ = x[WrongTypeArgCount-137]
+ _ = x[CannotInferTypeArgs-138]
+ _ = x[InvalidTypeArg-139]
+ _ = x[InvalidInstanceCycle-140]
+ _ = x[InvalidUnion-141]
+ _ = x[MisplacedConstraintIface-142]
+ _ = x[InvalidMethodTypeParams-143]
+ _ = x[MisplacedTypeParam-144]
+ _ = x[InvalidUnsafeSliceData-145]
+ _ = x[InvalidUnsafeString-146]
+}
+
+const (
+ _ErrorCode_name_0 = "InvalidSyntaxTree"
+ _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString"
+)
+
+var (
+ _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180}
+)
+
+func (i ErrorCode) String() string {
+ switch {
+ case i == -1:
+ return _ErrorCode_name_0
+ case 1 <= i && i <= 146:
+ i -= 1
+ return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]]
+ default:
+ return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
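
Illustration (not part of the vendored code): the generated String method simply maps codes to the names in the tables above.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	fmt.Println(typesinternal.InvalidSyntaxTree) // "InvalidSyntaxTree" (code -1)
	fmt.Println(typesinternal.DuplicateDecl)     // "DuplicateDecl"
	fmt.Println(typesinternal.ErrorCode(9999))   // "ErrorCode(9999)", out of range
}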
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/fx.go
new file mode 100644
index 00000000..93acff21
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/fx.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// NoEffects reports whether the expression has no side effects, i.e., it
+// does not modify the memory state. This function is conservative: it may
+// return false even when the expression has no effect.
+func NoEffects(info *types.Info, expr ast.Expr) bool {
+ noEffects := true
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
+ *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
+ *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType,
+ *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr:
+ // No effect
+ case *ast.UnaryExpr:
+ // Channel send <-ch has effects
+ if v.Op == token.ARROW {
+ noEffects = false
+ }
+ case *ast.CallExpr:
+ // Type conversion has no effects
+ if !info.Types[v.Fun].IsType() {
+ // TODO(adonovan): Add a case for built-in functions without side
+ // effects (by using callsPureBuiltin from tools/internal/refactor/inline)
+
+ noEffects = false
+ }
+ case *ast.FuncLit:
+ // A FuncLit has no effects, but do not descend into it.
+ return false
+ default:
+ // All other expressions have effects
+ noEffects = false
+ }
+
+ return noEffects
+ })
+ return noEffects
+}
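
Illustration (not part of the vendored code): a sketch contrasting a pure expression with an ordinary call. Only the Types map of types.Info is consulted by NoEffects; the rest of the setup is example boilerplate.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	src := `package p
var a = []int{1, 2, 3}
func f() int { return 0 }
var _ = a[0] + 1 // index + arithmetic: no effects
var _ = f() + 1  // ordinary call: treated as having effects
`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{Types: make(map[ast.Expr]types.TypeAndValue)}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	for _, d := range file.Decls {
		gd, ok := d.(*ast.GenDecl)
		if !ok || gd.Tok != token.VAR {
			continue
		}
		for _, s := range gd.Specs {
			for _, v := range s.(*ast.ValueSpec).Values {
				fmt.Printf("%-16s noEffects=%v\n", types.ExprString(v), typesinternal.NoEffects(info, v))
			}
		}
	}
}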
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
new file mode 100644
index 00000000..f2affec4
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+ "slices"
+)
+
+// IsTypeNamed reports whether t is (or is an alias for) a
+// package-level defined type with the given package path and one of
+// the given names. It returns false if t is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
+ if named, ok := types.Unalias(t).(*types.Named); ok {
+ tname := named.Obj()
+ return tname != nil &&
+ IsPackageLevel(tname) &&
+ tname.Pkg().Path() == pkgPath &&
+ slices.Contains(names, tname.Name())
+ }
+ return false
+}
+
+// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
+// package-level defined type with the given package path and one of the given
+// names. It returns false if t is not a pointer type.
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
+ r := Unpointer(t)
+ if r == t {
+ return false
+ }
+ return IsTypeNamed(r, pkgPath, names...)
+}
+
+// IsFunctionNamed reports whether obj is a package-level function
+// defined in the given package and has one of the given names.
+// It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
+ f, ok := obj.(*types.Func)
+ return ok &&
+ IsPackageLevel(obj) &&
+ f.Pkg().Path() == pkgPath &&
+ f.Type().(*types.Signature).Recv() == nil &&
+ slices.Contains(names, f.Name())
+}
+
+// IsMethodNamed reports whether obj is a method defined on a
+// package-level type with the given package and type name, and has
+// one of the given names. It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.TypeName.Name",
+// which is important for the performance of syntax matching.
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, T := ReceiverNamed(recv)
+ return T != nil &&
+ IsTypeNamed(T, pkgPath, typeName) &&
+ slices.Contains(names, fn.Name())
+ }
+ }
+ return false
+}
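
Illustration (not part of the vendored code): a sketch matching a well-known standard-library type and method; the choice of the time package is arbitrary.

package main

import (
	"fmt"
	"go/importer"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	pkg, err := importer.Default().Import("time")
	if err != nil {
		panic(err)
	}
	dur := pkg.Scope().Lookup("Duration").Type()

	fmt.Println(typesinternal.IsTypeNamed(dur, "time", "Duration", "Time"))                // true
	fmt.Println(typesinternal.IsPointerToNamed(types.NewPointer(dur), "time", "Duration")) // true

	// Method matching: (time.Duration).Seconds.
	sec, _, _ := types.LookupFieldOrMethod(dur, false, pkg, "Seconds")
	fmt.Println(typesinternal.IsMethodNamed(sec, "time", "Duration", "Seconds")) // true
}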
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 00000000..64f47919
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/types"
+ "strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+//
+// TODO(adonovan): this function ignores the effect of shadowing. It
+// should accept a [token.Pos] and a [types.Info] and compute only the
+// set of imports that are not shadowed at that point, analogous to
+// [analysisinternal.AddImport]. It could also compute (as a side
+// effect) the set of additional imports required to ensure that there
+// is an accessible import for each necessary package, making it
+// converge even more closely with AddImport.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+ // Construct mapping of import paths to their defined names.
+ // It is only necessary to look at renaming imports.
+ imports := make(map[string]string)
+ for _, imp := range f.Imports {
+ if imp.Name != nil && imp.Name.Name != "_" {
+ path, _ := strconv.Unquote(imp.Path.Value)
+ imports[path] = imp.Name.Name
+ }
+ }
+
+ // Define qualifier to replace full package paths with names of the imports.
+ return func(p *types.Package) string {
+ if p == nil || p == pkg {
+ return ""
+ }
+
+ if name, ok := imports[p.Path()]; ok {
+ if name == "." {
+ return ""
+ } else {
+ return name
+ }
+ }
+
+ // If there is no local renaming, fall back to the package name.
+ return p.Name()
+ }
+}
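
Illustration (not part of the vendored code): a sketch rendering a type with the file's own import name, assuming a renaming import.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	src := `package p
import str "strings"
var R str.Replacer
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	qual := typesinternal.FileQualifier(f, pkg)
	T := pkg.Scope().Lookup("R").Type()
	fmt.Println(types.TypeString(T, qual)) // "str.Replacer", not "strings.Replacer"
}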
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/recv.go
new file mode 100644
index 00000000..8352ea76
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -0,0 +1,44 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+)
+
+// ReceiverNamed returns the named type (if any) associated with the
+// type of recv, which may be of the form N or *N, or aliases thereof.
+// It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct type, or in ill-typed code.
+func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
+ t := recv.Type()
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+ isPtr = true
+ t = ptr.Elem()
+ }
+ named, _ = types.Unalias(t).(*types.Named)
+ return
+}
+
+// Unpointer returns T given *T or an alias thereof.
+// For all other types it is the identity function.
+// It does not look at underlying types.
+// The result may be an alias.
+//
+// Use this function to strip off the optional pointer on a receiver
+// in a field or method selection, without losing the named type
+// (which is needed to compute the method set).
+//
+// See also [typeparams.MustDeref], which removes one level of
+// indirection from the type, regardless of named types (analogous to
+// a LOAD instruction).
+func Unpointer(t types.Type) types.Type {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
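
Illustration (not part of the vendored code): a sketch recovering the named receiver type behind a pointer receiver; the bytes.Buffer example is an arbitrary choice.

package main

import (
	"fmt"
	"go/importer"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

func main() {
	pkg, err := importer.Default().Import("bytes")
	if err != nil {
		panic(err)
	}
	buf := pkg.Scope().Lookup("Buffer").Type() // bytes.Buffer

	// (*bytes.Buffer).Write has a pointer receiver.
	obj, _, _ := types.LookupFieldOrMethod(buf, true, pkg, "Write")
	recv := obj.(*types.Func).Type().(*types.Signature).Recv()

	isPtr, named := typesinternal.ReceiverNamed(recv)
	fmt.Println(isPtr, named) // true bytes.Buffer

	fmt.Println(typesinternal.Unpointer(types.NewPointer(buf))) // bytes.Buffer
	fmt.Println(typesinternal.Unpointer(buf))                   // bytes.Buffer (identity)
}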
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/toonew.go
new file mode 100644
index 00000000..cc86487e
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/toonew.go
@@ -0,0 +1,89 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/stdlib"
+ "golang.org/x/tools/internal/versions"
+)
+
+// TooNewStdSymbols computes the set of package-level symbols
+// exported by pkg that are not available at the specified version.
+// The result maps each symbol to its minimum version.
+//
+// The pkg is allowed to contain type errors.
+func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string {
+ disallowed := make(map[types.Object]string)
+
+ // Pass 1: package-level symbols.
+ symbols := stdlib.PackageSymbols[pkg.Path()]
+ for _, sym := range symbols {
+ symver := sym.Version.String()
+ if versions.Before(version, symver) {
+ switch sym.Kind {
+ case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type:
+ disallowed[pkg.Scope().Lookup(sym.Name)] = symver
+ }
+ }
+ }
+
+ // Pass 2: fields and methods.
+ //
+ // We allow fields and methods if their associated type is
+ // disallowed, as otherwise we would report false positives
+ // for compatibility shims. Consider:
+ //
+ // //go:build go1.22
+ // type T struct { F std.Real } // correct new API
+ //
+ // //go:build !go1.22
+ // type T struct { F fake } // shim
+ // type fake struct { ... }
+ // func (fake) M () {}
+ //
+ // These alternative declarations of T use either the std.Real
+ // type, introduced in go1.22, or a fake type, for the field
+ // F. (The fakery could be arbitrarily deep, involving more
+ // nested fields and methods than are shown here.) Clients
+ // that use the compatibility shim T will compile with any
+ // version of go, whether older or newer than go1.22, but only
+ // the newer version will use the std.Real implementation.
+ //
+ // Now consider a reference to method M in new(T).F.M() in a
+ // module that requires a minimum of go1.21. The analysis may
+ // occur using a version of Go higher than 1.21, selecting the
+ // first version of T, so the method M is Real.M. This would
+ // spuriously cause the analyzer to report a reference to a
+ // too-new symbol even though this expression compiles just
+ // fine (with the fake implementation) using go1.21.
+ for _, sym := range symbols {
+ symVersion := sym.Version.String()
+ if !versions.Before(version, symVersion) {
+ continue // allowed
+ }
+
+ var obj types.Object
+ switch sym.Kind {
+ case stdlib.Field:
+ typename, name := sym.SplitField()
+ if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name)
+ }
+
+ case stdlib.Method:
+ ptr, recvname, name := sym.SplitMethod()
+ if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name)
+ }
+ }
+ if obj != nil {
+ disallowed[obj] = symVersion
+ }
+ }
+
+ return disallowed
+}
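
// Illustrative sketch, not part of the vendored file: pass 2 above resolves
// fields and methods from a package-level type name via the public
// types.LookupFieldOrMethod API. The package name "p" and its tiny source are
// made up for the example.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
type T struct{ F int }
func (*T) M() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	var conf types.Config
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	t := pkg.Scope().Lookup("T") // the same package-scope lookup pass 2 starts from
	field, _, _ := types.LookupFieldOrMethod(t.Type(), false, pkg, "F")
	method, _, _ := types.LookupFieldOrMethod(t.Type(), true, pkg, "M") // pointer receiver
	fmt.Println(field, method)
}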
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/types.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/types.go
new file mode 100644
index 00000000..fef74a78
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinternal], for analyzer helpers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/tools/go/ast/inspector"
+ "golang.org/x/tools/internal/aliases"
+)
+
+func SetUsesCgo(conf *types.Config) bool {
+ v := reflect.ValueOf(conf).Elem()
+
+ f := v.FieldByName("go115UsesCgo")
+ if !f.IsValid() {
+ f = v.FieldByName("UsesCgo")
+ if !f.IsValid() {
+ return false
+ }
+ }
+
+ addr := unsafe.Pointer(f.UnsafeAddr())
+ *(*bool)(addr) = true
+
+ return true
+}
+
+// ErrorCodeStartEnd extracts additional information from types.Error values
+// generated by Go version 1.16 and later: the error code, start position, and
+// end position. If all positions are valid, start <= err.Pos <= end.
+//
+// If the data could not be read, the final result parameter will be false.
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+ var data [3]int
+ // By coincidence all of these fields are ints, which simplifies things.
+ v := reflect.ValueOf(err)
+ for i, name := range []string{"go116code", "go116start", "go116end"} {
+ f := v.FieldByName(name)
+ if !f.IsValid() {
+ return 0, 0, 0, false
+ }
+ data[i] = int(f.Int())
+ }
+ return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
+}
+
+// NameRelativeTo returns a types.Qualifier that qualifies members of
+// all packages other than pkg, using only the package name.
+// (By contrast, [types.RelativeTo] uses the complete package path,
+// which is often excessive.)
+//
+// If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
+func NameRelativeTo(pkg *types.Package) types.Qualifier {
+ return func(other *types.Package) string {
+ if pkg != nil && pkg == other {
+ return "" // same package; unqualified
+ }
+ return other.Name()
+ }
+}
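
// Illustrative sketch, not part of the vendored file: why a name-only
// qualifier reads better than a path-based one when printing types from a
// foreign package. The package path "example.com/util" is made up.
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/util", "util")
	obj := types.NewTypeName(token.NoPos, pkg, "Buffer", nil)
	named := types.NewNamed(obj, types.NewStruct(nil, nil), nil)

	// nil qualifier: the full package path is used, which is often excessive.
	fmt.Println(types.TypeString(named, nil)) // example.com/util.Buffer

	// Name-only qualifier, the behavior NameRelativeTo provides.
	byName := func(p *types.Package) string { return p.Name() }
	fmt.Println(types.TypeString(named, byName)) // util.Buffer
}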
+
+// TypeNameFor returns the type name symbol for the specified type, if
+// it is a [*types.Alias], [*types.Named], [*types.TypeParam], or a
+// [*types.Basic] representing a type.
+//
+// For all other types, and for Basic types representing a builtin,
+// constant, or nil, it returns nil. Be careful not to convert the
+// resulting nil pointer to a [types.Object]!
+//
+// If t is the type of a constant, it may be an "untyped" type, which
+// has no TypeName. To access the name of such types (e.g. "untyped
+// int"), use [types.Basic.Name].
+func TypeNameFor(t types.Type) *types.TypeName {
+ switch t := t.(type) {
+ case *types.Alias:
+ return t.Obj()
+ case *types.Named:
+ return t.Obj()
+ case *types.TypeParam:
+ return t.Obj()
+ case *types.Basic:
+ // See issues #71886 and #66890 for some history.
+ if tname, ok := types.Universe.Lookup(t.Name()).(*types.TypeName); ok {
+ return tname
+ }
+ }
+ return nil
+}
+
+// A NamedOrAlias is a [types.Type] that is named (as
+// defined by the spec) and capable of bearing type parameters: it
+// abstracts aliases ([types.Alias]) and defined types
+// ([types.Named]).
+//
+// Every type declared by an explicit "type" declaration is a
+// NamedOrAlias. (Built-in type symbols may additionally
+// have type [types.Basic], which is not a NamedOrAlias,
+// though the spec regards them as "named"; see [TypeNameFor].)
+//
+// NamedOrAlias cannot expose the Origin method, because
+// [types.Alias.Origin] and [types.Named.Origin] have different
+// (covariant) result types; use [Origin] instead.
+type NamedOrAlias interface {
+ types.Type
+ Obj() *types.TypeName
+ TypeArgs() *types.TypeList
+ TypeParams() *types.TypeParamList
+ SetTypeParams(tparams []*types.TypeParam)
+}
+
+var (
+ _ NamedOrAlias = (*types.Alias)(nil)
+ _ NamedOrAlias = (*types.Named)(nil)
+)
+
+// Origin returns the generic type of the Named or Alias type t if it
+// is instantiated, otherwise it returns t.
+func Origin(t NamedOrAlias) NamedOrAlias {
+ switch t := t.(type) {
+ case *types.Alias:
+ return aliases.Origin(t)
+ case *types.Named:
+ return t.Origin()
+ }
+ return t
+}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+ return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
+
+// NewTypesInfo returns a *types.Info with all maps populated.
+func NewTypesInfo() *types.Info {
+ return &types.Info{
+ Types: map[ast.Expr]types.TypeAndValue{},
+ Instances: map[*ast.Ident]types.Instance{},
+ Defs: map[*ast.Ident]types.Object{},
+ Uses: map[*ast.Ident]types.Object{},
+ Implicits: map[ast.Node]types.Object{},
+ Selections: map[*ast.SelectorExpr]*types.Selection{},
+ Scopes: map[ast.Node]*types.Scope{},
+ FileVersions: map[*ast.File]string{},
+ }
+}
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+ for cur := range cur.Enclosing() {
+ n := cur.Node()
+ // A function's Scope is associated with its FuncType.
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+ for _, imp := range pkg.Imports() {
+ if imp.Path() == path {
+ return true
+ }
+ }
+ return false
+}
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 00000000..e5da0495
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+ _ VarKind = iota // (not meaningful)
+ PackageVar // a package-level variable
+ LocalVar // a local variable
+ RecvVar // a method receiver variable
+ ParamVar // a function parameter variable
+ ResultVar // a function result variable
+ FieldVar // a struct field
+)
+
+func (kind VarKind) String() string {
+ return [...]string{
+ 0: "VarKind(0)",
+ PackageVar: "PackageVar",
+ LocalVar: "LocalVar",
+ RecvVar: "RecvVar",
+ ParamVar: "ParamVar",
+ ResultVar: "ResultVar",
+ FieldVar: "FieldVar",
+ }[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/operator/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/operator/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 00000000..453bba2a
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,381 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false", true
+ case t.Info()&types.IsNumeric != 0:
+ return "0", true
+ case t.Info()&types.IsString != 0:
+ return `""`, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil", true
+ case t.Kind() == types.Invalid:
+ return "invalid", false
+ default:
+ panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return "nil", true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return "invalid", false
+ }
+ return "nil", true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ return ZeroString(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ // A type parameter can have an alias, but an alias type's underlying
+ // type can never be a type parameter.
+ // Use types.Unalias to preserve the type parameter information instead
+ // of calling Underlying(), which would skip straight through to the
+ // type parameter's underlying type, which is always an interface.
+ return ZeroString(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return types.TypeString(t, qual) + "{}", true
+
+ case *types.TypeParam:
+ // Assumes func new is not shadowed.
+ return "*new(" + types.TypeString(t, qual) + ")", true
+
+ case *types.Tuple:
+ // Tuples are not normal values.
+ // We currently format them as "(t[0], ..., t[n])"; this could change.
+ isValid := true
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ comp, ok := ZeroString(t.At(i).Type(), qual)
+
+ components[i] = comp
+ isValid = isValid && ok
+ }
+ return "(" + strings.Join(components, ", ") + ")", isValid
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
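
// Illustrative sketch, not part of the vendored file: a deliberately reduced
// re-implementation of the ZeroString idea for a handful of kinds, using only
// the public go/types API, to show the intended outputs. The real function
// also handles aliases, type parameters, tuples, and invalid types.
package main

import (
	"fmt"
	"go/types"
)

func zeroLit(t types.Type) string {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return "false"
		case t.Info()&types.IsNumeric != 0:
			return "0"
		case t.Info()&types.IsString != 0:
			return `""`
		}
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature:
		return "nil"
	case *types.Struct, *types.Array:
		return types.TypeString(t, nil) + "{}"
	}
	return "/* unhandled in this sketch */"
}

func main() {
	fmt.Println(zeroLit(types.Typ[types.Int]))                    // 0
	fmt.Println(zeroLit(types.Typ[types.String]))                 // ""
	fmt.Println(zeroLit(types.NewSlice(types.Typ[types.Int])))    // nil
	fmt.Println(zeroLit(types.NewArray(types.Typ[types.Int], 3))) // [3]int{}
}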
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types. References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return &ast.Ident{Name: "false"}, true
+ case t.Info()&types.IsNumeric != 0:
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+ case t.Info()&types.IsString != 0:
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return ast.NewIdent("nil"), true
+ case t.Kind() == types.Invalid:
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ default:
+ panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return ast.NewIdent("nil"), true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ }
+ return ast.NewIdent("nil"), true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+
+ case *types.TypeParam:
+ return &ast.StarExpr{ // *new(T)
+ X: &ast.CallExpr{
+ // Assumes func new is not shadowed.
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(t.Obj().Name()),
+ },
+ },
+ }, true
+
+ case *types.Tuple:
+ // Unlike ZeroString, there is no ast.Expr that can express a tuple as
+ // "(t[0], ..., t[n])".
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+//
+// See also https://go.dev/issues/75604, which will provide a robust
+// Type-to-valid-Go-syntax formatter.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.UnsafePointer:
+ return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+ default:
+ return ast.NewIdent(t.Name())
+ }
+
+ case *types.Pointer:
+ return &ast.UnaryExpr{
+ Op: token.MUL,
+ X: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Array:
+ return &ast.ArrayType{
+ Len: &ast.BasicLit{
+ Kind: token.INT,
+ Value: fmt.Sprintf("%d", t.Len()),
+ },
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Slice:
+ return &ast.ArrayType{
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Map:
+ return &ast.MapType{
+ Key: TypeExpr(t.Key(), qual),
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Chan:
+ dir := ast.ChanDir(t.Dir())
+ if t.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ return &ast.ChanType{
+ Dir: dir,
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < t.Params().Len(); i++ {
+ params = append(params, &ast.Field{
+ Type: TypeExpr(t.Params().At(i).Type(), qual),
+ Names: []*ast.Ident{
+ {
+ Name: t.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ if t.Variadic() {
+ last := params[len(params)-1]
+ last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+ }
+ var returns []*ast.Field
+ for i := 0; i < t.Results().Len(); i++ {
+ returns = append(returns, &ast.Field{
+ Type: TypeExpr(t.Results().At(i).Type(), qual),
+ })
+ }
+ return &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ }
+
+ case *types.TypeParam:
+ pkgName := qual(t.Obj().Pkg())
+ if pkgName == "" || t.Obj().Pkg() == nil {
+ return ast.NewIdent(t.Obj().Name())
+ }
+ return &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: ast.NewIdent(t.Obj().Name()),
+ }
+
+ // types.TypeParam also implements interface NamedOrAlias. To differentiate,
+ // the TypeParam case must come before the NamedOrAlias case.
+ // TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+ // NamedOrAlias.
+ case NamedOrAlias:
+ var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+ if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+ expr = &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: expr.(*ast.Ident),
+ }
+ }
+
+ // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+ // typesinternal.NamedOrAlias.
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+ if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+ var indices []ast.Expr
+ for i := range typeArgs.Len() {
+ indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+ }
+ expr = &ast.IndexListExpr{
+ X: expr,
+ Indices: indices,
+ }
+ }
+ }
+
+ return expr
+
+ case *types.Struct:
+ return ast.NewIdent(t.String())
+
+ case *types.Interface:
+ return ast.NewIdent(t.String())
+
+ case *types.Union:
+ if t.Len() == 0 {
+ panic("Union type should have at least one term")
+ }
+ // As in go/ast, the returned expression puts the last term in the
+ // Y field at the topmost level of the BinaryExpr.
+ // For union of type "float32 | float64 | int64", the structure looks
+ // similar to:
+ // {
+ // X: {
+ // X: float32,
+ // Op: |
+ // Y: float64,
+ // }
+ // Op: |,
+ // Y: int64,
+ // }
+ var union ast.Expr
+ for i := range t.Len() {
+ term := t.Term(i)
+ termExpr := TypeExpr(term.Type(), qual)
+ if term.Tilde() {
+ termExpr = &ast.UnaryExpr{
+ Op: token.TILDE,
+ X: termExpr,
+ }
+ }
+ if i == 0 {
+ union = termExpr
+ } else {
+ union = &ast.BinaryExpr{
+ X: union,
+ Op: token.OR,
+ Y: termExpr,
+ }
+ }
+ }
+ return union
+
+ case *types.Tuple:
+ panic("invalid input type types.Tuple")
+
+ default:
+ panic("unreachable")
+ }
+}
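
// Illustrative sketch, not part of the vendored file: the syntax TypeExpr
// builds is ordinary go/ast, so it can be rendered back to source with the
// public go/format package. The hand-built expression below corresponds to
// map[string][]int.
package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/format"
	"go/token"
)

func main() {
	expr := &ast.MapType{
		Key:   ast.NewIdent("string"),
		Value: &ast.ArrayType{Elt: ast.NewIdent("int")},
	}
	var buf bytes.Buffer
	if err := format.Node(&buf, token.NewFileSet(), expr); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // map[string][]int
}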
diff --git a/operator/vendor/golang.org/x/tools/internal/versions/features.go b/operator/vendor/golang.org/x/tools/internal/versions/features.go
new file mode 100644
index 00000000..b53f1786
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/versions/features.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// Go versions that features in x/tools can be gated to.
+const (
+ Go1_18 = "go1.18"
+ Go1_19 = "go1.19"
+ Go1_20 = "go1.20"
+ Go1_21 = "go1.21"
+ Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+ if v == Future {
+ return true // an unknown future version is always at or after any release.
+ }
+ return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior for file versions before a certain
+// Go release (the behavior stays enabled for that release and all later
+// versions, including unknown future versions).
+func Before(v, release string) bool {
+ if v == Future {
+ return false // an unknown future version is never before a release.
+ }
+ return Compare(Lang(v), Lang(release)) < 0
+}
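
// Illustrative sketch, not part of the vendored file: the AtLeast predicate
// expressed with the public go/version package (Go 1.22+), whose Lang/Compare
// semantics are compatible for "go1.N" strings. The "" case stands in for the
// Future constant above.
package main

import (
	"fmt"
	"go/version"
)

func atLeast(v, release string) bool {
	if v == "" { // unknown future version
		return true
	}
	return version.Compare(version.Lang(v), version.Lang(release)) >= 0
}

func main() {
	fmt.Println(atLeast("go1.22.3", "go1.22")) // true
	fmt.Println(atLeast("go1.21", "go1.22"))   // false
	fmt.Println(atLeast("", "go1.22"))         // true (unknown future)
}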
diff --git a/operator/vendor/golang.org/x/tools/internal/versions/gover.go b/operator/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 00000000..bbabcd22
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.minor[.patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+ major string // decimal
+ minor string // decimal or ""
+ patch string // decimal or ""
+ kind string // "", "alpha", "beta", "rc"
+ pre string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+ vx := parse(x)
+ vy := parse(y)
+
+ if c := cmpInt(vx.major, vy.major); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.minor, vy.minor); c != 0 {
+ return c
+ }
+ if c := cmpInt(vx.patch, vy.patch); c != 0 {
+ return c
+ }
+ if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+ return c
+ }
+ if c := cmpInt(vx.pre, vy.pre); c != 0 {
+ return c
+ }
+ return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+ v := parse(x)
+ if v.minor == "" || v.major == "1" && v.minor == "0" {
+ return v.major
+ }
+ return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
+func isValid(x string) bool {
+ return parse(x) != gover{}
+}
+
+// parse parses the Go version string x into a version.
+// It returns the zero version if x is malformed.
+func parse(x string) gover {
+ var v gover
+
+ // Parse major version.
+ var ok bool
+ v.major, x, ok = cutInt(x)
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Interpret "1" as "1.0.0".
+ v.minor = "0"
+ v.patch = "0"
+ return v
+ }
+
+ // Parse . before minor version.
+ if x[0] != '.' {
+ return gover{}
+ }
+
+ // Parse minor version.
+ v.minor, x, ok = cutInt(x[1:])
+ if !ok {
+ return gover{}
+ }
+ if x == "" {
+ // Patch missing is same as "0" for older versions.
+ // Starting in Go 1.21, patch missing is different from explicit .0.
+ if cmpInt(v.minor, "21") < 0 {
+ v.patch = "0"
+ }
+ return v
+ }
+
+ // Parse patch if present.
+ if x[0] == '.' {
+ v.patch, x, ok = cutInt(x[1:])
+ if !ok || x != "" {
+ // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != "").
+ // Allowing them would be a bit confusing because we already have:
+ // 1.21 < 1.21rc1
+ // But a prerelease of a patch would have the opposite effect:
+ // 1.21.3rc1 < 1.21.3
+ // We've never needed them before, so let's not start now.
+ return gover{}
+ }
+ return v
+ }
+
+ // Parse prerelease.
+ i := 0
+ for i < len(x) && (x[i] < '0' || '9' < x[i]) {
+ if x[i] < 'a' || 'z' < x[i] {
+ return gover{}
+ }
+ i++
+ }
+ if i == 0 {
+ return gover{}
+ }
+ v.kind, x = x[:i], x[i:]
+ if x == "" {
+ return v
+ }
+ v.pre, x, ok = cutInt(x)
+ if !ok || x != "" {
+ return gover{}
+ }
+
+ return v
+}
+
+// cutInt scans the leading decimal number at the start of x to an integer
+// and returns that value and the rest of the string.
+func cutInt(x string) (n, rest string, ok bool) {
+ i := 0
+ for i < len(x) && '0' <= x[i] && x[i] <= '9' {
+ i++
+ }
+ if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero
+ return "", "", false
+ }
+ return x[:i], x[i:], true
+}
+
+// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers.
+// (Copied from golang.org/x/mod/semver's compareInt.)
+func cmpInt(x, y string) int {
+ if x == y {
+ return 0
+ }
+ if len(x) < len(y) {
+ return -1
+ }
+ if len(x) > len(y) {
+ return +1
+ }
+ if x < y {
+ return -1
+ } else {
+ return +1
+ }
+}
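
// Illustrative sketch, not part of the vendored file: the "compare decimal
// strings without converting to int" trick used by cmpInt, assuming inputs
// with no unnecessary leading zeros (which cutInt already guarantees).
package main

import "fmt"

func cmpDecimal(x, y string) int {
	switch {
	case x == y:
		return 0
	case len(x) != len(y): // more digits means a larger number
		if len(x) < len(y) {
			return -1
		}
		return +1
	case x < y: // same length: lexicographic order is numeric order
		return -1
	default:
		return +1
	}
}

func main() {
	fmt.Println(cmpDecimal("9", "10"))   // -1
	fmt.Println(cmpDecimal("21", "21"))  // 0
	fmt.Println(cmpDecimal("100", "99")) // +1
}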
diff --git a/operator/vendor/golang.org/x/tools/internal/versions/types.go b/operator/vendor/golang.org/x/tools/internal/versions/types.go
new file mode 100644
index 00000000..0fc10ce4
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/versions/types.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+ "go/ast"
+ "go/types"
+)
+
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+ // follows a cascade of sources:
+ // 1) types.Info.FileVersions, which follows the cascade:
+ // 1.a) file version (ast.File.GoVersion),
+ // 1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
+ }
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
+}
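
// Illustrative sketch, not part of the vendored file: how Info.FileVersions
// gets populated in the first place. With no //go:build line in the file, the
// reported version falls back to Config.GoVersion ("go1.21" here is arbitrary).
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = "package p\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{FileVersions: map[*ast.File]string{}}
	conf := types.Config{GoVersion: "go1.21"}
	if _, err := conf.Check("p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	fmt.Println(info.FileVersions[f]) // go1.21
}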
diff --git a/operator/vendor/golang.org/x/tools/internal/versions/versions.go b/operator/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 00000000..8d1f7453
--- /dev/null
+++ b/operator/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import (
+ "strings"
+)
+
+// Note: If we use build tags to use go/versions when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/versions" even if they would not compile the file.
+// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
+// For this reason, this library is just a clone of go/versions for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+// Lang("go1.21rc2") = "go1.21"
+// Lang("go1.21.2") = "go1.21"
+// Lang("go1.21") = "go1.21"
+// Lang("go1") = "go1"
+// Lang("bad") = ""
+// Lang("1.21") = ""
+func Lang(x string) string {
+ v := lang(stripGo(x))
+ if v == "" {
+ return ""
+ }
+ return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return isValid(stripGo(x)) }
+
+// stripGo converts from a "go1.21" version to a "1.21" version.
+// If v does not start with "go", stripGo returns the empty string (a known invalid version).
+func stripGo(v string) string {
+ v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
+ if len(v) < 2 || v[:2] != "go" {
+ return ""
+ }
+ return v[2:]
+}
diff --git a/operator/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/operator/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc98..743bfb81 100644
--- a/operator/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/operator/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
func SizeVarint(v uint64) int {
// This computes 1 + (bits.Len64(v)-1)/7.
// 9/64 is a good enough approximation of 1/7
- return int(9*uint32(bits.Len64(v))+64) / 64
+ //
+ // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+ // instruction, which is very fast on CPUs from the last few years. The
+ // specific way of expressing the calculation matches C++ Protobuf, see
+ // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+ // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+ // By OR'ing v with 1, we guarantee that v is never 0, without changing the
+ // result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+ // needs to add extra instructions to handle that case.
+ //
+ // The Go compiler currently (go1.24.4) does not make use of this knowledge.
+ // This opportunity (removing the XOR instruction, which handles the 0 case)
+ // results in a small (1%) performance win across CPU architectures.
+ //
+ // Independently of avoiding the 0 case, we need the v |= 1 line because
+ // it allows the Go compiler to eliminate an extra XCHGL barrier.
+ v |= 1
+
+ // It would be clearer to write log2value := 63 - uint32(...), but
+ // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+ // Proof of identity for our value range [0..63]:
+ // https://go.dev/play/p/Pdn9hEWYakX
+ log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+ return int((log2value*9 + (64 + 9)) / 64)
}
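
// Illustrative self-check, not part of the patch: the branch-free expression
// above should agree with the plain "1 + (bits.Len64(v)-1)/7" varint size for
// representative values, including 0 and the extremes.
package main

import (
	"fmt"
	"math/bits"
)

func sizeVarintFast(v uint64) int {
	v |= 1 // sidestep the LeadingZeros64 special case for 0
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2value*9 + (64 + 9)) / 64)
}

func sizeVarintSlow(v uint64) int {
	if v == 0 {
		return 1
	}
	return 1 + (bits.Len64(v)-1)/7
}

func main() {
	for _, v := range []uint64{0, 1, 127, 128, 1 << 20, 1<<63 - 1, 1 << 63} {
		fmt.Println(v, sizeVarintFast(v), sizeVarintSlow(v), sizeVarintFast(v) == sizeVarintSlow(v))
	}
}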
// AppendFixed32 appends v to b as a little-endian uint32.
diff --git a/operator/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/operator/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 5a57ef6f..04696351 100644
Binary files a/operator/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/operator/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ
diff --git a/operator/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/operator/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 10132c9b..a0aad277 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -69,6 +69,12 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
case genid.FeatureSet_JsonFormat_field_number:
parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+ case genid.FeatureSet_EnforceNamingStyle_field_number:
+ // EnforceNamingStyle is enforced in protoc; languages other than C++
+ // are not supposed to do anything with this feature.
+ case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+ // DefaultSymbolVisibility is enforced in protoc, runtimes should not
+ // inspect this value.
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
}
diff --git a/operator/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/operator/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 00000000..a12ec979
--- /dev/null
+++ b/operator/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ // Oneof fields never use the presence bitmap.
+ //
+ // Synthetic oneofs are an exception: Those are used to implement proto3
+ // optional fields and hence should follow non-oneof field semantics.
+ return false, false
+
+ case fd.IsMap():
+ // Map-typed fields never use the presence bitmap.
+ return false, false
+
+ case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+ // Lazy fields always use the presence bitmap (only messages can be lazy).
+ isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+ return isLazy, isLazy
+
+ default:
+ // If the field has presence, use the presence bitmap.
+ return fd.HasPresence(), false
+ }
+}
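
// Illustrative sketch, not part of the patch: the descriptor predicates this
// decision consults are available through the public protoreflect API. The
// well-known Struct message is used only as a convenient example; its "fields"
// field is a map, so it never uses the presence bitmap.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	md := (&structpb.Struct{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("fields") // map<string, Value>
	fmt.Println(fd.IsMap(), fd.HasPresence(), fd.ContainingOneof() != nil)
	// Output: true false false
}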
diff --git a/operator/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/operator/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f9185..3ceb6fa7 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@ const (
Api_SourceContext_field_name protoreflect.Name = "source_context"
Api_Mixins_field_name protoreflect.Name = "mixins"
Api_Syntax_field_name protoreflect.Name = "syntax"
+ Api_Edition_field_name protoreflect.Name = "edition"
Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@ const (
Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+ Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition"
)
// Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@ const (
Api_SourceContext_field_number protoreflect.FieldNumber = 5
Api_Mixins_field_number protoreflect.FieldNumber = 6
Api_Syntax_field_number protoreflect.FieldNumber = 7
+ Api_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Method.
@@ -63,6 +66,7 @@ const (
Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
Method_Options_field_name protoreflect.Name = "options"
Method_Syntax_field_name protoreflect.Name = "syntax"
+ Method_Edition_field_name protoreflect.Name = "edition"
Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@ const (
Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+ Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition"
)
// Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@ const (
Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
Method_Options_field_number protoreflect.FieldNumber = 6
Method_Syntax_field_number protoreflect.FieldNumber = 7
+ Method_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Mixin.
diff --git a/operator/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/operator/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index f30ab6b5..950a6a32 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@ const (
Edition_EDITION_MAX_enum_value = 2147483647
)
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+ SymbolVisibility_enum_name = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_VISIBILITY_UNSET_enum_value = 0
+ SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1
+ SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -65,6 +78,7 @@ const (
FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency"
FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency"
+ FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type"
FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
FileDescriptorProto_Service_field_name protoreflect.Name = "service"
@@ -79,6 +93,7 @@ const (
FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+ FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@ const (
FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3
FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11
+ FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4
FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5
FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@ const (
DescriptorProto_Options_field_name protoreflect.Name = "options"
DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ DescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name"
DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@ const (
DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options"
DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+ DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
)
// Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@ const (
DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+ DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11
)
// Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+ EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
)
// Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+ EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,29 +1030,35 @@ const (
// Field names for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
- FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
- FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
- FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
- FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
- FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
-
- FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
- FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
- FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
- FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
- FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
- FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
+ FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
+ FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
+ FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+ FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+ FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+ FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+ FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+ FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
)
// Field numbers for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
- FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
- FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
- FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
- FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
- FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
+ FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
+ FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
+ FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
+ FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
+ FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7
+ FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
)
// Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1140,40 @@ const (
FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2
)
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+ FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+ FeatureSet_STYLE2024_enum_value = 1
+ FeatureSet_STYLE_LEGACY_enum_value = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+ FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature"
+ FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+ FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2
+ FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3
+ FeatureSet_VisibilityFeature_STRICT_enum_value = 4
+)
+
// Names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
diff --git a/operator/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/operator/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74e..bdad12a9 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/reflect/protoreflect"
piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
// permit us to skip over definitely-unset fields at marshal time.
var hasPresence bool
- hasPresence, cf.isLazy = usePresenceForField(si, fd)
+ hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
if hasPresence {
cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/operator/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/operator/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e0..5a439daa 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
"strings"
"sync/atomic"
+ "google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/reflect/protoreflect"
)
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
fd := fds.Get(i)
fs := si.fieldsByNumber[fd.Number()]
var fi fieldInfo
- usePresence, _ := usePresenceForField(si, fd)
+ usePresence, _ := filedesc.UsePresenceForField(fd)
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
if p.IsNil() {
return false
}
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if sp.IsNil() {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
return false
}
- rv := sp.AsValueOf(fs.Type.Elem())
return rv.Elem().Len() > 0
},
clear: func(p pointer) {
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if !sp.IsNil() {
- rv := sp.AsValueOf(fs.Type.Elem())
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if !rv.IsNil() {
rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
}
},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
if p.IsNil() {
return conv.Zero()
}
- sp := p.Apply(fieldOffset).AtomicGetPointer()
- if sp.IsNil() {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
return conv.Zero()
}
- rv := sp.AsValueOf(fs.Type.Elem())
if rv.Elem().Len() == 0 {
return conv.Zero()
}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
func (mi *MessageInfo) present(p pointer, index uint32) bool {
return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
}
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field. The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit. Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
- hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
- // Non-oneof scalar fields with explicit field presence use the presence array.
- usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
- switch {
- case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
- return false, false
- case fd.IsMap():
- return false, false
- case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
- return hasLazyField, hasLazyField
- default:
- return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
- }
-}
diff --git a/operator/vendor/google.golang.org/protobuf/internal/impl/presence.go b/operator/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1de..443afe81 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
// Present checks for the presence of a specific field number in a presence set.
func (p presence) Present(num uint32) bool {
- if p.P == nil {
- return false
- }
return Export{}.Present(p.toElem(num), num)
}
diff --git a/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 99%
rename from operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
rename to operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index 1ffddf68..42dd6f70 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.21
-
package strs
import (
diff --git a/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index 832a7988..00000000
--- a/operator/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package strs
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
- src := (*sliceHeader)(unsafe.Pointer(&b))
- dst := (*stringHeader)(unsafe.Pointer(&s))
- dst.Data = src.Data
- dst.Len = src.Len
- return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
- src := (*stringHeader)(unsafe.Pointer(&s))
- dst := (*sliceHeader)(unsafe.Pointer(&b))
- dst.Data = src.Data
- dst.Len = src.Len
- dst.Cap = src.Len
- return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
- buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
- n := len(prefix) + len(".") + len(name)
- if len(prefix) == 0 {
- n -= len(".")
- }
- sb.grow(n)
- sb.buf = append(sb.buf, prefix...)
- sb.buf = append(sb.buf, '.')
- sb.buf = append(sb.buf, name...)
- return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
- sb.grow(len(b))
- sb.buf = append(sb.buf, b...)
- return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
- if cap(sb.buf)-len(sb.buf) >= n {
- return
- }
-
- // Unlike strings.Builder, we do not need to copy over the contents
- // of the old buffer since our builder provides no API for
- // retrieving previously created strings.
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
- return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/operator/vendor/google.golang.org/protobuf/internal/version/version.go b/operator/vendor/google.golang.org/protobuf/internal/version/version.go
index 01efc330..697d1c14 100644
--- a/operator/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/operator/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 5
+ Patch = 8
PreRelease = ""
)
diff --git a/operator/vendor/google.golang.org/protobuf/proto/merge.go b/operator/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe578..ef55b97d 100644
--- a/operator/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/operator/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@ func Clone(m Message) Message {
return dst.Interface()
}
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+ return Clone(m).(M)
+}
+
// mergeOptions provides a namespace for merge functions, and can be
// exported in the future if we add user-visible merge options.
type mergeOptions struct{}
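
Context for the merge.go hunk above: the vendored update adds the generic proto.CloneOf helper next to the existing proto.Clone. A minimal usage sketch, not part of the vendored diff (the wrapperspb message is chosen purely for illustration), showing that CloneOf preserves the concrete message type and so avoids the type assertion Clone requires:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	src := wrapperspb.String("hello")

	// proto.Clone returns the proto.Message interface, so the caller
	// asserts back to the concrete type.
	viaClone := proto.Clone(src).(*wrapperspb.StringValue)

	// proto.CloneOf is generic and returns *wrapperspb.StringValue directly.
	viaCloneOf := proto.CloneOf(src)

	fmt.Println(viaClone.GetValue(), viaCloneOf.GetValue())
}
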
diff --git a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154eec..730331e6 100644
--- a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "public_dependency", nil)
case 11:
b = p.appendRepeatedField(b, "weak_dependency", nil)
+ case 15:
+ b = p.appendRepeatedField(b, "option_dependency", nil)
case 4:
b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
case 10:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 11:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
case 5:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 6:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -398,6 +404,10 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
b = p.appendSingularField(b, "message_encoding", nil)
case 6:
b = p.appendSingularField(b, "json_format", nil)
+ case 7:
+ b = p.appendSingularField(b, "enforce_naming_style", nil)
+ case 8:
+ b = p.appendSingularField(b, "default_symbol_visibility", nil)
}
return b
}
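
Context for the source_gen.go hunk above: it teaches SourcePath rendering about the newly added descriptor fields (option_dependency, visibility, enforce_naming_style, default_symbol_visibility). A minimal sketch, not part of the vendored diff and with illustrative path values only, of how such a path is stringified:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// FileDescriptorProto field 4 is the repeated message_type, index 0,
	// and DescriptorProto field 11 is the newly mapped visibility field,
	// so this path should render roughly as ".message_type[0].visibility".
	path := protoreflect.SourcePath{4, 0, 11}
	fmt.Println(path.String())
}
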
diff --git a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 99%
rename from operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
rename to operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index 479527b5..fe17f372 100644
--- a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.21
-
package protoreflect
import (
diff --git a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 0015fcb3..00000000
--- a/operator/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package protoreflect
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/internal/pragma"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
- ifaceHeader struct {
- Type unsafe.Pointer
- Data unsafe.Pointer
- }
-)
-
-var (
- nilType = typeOf(nil)
- boolType = typeOf(*new(bool))
- int32Type = typeOf(*new(int32))
- int64Type = typeOf(*new(int64))
- uint32Type = typeOf(*new(uint32))
- uint64Type = typeOf(*new(uint64))
- float32Type = typeOf(*new(float32))
- float64Type = typeOf(*new(float64))
- stringType = typeOf(*new(string))
- bytesType = typeOf(*new([]byte))
- enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
- return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
- pragma.DoNotCompare // 0B
-
- // typ stores the type of the value as a pointer to the Go type.
- typ unsafe.Pointer // 8B
-
- // ptr stores the data pointer for a String, Bytes, or interface value.
- ptr unsafe.Pointer // 8B
-
- // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
- // Enum value as a raw uint64.
- //
- // It is also used to store the length of a String or Bytes value;
- // the capacity is ignored.
- num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
- p := (*stringHeader)(unsafe.Pointer(&v))
- return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
- p := (*sliceHeader)(unsafe.Pointer(&v))
- return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
- p := (*ifaceHeader)(unsafe.Pointer(&v))
- return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
- *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
- return x
-}
-func (v Value) getBytes() (x []byte) {
- *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
- return x
-}
-func (v Value) getIface() (x any) {
- *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
- return x
-}
diff --git a/operator/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/operator/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a5163376..4eacb523 100644
--- a/operator/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/operator/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
}
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only be set
+// on `message` and `enum` as they are the only types available to be referenced
+// from other files.
+type SymbolVisibility int32
+
+const (
+ SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0
+ SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1
+ SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+ SymbolVisibility_name = map[int32]string{
+ 0: "VISIBILITY_UNSET",
+ 1: "VISIBILITY_LOCAL",
+ 2: "VISIBILITY_EXPORT",
+ }
+ SymbolVisibility_value = map[string]int32{
+ "VISIBILITY_UNSET": 0,
+ "VISIBILITY_LOCAL": 1,
+ "VISIBILITY_EXPORT": 2,
+ }
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+ p := new(SymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x SymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = SymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
}
func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}
func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[1]
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
}
func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
}
func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}
func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[2]
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
}
func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
}
func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}
func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[3]
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
}
func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
}
func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}
func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[4]
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
}
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
}
func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}
func (FieldOptions_CType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
}
func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
}
func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (FieldOptions_JSType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[6]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
}
func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
}
func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[7]
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
}
func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
}
func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
}
func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[8]
+ return &file_google_protobuf_descriptor_proto_enumTypes[9]
}
func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[9]
+ return &file_google_protobuf_descriptor_proto_enumTypes[10]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
}
func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
}
func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[10]
+ return &file_google_protobuf_descriptor_proto_enumTypes[11]
}
func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
}
func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
}
func (FeatureSet_EnumType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[11]
+ return &file_google_protobuf_descriptor_proto_enumTypes[12]
}
func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
}
func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
}
func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[12]
+ return &file_google_protobuf_descriptor_proto_enumTypes[13]
}
func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
}
func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
}
func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[13]
+ return &file_google_protobuf_descriptor_proto_enumTypes[14]
}
func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
}
func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
}
func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[14]
+ return &file_google_protobuf_descriptor_proto_enumTypes[15]
}
func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
}
func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
}
func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[15]
+ return &file_google_protobuf_descriptor_proto_enumTypes[16]
}
func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1139,6 +1203,136 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
}
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+ FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1
+ FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+ FeatureSet_EnforceNamingStyle_name = map[int32]string{
+ 0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+ 1: "STYLE2024",
+ 2: "STYLE_LEGACY",
+ }
+ FeatureSet_EnforceNamingStyle_value = map[string]int32{
+ "ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+ "STYLE2024": 1,
+ "STYLE_LEGACY": 2,
+ }
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+ p := new(FeatureSet_EnforceNamingStyle)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_EnforceNamingStyle(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+ // Default pre-EDITION_2024, all UNSET visibility are export.
+ FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+ // All top-level symbols default to export, nested default to local.
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+ // All symbols default to local.
+ FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+ // All symbols local by default. Nested types cannot be exported.
+ // With special case caveat for message { enum {} reserved 1 to max; }
+ // This is the recommended setting for new protos.
+ FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+ 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+ 1: "EXPORT_ALL",
+ 2: "EXPORT_TOP_LEVEL",
+ 3: "LOCAL_ALL",
+ 4: "STRICT",
+ }
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+ "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+ "EXPORT_ALL": 1,
+ "EXPORT_TOP_LEVEL": 2,
+ "LOCAL_ALL": 3,
+ "STRICT": 4,
+ }
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1177,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[16]
+ return &file_google_protobuf_descriptor_proto_enumTypes[19]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1262,6 +1456,9 @@ type FileDescriptorProto struct {
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // Names of files imported by this file purely for the purpose of providing
+ // option extensions. These are excluded from the dependency list above.
+ OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
// All top-level definitions in this file.
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1277,8 +1474,14 @@ type FileDescriptorProto struct {
// The supported values are "proto2", "proto3", and "editions".
//
// If `edition` is present, this value must be "editions".
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
// The edition of the proto file.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
@@ -1349,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
return nil
}
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+ if x != nil {
+ return x.OptionDependency
+ }
+ return nil
+}
+
func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
if x != nil {
return x.MessageType
@@ -1419,7 +1629,9 @@ type DescriptorProto struct {
ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on enums.
+ Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1524,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
return nil
}
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+
type ExtensionRangeOptions struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The parser stores options it doesn't recognize here. See above.
@@ -1836,7 +2055,9 @@ type EnumDescriptorProto struct {
ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on enums.
+ Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -1906,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
return nil
}
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+
// Describes a value within an enum.
type EnumValueDescriptorProto struct {
state protoimpl.MessageState `protogen:"open.v1"`
@@ -2212,6 +2440,9 @@ type FileOptions struct {
// determining the ruby package.
RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
@@ -2482,6 +2713,9 @@ type MessageOptions struct {
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2639,7 +2873,10 @@ type FieldOptions struct {
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // DEPRECATED. DO NOT USE!
// For Google-internal migration only. Do not use.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
@@ -2648,6 +2885,9 @@ type FieldOptions struct {
Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
@@ -2740,6 +2980,7 @@ func (x *FieldOptions) GetDeprecated() bool {
return Default_FieldOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
func (x *FieldOptions) GetWeak() bool {
if x != nil && x.Weak != nil {
return *x.Weak
@@ -2799,6 +3040,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
type OneofOptions struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2871,6 +3115,9 @@ type EnumOptions struct {
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2958,6 +3205,9 @@ type EnumValueOptions struct {
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
// Indicate that fields annotated with this enum value should not be printed
// out when using debug formats, e.g. when the field contains sensitive
@@ -3046,6 +3296,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
type ServiceOptions struct {
state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
@@ -3124,6 +3377,9 @@ type MethodOptions struct {
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3303,16 +3559,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
// be designed and implemented to handle this, hopefully before we ever hit a
// conflict here.
type FeatureSet struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
- EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
- RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
- Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
- MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
- JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
- extensionFields protoimpl.ExtensionFields
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+ EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+ RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+ Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+ MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+ JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+ EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+ DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSet) Reset() {
@@ -3387,6 +3645,20 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
return FeatureSet_JSON_FORMAT_UNKNOWN
}
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+ if x != nil && x.EnforceNamingStyle != nil {
+ return *x.EnforceNamingStyle
+ }
+ return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ if x != nil && x.DefaultSymbolVisibility != nil {
+ return *x.DefaultSymbolVisibility
+ }
+ return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
// A compiled specification for the defaults of a set of features. These
// messages are generated from FeatureSet extensions and can be used to seed
// feature resolution. The resolution with this object becomes a simple search
@@ -4047,6 +4319,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
return false
}
+type FeatureSet_VisibilityFeature struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+ *x = FeatureSet_VisibilityFeature{}
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
// A map from every known edition with a unique set of defaults to its
// defaults. Not all editions may be contained here. For a given edition,
// the defaults at the closest matching edition ordered at or before it should
@@ -4064,7 +4372,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4076,7 +4384,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4520,7 @@ type SourceCodeInfo_Location struct {
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4224,7 +4532,7 @@ func (x *SourceCodeInfo_Location) String() string {
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4296,7 +4604,7 @@ type GeneratedCodeInfo_Annotation struct {
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4308,7 +4616,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4361,777 +4669,389 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
- 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
- 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
- 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
- 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
- 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
- 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
- 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
- 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
- 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
- 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
- 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
- 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
- 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
- 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
- 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
- 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
- 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
- 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
- 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
- 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
- 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
- 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
- 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
- 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
- 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
- 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
- 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
- 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
- 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
- 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
- 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
- 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
- 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
- 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
- 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
- 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
- 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
- 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
- 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
- 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
- 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
- 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
- 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
- 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
- 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
- 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
- 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
- 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
- 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
- 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
- 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
- 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
- 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
- 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
- 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
- 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
- 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
- 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
- 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
- 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
- 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
- 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
- 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
- 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
- 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
- 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
- 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
- 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
- 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
- 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
- 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
- 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
- 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
- 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
- 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
- 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
- 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
- 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
- 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
- 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
- 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
- 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
- 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
- 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
- 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
- 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
- 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
- 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
- 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
- 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
- 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
- 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
- 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
- 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
- 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
- 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
- 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
- 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
- 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
- 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
- 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
- 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
- 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
- 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
- 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
- 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
- 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
- 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
- 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
- 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
- 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
- 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
- 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
- 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
- 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
- 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
- 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
- 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
- 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
- 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
- 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
- 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
- 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
- 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
- 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
- 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
- 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
- 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
- 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
- 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
- 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
- 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
- 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
- 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
- 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
- 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
- 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
- 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
- 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
- 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
- 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
- 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
- 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
- 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
- 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
- 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
- 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
- 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
- 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
- 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
- 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
- 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
- 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
- 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
- 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
- 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
- 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
- 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
- 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
- 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
- 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
- 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
- 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
- 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
- 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
- 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
- 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
- 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
- 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
- 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
- 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
- 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
- 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
- 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
- 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
- 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
- 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
- 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
- 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
- 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
- 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
- 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
- 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
- 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
- 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
- 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
- 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
- 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
- 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
- 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
- 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
- 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
- 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
- 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
- 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
- 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
- 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
- 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
- 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
- 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
- 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
- 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
- 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
- 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
- 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
- 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
- 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
- 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
- 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
- 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
- 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
- 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
- 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
- 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
- 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
- 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
- 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
- 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
- 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
- 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
- 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
- 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
- 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
- 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
- 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
- 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
- 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
- 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
- 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
- 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
- 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
- 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
- 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
- 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
- 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
- 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
- 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
- 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
- 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
- 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
- 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
- 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
- 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
- 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
- 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
- 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
- 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
- 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
- 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
- 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
- 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
- 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
- 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
- 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
- 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
- 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
- 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
- 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
- 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
- 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
- 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
- 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
- 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
- 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
- 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
- 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
- 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
- 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
- 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
- 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
- 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-})
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+ "\n" +
+ " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+ "\x11FileDescriptorSet\x128\n" +
+ "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
+ "\x13FileDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+ "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+ "\n" +
+ "dependency\x18\x03 \x03(\tR\n" +
+ "dependency\x12+\n" +
+ "\x11public_dependency\x18\n" +
+ " \x03(\x05R\x10publicDependency\x12'\n" +
+ "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+ "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
+ "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+ "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+ "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+ "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+ "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+ "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+ "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+ "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
+ "\x0fDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+ "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+ "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+ "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+ "nestedType\x12A\n" +
+ "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+ "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+ "\n" +
+ "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+ "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+ "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\n" +
+ " \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1az\n" +
+ "\x0eExtensionRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+ "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+ "\rReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+ "\x15ExtensionRangeOptions\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+ "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+ "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+ "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+ "\vDeclaration\x12\x16\n" +
+ "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+ "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+ "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+ "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+ "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+ "\x11VerificationState\x12\x0f\n" +
+ "\vDECLARATION\x10\x00\x12\x0e\n" +
+ "\n" +
+ "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+ "\x14FieldDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+ "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+ "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+ "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+ "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+ "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+ "\voneof_index\x18\t \x01(\x05R\n" +
+ "oneofIndex\x12\x1b\n" +
+ "\tjson_name\x18\n" +
+ " \x01(\tR\bjsonName\x127\n" +
+ "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+ "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+ "\x04Type\x12\x0f\n" +
+ "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+ "\n" +
+ "TYPE_FLOAT\x10\x02\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT64\x10\x03\x12\x0f\n" +
+ "\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT32\x10\x05\x12\x10\n" +
+ "\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+ "\fTYPE_FIXED32\x10\a\x12\r\n" +
+ "\tTYPE_BOOL\x10\b\x12\x0f\n" +
+ "\vTYPE_STRING\x10\t\x12\x0e\n" +
+ "\n" +
+ "TYPE_GROUP\x10\n" +
+ "\x12\x10\n" +
+ "\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+ "\n" +
+ "TYPE_BYTES\x10\f\x12\x0f\n" +
+ "\vTYPE_UINT32\x10\r\x12\r\n" +
+ "\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+ "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+ "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+ "\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+ "\vTYPE_SINT64\x10\x12\"C\n" +
+ "\x05Label\x12\x12\n" +
+ "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+ "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+ "\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+ "\x14OneofDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+ "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
+ "\x13EnumDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+ "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+ "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1a;\n" +
+ "\x11EnumReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+ "\x18EnumValueDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+ "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+ "\x16ServiceDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+ "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+ "\x15MethodDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+ "\n" +
+ "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+ "\voutput_type\x18\x03 \x01(\tR\n" +
+ "outputType\x128\n" +
+ "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+ "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+ "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+ "\vFileOptions\x12!\n" +
+ "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+ "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+ "\x13java_multiple_files\x18\n" +
+ " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+ "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+ "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+ "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+ "\n" +
+ "go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+ "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+ "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+ "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+ "\n" +
+ "deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+ "deprecated\x12.\n" +
+ "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+ "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+ "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+ "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+ "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+ "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+ "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+ "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+ "\fOptimizeMode\x12\t\n" +
+ "\x05SPEED\x10\x01\x12\r\n" +
+ "\tCODE_SIZE\x10\x02\x12\x10\n" +
+ "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+ "\x0eMessageOptions\x12<\n" +
+ "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+ "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x1b\n" +
+ "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+ "\"\xa1\r\n" +
+ "\fFieldOptions\x12A\n" +
+ "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+ "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+ "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+ "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+ "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x1d\n" +
+ "\x04weak\x18\n" +
+ " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
+ "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+ "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+ "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+ "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+ "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+ "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+ "\x0eEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+ "\x0eFeatureSupport\x12G\n" +
+ "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+ "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+ "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+ "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+ "\x05CType\x12\n" +
+ "\n" +
+ "\x06STRING\x10\x00\x12\b\n" +
+ "\x04CORD\x10\x01\x12\x10\n" +
+ "\fSTRING_PIECE\x10\x02\"5\n" +
+ "\x06JSType\x12\r\n" +
+ "\tJS_NORMAL\x10\x00\x12\r\n" +
+ "\tJS_STRING\x10\x01\x12\r\n" +
+ "\tJS_NUMBER\x10\x02\"U\n" +
+ "\x0fOptionRetention\x12\x15\n" +
+ "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+ "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+ "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+ "\x10OptionTargetType\x12\x17\n" +
+ "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+ "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+ "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+ "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+ "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+ "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+ "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+ "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+ "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+ "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+ "\fOneofOptions\x127\n" +
+ "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+ "\vEnumOptions\x12\x1f\n" +
+ "\vallow_alias\x18\x02 \x01(\bR\n" +
+ "allowAlias\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+ "\x10EnumValueOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+ "deprecated\x127\n" +
+ "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+ "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+ "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+ "\x0eServiceOptions\x127\n" +
+ "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+ "\rMethodOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12q\n" +
+ "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+ "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+ "\x10IdempotencyLevel\x12\x17\n" +
+ "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+ "\n" +
+ "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+ "\x13UninterpretedOption\x12A\n" +
+ "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+ "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+ "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+ "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+ "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+ "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+ "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+ "\bNamePart\x12\x1b\n" +
+ "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+ "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
+ "\n" +
+ "FeatureSet\x12\x91\x01\n" +
+ "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+ "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+ "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+ "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+ "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+ "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+ "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+ "jsonFormat\x12\xab\x01\n" +
+ "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+ "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+ "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+ "\x11VisibilityFeature\"\x81\x01\n" +
+ "\x17DefaultSymbolVisibility\x12%\n" +
+ "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+ "\n" +
+ "EXPORT_ALL\x10\x01\x12\x14\n" +
+ "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+ "\tLOCAL_ALL\x10\x03\x12\n" +
+ "\n" +
+ "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
+ "\rFieldPresence\x12\x1a\n" +
+ "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+ "\bEXPLICIT\x10\x01\x12\f\n" +
+ "\bIMPLICIT\x10\x02\x12\x13\n" +
+ "\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+ "\bEnumType\x12\x15\n" +
+ "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+ "\x04OPEN\x10\x01\x12\n" +
+ "\n" +
+ "\x06CLOSED\x10\x02\"V\n" +
+ "\x15RepeatedFieldEncoding\x12#\n" +
+ "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06PACKED\x10\x01\x12\f\n" +
+ "\bEXPANDED\x10\x02\"I\n" +
+ "\x0eUtf8Validation\x12\x1b\n" +
+ "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06VERIFY\x10\x02\x12\b\n" +
+ "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+ "\x0fMessageEncoding\x12\x1c\n" +
+ "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+ "\tDELIMITED\x10\x02\"H\n" +
+ "\n" +
+ "JsonFormat\x12\x17\n" +
+ "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+ "\x05ALLOW\x10\x01\x12\x16\n" +
+ "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+ "\x12EnforceNamingStyle\x12 \n" +
+ "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+ "\tSTYLE2024\x10\x01\x12\x10\n" +
+ "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+ "\x12FeatureSetDefaults\x12X\n" +
+ "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+ "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+ "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+ "\x18FeatureSetEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+ "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+ "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+ "\x0eSourceCodeInfo\x12D\n" +
+ "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+ "\bLocation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+ "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+ "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+ "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+ "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+ "\x11GeneratedCodeInfo\x12M\n" +
+ "\n" +
+ "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+ "annotation\x1a\xeb\x01\n" +
+ "\n" +
+ "Annotation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+ "\vsource_file\x18\x02 \x01(\tR\n" +
+ "sourceFile\x12\x14\n" +
+ "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+ "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+ "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+ "\bSemantic\x12\b\n" +
+ "\x04NONE\x10\x00\x12\a\n" +
+ "\x03SET\x10\x01\x12\t\n" +
+ "\x05ALIAS\x10\x02*\xa7\x02\n" +
+ "\aEdition\x12\x13\n" +
+ "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+ "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+ "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+ "\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+ "\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+ "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+ "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+ "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+ "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+ "\x10SymbolVisibility\x12\x14\n" +
+ "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+ "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+ "\x11VISIBILITY_EXPORT\x10\x02B~\n" +
+ "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
var (
file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
@@ -5145,143 +5065,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
var file_google_protobuf_descriptor_proto_goTypes = []any{
- (Edition)(0), // 0: google.protobuf.Edition
- (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
- (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
- (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
- (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
- (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
- (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
- (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
- (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
- (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
- (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
- (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
- (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
- (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
- (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
- (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 27: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
- (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
- (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
- (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
- (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport
- (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation
+ (Edition)(0), // 0: google.protobuf.Edition
+ (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+ (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel
+ (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence
+ (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType
+ (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+ (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation
+ (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding
+ (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat
+ (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+ (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 30: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault
+ (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
- 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
- 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
- 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
- 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
- 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
- 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
- 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
- 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
- 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
- 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
- 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
- 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
- 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
- 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
- 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
- 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
- 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
- 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
- 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 77, // [77:77] is the sub-list for method output_type
- 77, // [77:77] is the sub-list for method input_type
- 77, // [77:77] is the sub-list for extension type_name
- 77, // [77:77] is the sub-list for extension extendee
- 0, // [0:77] is the sub-list for field type_name
+ 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+ 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+ 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+ 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+ 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+ 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+ 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+ 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 81, // [81:81] is the sub-list for method output_type
+ 81, // [81:81] is the sub-list for method input_type
+ 81, // [81:81] is the sub-list for extension type_name
+ 81, // [81:81] is the sub-list for extension extendee
+ 0, // [0:81] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5294,8 +5222,8 @@ func file_google_protobuf_descriptor_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
- NumEnums: 17,
- NumMessages: 33,
+ NumEnums: 20,
+ NumMessages: 34,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/operator/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/operator/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 497da66e..1ff0d149 100644
--- a/operator/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/operator/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -412,23 +412,13 @@ func (x *Any) GetValue() []byte {
var File_google_protobuf_any_proto protoreflect.FileDescriptor
-var file_google_protobuf_any_proto_rawDesc = string([]byte{
- 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
- 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
- 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
- 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_any_proto_rawDesc = "" +
+ "\n" +
+ "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+ "\x03Any\x12\x19\n" +
+ "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+ "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_any_proto_rawDescOnce sync.Once
diff --git a/operator/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/operator/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 00ac835c..06d584c1 100644
--- a/operator/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/operator/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -298,24 +298,13 @@ func (x *Timestamp) GetNanos() int32 {
var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
-var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
- 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
- 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
- 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
- 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
- 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
- 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+ "\n" +
+ "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+ "\tTimestamp\x12\x18\n" +
+ "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+ "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+ "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
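
The two hunks above (anypb and timestamppb) only change how the raw file descriptors are spelled in source: newer protoc-gen-go emits the serialized FileDescriptorProto as an escaped string constant instead of a `[]byte{...}` literal. A minimal sketch of why the two spellings are interchangeable; the variable names are mine, and only the leading byte values are taken from the any.proto hunk above:

```go
package main

import (
	"bytes"
	"fmt"
)

// First few bytes of google/protobuf/any.proto's serialized descriptor,
// once as a byte slice (old generated style) and once as an escaped
// string constant (new generated style).
var oldStyle = []byte{0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65}

const newStyle = "\n\x19google"

func main() {
	// Only the source spelling changes; the descriptor payload is identical.
	fmt.Println(bytes.Equal(oldStyle, []byte(newStyle))) // true
}
```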
diff --git a/operator/vendor/gopkg.in/evanphx/json-patch.v4/README.md b/operator/vendor/gopkg.in/evanphx/json-patch.v4/README.md
index 28e35169..86fefd5b 100644
--- a/operator/vendor/gopkg.in/evanphx/json-patch.v4/README.md
+++ b/operator/vendor/gopkg.in/evanphx/json-patch.v4/README.md
@@ -4,7 +4,7 @@
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
[](http://godoc.org/github.com/evanphx/json-patch)
-[](https://travis-ci.org/evanphx/json-patch)
+[](https://github.com/evanphx/json-patch/actions/workflows/go.yml)
[](https://goreportcard.com/report/github.com/evanphx/json-patch)
# Get It!
@@ -14,9 +14,7 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie
go get -u github.com/evanphx/json-patch/v5
```
-**Stable Versions**:
-* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
-* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+If you need version 4, use `go get -u gopkg.in/evanphx/json-patch.v4`
(previous versions below `v3` are unavailable)
@@ -314,4 +312,4 @@ go test -cover ./...
```
Builds for pull requests are tested automatically
-using [TravisCI](https://travis-ci.org/evanphx/json-patch).
+using [GitHub Actions](https://github.com/evanphx/json-patch/actions/workflows/go.yml).
diff --git a/operator/vendor/gopkg.in/evanphx/json-patch.v4/patch.go b/operator/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
index dc2b7e51..95136681 100644
--- a/operator/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
+++ b/operator/vendor/gopkg.in/evanphx/json-patch.v4/patch.go
@@ -3,11 +3,10 @@ package jsonpatch
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"strconv"
"strings"
-
- "github.com/pkg/errors"
)
const (
@@ -277,7 +276,7 @@ func (o Operation) Path() (string, error) {
return op, nil
}
- return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+ return "unknown", fmt.Errorf("operation missing path field: %w", ErrMissing)
}
// From reads the "from" field of the Operation.
@@ -294,7 +293,7 @@ func (o Operation) From() (string, error) {
return op, nil
}
- return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+ return "unknown", fmt.Errorf("operation, missing from field: %w", ErrMissing)
}
func (o Operation) value() *lazyNode {
@@ -319,7 +318,7 @@ func (o Operation) ValueInterface() (interface{}, error) {
return v, nil
}
- return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+ return nil, fmt.Errorf("operation, missing value field: %w", ErrMissing)
}
func isArray(buf []byte) bool {
@@ -359,7 +358,7 @@ func findObject(pd *container, path string) (container, string) {
next, ok := doc.get(decodePatchKey(part))
- if next == nil || ok != nil {
+ if next == nil || ok != nil || next.raw == nil {
return nil, ""
}
@@ -398,7 +397,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) {
func (d *partialDoc) remove(key string) error {
_, ok := (*d)[key]
if !ok {
- return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+ return fmt.Errorf("Unable to remove nonexistent key: %s: %w", key, ErrMissing)
}
delete(*d, key)
@@ -415,10 +414,10 @@ func (d *partialArray) set(key string, val *lazyNode) error {
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(*d) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(*d)
}
@@ -435,7 +434,7 @@ func (d *partialArray) add(key string, val *lazyNode) error {
idx, err := strconv.Atoi(key)
if err != nil {
- return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+ return fmt.Errorf("value was not a proper array index: '%s': %w", key, err)
}
sz := len(*d) + 1
@@ -445,15 +444,15 @@ func (d *partialArray) add(key string, val *lazyNode) error {
cur := *d
if idx >= len(ary) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(ary) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(ary)
}
@@ -475,16 +474,16 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
if idx < 0 {
if !SupportNegativeIndices {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(*d) {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(*d)
}
if idx >= len(*d) {
- return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return nil, fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
return (*d)[idx], nil
@@ -499,15 +498,15 @@ func (d *partialArray) remove(key string) error {
cur := *d
if idx >= len(cur) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < 0 {
if !SupportNegativeIndices {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
if idx < -len(cur) {
- return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+ return fmt.Errorf("Unable to access invalid index: %d: %w", idx, ErrInvalidIndex)
}
idx += len(cur)
}
@@ -525,18 +524,18 @@ func (d *partialArray) remove(key string) error {
func (p Patch) add(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+ return fmt.Errorf("add operation failed to decode path: %w", ErrMissing)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+ return fmt.Errorf("add operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
}
err = con.add(key, op.value())
if err != nil {
- return errors.Wrapf(err, "error in add for path: '%s'", path)
+ return fmt.Errorf("error in add for path: '%s': %w", path, err)
}
return nil
@@ -545,18 +544,18 @@ func (p Patch) add(doc *container, op Operation) error {
func (p Patch) remove(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+ return fmt.Errorf("remove operation failed to decode path: %w", ErrMissing)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+ return fmt.Errorf("remove operation does not apply: doc is missing path: \"%s\": %w", path, ErrMissing)
}
err = con.remove(key)
if err != nil {
- return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ return fmt.Errorf("error in remove for path: '%s': %w", path, err)
}
return nil
@@ -565,7 +564,7 @@ func (p Patch) remove(doc *container, op Operation) error {
func (p Patch) replace(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "replace operation failed to decode path")
+ return fmt.Errorf("replace operation failed to decode path: %w", err)
}
if path == "" {
@@ -574,7 +573,7 @@ func (p Patch) replace(doc *container, op Operation) error {
if val.which == eRaw {
if !val.tryDoc() {
if !val.tryAry() {
- return errors.Wrapf(err, "replace operation value must be object or array")
+ return fmt.Errorf("replace operation value must be object or array: %w", err)
}
}
}
@@ -585,7 +584,7 @@ func (p Patch) replace(doc *container, op Operation) error {
case eDoc:
*doc = &val.doc
case eRaw:
- return errors.Wrapf(err, "replace operation hit impossible case")
+ return fmt.Errorf("replace operation hit impossible case: %w", err)
}
return nil
@@ -594,17 +593,17 @@ func (p Patch) replace(doc *container, op Operation) error {
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+ return fmt.Errorf("replace operation does not apply: doc is missing path: %s: %w", path, ErrMissing)
}
_, ok := con.get(key)
if ok != nil {
- return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+ return fmt.Errorf("replace operation does not apply: doc is missing key: %s: %w", path, ErrMissing)
}
err = con.set(key, op.value())
if err != nil {
- return errors.Wrapf(err, "error in remove for path: '%s'", path)
+ return fmt.Errorf("error in remove for path: '%s': %w", path, err)
}
return nil
@@ -613,39 +612,39 @@ func (p Patch) replace(doc *container, op Operation) error {
func (p Patch) move(doc *container, op Operation) error {
from, err := op.From()
if err != nil {
- return errors.Wrapf(err, "move operation failed to decode from")
+ return fmt.Errorf("move operation failed to decode from: %w", err)
}
con, key := findObject(doc, from)
if con == nil {
- return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+ return fmt.Errorf("move operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", key)
+ return fmt.Errorf("error in move for path: '%s': %w", key, err)
}
err = con.remove(key)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", key)
+ return fmt.Errorf("error in move for path: '%s': %w", key, err)
}
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "move operation failed to decode path")
+ return fmt.Errorf("move operation failed to decode path: %w", err)
}
con, key = findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+ return fmt.Errorf("move operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
}
err = con.add(key, val)
if err != nil {
- return errors.Wrapf(err, "error in move for path: '%s'", path)
+ return fmt.Errorf("error in move for path: '%s': %w", path, err)
}
return nil
@@ -654,7 +653,7 @@ func (p Patch) move(doc *container, op Operation) error {
func (p Patch) test(doc *container, op Operation) error {
path, err := op.Path()
if err != nil {
- return errors.Wrapf(err, "test operation failed to decode path")
+ return fmt.Errorf("test operation failed to decode path: %w", err)
}
if path == "" {
@@ -673,67 +672,67 @@ func (p Patch) test(doc *container, op Operation) error {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
con, key := findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+ return fmt.Errorf("test operation does not apply: is missing path: %s: %w", path, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in test for path: '%s'", path)
+ return fmt.Errorf("error in test for path: '%s': %w", path, err)
}
if val == nil {
- if op.value().raw == nil {
+ if op.value() == nil || op.value().raw == nil {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
} else if op.value() == nil {
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
if val.equal(op.value()) {
return nil
}
- return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+ return fmt.Errorf("testing value %s failed: %w", path, ErrTestFailed)
}
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
from, err := op.From()
if err != nil {
- return errors.Wrapf(err, "copy operation failed to decode from")
+ return fmt.Errorf("copy operation failed to decode from: %w", err)
}
con, key := findObject(doc, from)
if con == nil {
- return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+ return fmt.Errorf("copy operation does not apply: doc is missing from path: %s: %w", from, ErrMissing)
}
val, err := con.get(key)
if err != nil {
- return errors.Wrapf(err, "error in copy for from: '%s'", from)
+ return fmt.Errorf("error in copy for from: '%s': %w", from, err)
}
path, err := op.Path()
if err != nil {
- return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+ return fmt.Errorf("copy operation failed to decode path: %w", ErrMissing)
}
con, key = findObject(doc, path)
if con == nil {
- return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+ return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s: %w", path, ErrMissing)
}
valCopy, sz, err := deepCopy(val)
if err != nil {
- return errors.Wrapf(err, "error while performing deep copy")
+ return fmt.Errorf("error while performing deep copy: %w", err)
}
(*accumulatedCopySize) += int64(sz)
@@ -743,7 +742,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
err = con.add(key, valCopy)
if err != nil {
- return errors.Wrapf(err, "error while adding value during copy")
+ return fmt.Errorf("error while adding value during copy: %w", err)
}
return nil
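
The vendored json-patch.v4 update drops github.com/pkg/errors in favor of standard-library wrapping with `fmt.Errorf` and `%w`. A self-contained sketch of that pattern, using a local sentinel as a stand-in for the package's exported errors (e.g. ErrMissing), showing that callers who classify failures with `errors.Is` keep working:

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for a sentinel such as jsonpatch.ErrMissing, so the
// sketch stays self-contained.
var errMissing = errors.New("missing value")

// wrap mirrors the pattern the vendored patch.go switched to:
// fmt.Errorf with %w instead of errors.Wrapf.
func wrap(path string) error {
	return fmt.Errorf("add operation does not apply: doc is missing path: %q: %w", path, errMissing)
}

func main() {
	err := wrap("/spec/replicas")
	// %w places the sentinel in the unwrap chain, so errors.Is still matches.
	fmt.Println(errors.Is(err, errMissing)) // true
}
```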
diff --git a/operator/vendor/k8s.io/api/admission/v1/doc.go b/operator/vendor/k8s.io/api/admission/v1/doc.go
index cab65282..c8be5b23 100644
--- a/operator/vendor/k8s.io/api/admission/v1/doc.go
+++ b/operator/vendor/k8s.io/api/admission/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=false
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.admission.v1
+
// +groupName=admission.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/admission/v1/generated.pb.go b/operator/vendor/k8s.io/api/admission/v1/generated.pb.go
index f5c41791..b9fe1402 100644
--- a/operator/vendor/k8s.io/api/admission/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/admission/v1/generated.pb.go
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -36,172 +34,11 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} }
-func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} }
-func (*AdmissionRequest) ProtoMessage() {}
-func (*AdmissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_7b47d27831186ccf, []int{0}
-}
-func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionRequest.Merge(m, src)
-}
-func (m *AdmissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo
-
-func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} }
-func (*AdmissionResponse) ProtoMessage() {}
-func (*AdmissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_7b47d27831186ccf, []int{1}
-}
-func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionResponse.Merge(m, src)
-}
-func (m *AdmissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo
-
-func (m *AdmissionReview) Reset() { *m = AdmissionReview{} }
-func (*AdmissionReview) ProtoMessage() {}
-func (*AdmissionReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_7b47d27831186ccf, []int{2}
-}
-func (m *AdmissionReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionReview.Merge(m, src)
-}
-func (m *AdmissionReview) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionReview) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest")
- proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry")
- proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/admission/v1/generated.proto", fileDescriptor_7b47d27831186ccf)
-}
-
-var fileDescriptor_7b47d27831186ccf = []byte{
- // 907 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
- 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xba, 0xf2, 0x61, 0x6d, 0x72, 0x00,
- 0x17, 0xb5, 0xbb, 0x24, 0x82, 0x2a, 0xaa, 0x40, 0x22, 0x4b, 0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda,
- 0x40, 0xc5, 0x01, 0x69, 0x62, 0x4f, 0xed, 0xc1, 0xf6, 0xcc, 0xb2, 0x33, 0xeb, 0xe0, 0x1b, 0x27,
- 0xce, 0x7c, 0x03, 0x8e, 0x7c, 0x06, 0xbe, 0x41, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x2d, 0x72,
- 0x42, 0x33, 0x3b, 0xfb, 0xa7, 0x89, 0x2d, 0x42, 0xc3, 0x29, 0xfb, 0xfe, 0xfc, 0x7e, 0xef, 0xe5,
- 0xf7, 0xf6, 0xbd, 0x35, 0xf8, 0x70, 0xbc, 0x2b, 0x3c, 0xca, 0x7d, 0x1c, 0x52, 0x1f, 0x0f, 0xa6,
- 0x54, 0x08, 0xca, 0x99, 0x3f, 0xdb, 0xf6, 0x87, 0x84, 0x91, 0x08, 0x4b, 0x32, 0xf0, 0xc2, 0x88,
- 0x4b, 0x0e, 0xef, 0x25, 0x89, 0x1e, 0x0e, 0xa9, 0x97, 0x25, 0x7a, 0xb3, 0xed, 0xf6, 0xc3, 0x21,
- 0x95, 0xa3, 0xf8, 0xc4, 0xeb, 0xf3, 0xa9, 0x3f, 0xe4, 0x43, 0xee, 0xeb, 0xfc, 0x93, 0xf8, 0xa5,
- 0xb6, 0xb4, 0xa1, 0x9f, 0x12, 0x9e, 0xf6, 0x83, 0x62, 0xc1, 0x58, 0x8e, 0x08, 0x93, 0xb4, 0x8f,
- 0xe5, 0xea, 0xaa, 0xed, 0x4f, 0xf2, 0xec, 0x29, 0xee, 0x8f, 0x28, 0x23, 0xd1, 0xdc, 0x0f, 0xc7,
- 0x43, 0xe5, 0x10, 0xfe, 0x94, 0x48, 0xbc, 0x0a, 0xe5, 0xaf, 0x43, 0x45, 0x31, 0x93, 0x74, 0x4a,
- 0xae, 0x00, 0x1e, 0xfd, 0x1b, 0x40, 0xf4, 0x47, 0x64, 0x8a, 0x2f, 0xe3, 0xb6, 0x7e, 0xb7, 0x41,
- 0x6b, 0x2f, 0x15, 0x03, 0x91, 0x9f, 0x62, 0x22, 0x24, 0x0c, 0x40, 0x39, 0xa6, 0x03, 0xc7, 0xea,
- 0x5a, 0x3d, 0x3b, 0xf8, 0xf8, 0x6c, 0xd1, 0x29, 0x2d, 0x17, 0x9d, 0xf2, 0xf1, 0xc1, 0xfe, 0xc5,
- 0xa2, 0xf3, 0xfe, 0xba, 0x42, 0x72, 0x1e, 0x12, 0xe1, 0x1d, 0x1f, 0xec, 0x23, 0x05, 0x86, 0x2f,
- 0x40, 0x65, 0x4c, 0xd9, 0xc0, 0xb9, 0xd5, 0xb5, 0x7a, 0x8d, 0x9d, 0x47, 0x5e, 0x2e, 0x7e, 0x06,
- 0xf3, 0xc2, 0xf1, 0x50, 0x39, 0x84, 0xa7, 0x64, 0xf0, 0x66, 0xdb, 0xde, 0x57, 0x11, 0x8f, 0xc3,
- 0x6f, 0x49, 0xa4, 0x9a, 0xf9, 0x86, 0xb2, 0x41, 0xb0, 0x69, 0x8a, 0x57, 0x94, 0x85, 0x34, 0x23,
- 0x1c, 0x81, 0x7a, 0x44, 0x04, 0x8f, 0xa3, 0x3e, 0x71, 0xca, 0x9a, 0xfd, 0xf1, 0x7f, 0x67, 0x47,
- 0x86, 0x21, 0x68, 0x99, 0x0a, 0xf5, 0xd4, 0x83, 0x32, 0x76, 0xf8, 0x29, 0x68, 0x88, 0xf8, 0x24,
- 0x0d, 0x38, 0x15, 0xad, 0xc7, 0x5d, 0x03, 0x68, 0x3c, 0xcb, 0x43, 0xa8, 0x98, 0x07, 0x29, 0x68,
- 0x44, 0x89, 0x92, 0xaa, 0x6b, 0xe7, 0x9d, 0x1b, 0x29, 0xd0, 0x54, 0xa5, 0x50, 0x4e, 0x87, 0x8a,
- 0xdc, 0x70, 0x0e, 0x9a, 0xc6, 0xcc, 0xba, 0xbc, 0x7d, 0x63, 0x49, 0xee, 0x2e, 0x17, 0x9d, 0x26,
- 0x7a, 0x93, 0x16, 0x5d, 0xae, 0x03, 0xbf, 0x06, 0xd0, 0xb8, 0x0a, 0x42, 0x38, 0x4d, 0xad, 0x51,
- 0xdb, 0x68, 0x04, 0xd1, 0x95, 0x0c, 0xb4, 0x02, 0x05, 0xbb, 0xa0, 0xc2, 0xf0, 0x94, 0x38, 0x1b,
- 0x1a, 0x9d, 0x0d, 0xfd, 0x29, 0x9e, 0x12, 0xa4, 0x23, 0xd0, 0x07, 0xb6, 0xfa, 0x2b, 0x42, 0xdc,
- 0x27, 0x4e, 0x55, 0xa7, 0xdd, 0x31, 0x69, 0xf6, 0xd3, 0x34, 0x80, 0xf2, 0x1c, 0xf8, 0x19, 0xb0,
- 0x79, 0xa8, 0x5e, 0x75, 0xca, 0x99, 0x53, 0xd3, 0x00, 0x37, 0x05, 0x1c, 0xa6, 0x81, 0x8b, 0xa2,
- 0x81, 0x72, 0x00, 0x7c, 0x0e, 0xea, 0xb1, 0x20, 0xd1, 0x01, 0x7b, 0xc9, 0x9d, 0xba, 0x16, 0xf4,
- 0x03, 0xaf, 0x78, 0x3e, 0xde, 0x58, 0x7b, 0x25, 0xe4, 0xb1, 0xc9, 0xce, 0xdf, 0xa7, 0xd4, 0x83,
- 0x32, 0x26, 0x78, 0x0c, 0xaa, 0xfc, 0xe4, 0x47, 0xd2, 0x97, 0x8e, 0xad, 0x39, 0x1f, 0xae, 0x1d,
- 0x92, 0xd9, 0x5a, 0x0f, 0xe1, 0xd3, 0x27, 0x3f, 0x4b, 0xc2, 0xd4, 0x7c, 0x82, 0xdb, 0x86, 0xba,
- 0x7a, 0xa8, 0x49, 0x90, 0x21, 0x83, 0x3f, 0x00, 0x9b, 0x4f, 0x06, 0x89, 0xd3, 0x01, 0x6f, 0xc3,
- 0x9c, 0x49, 0x79, 0x98, 0xf2, 0xa0, 0x9c, 0x12, 0x6e, 0x81, 0xea, 0x20, 0x9a, 0xa3, 0x98, 0x39,
- 0x8d, 0xae, 0xd5, 0xab, 0x07, 0x40, 0xf5, 0xb0, 0xaf, 0x3d, 0xc8, 0x44, 0xe0, 0x0b, 0x50, 0xe3,
- 0xa1, 0x12, 0x43, 0x38, 0x9b, 0x6f, 0xd3, 0x41, 0xd3, 0x74, 0x50, 0x3b, 0x4c, 0x58, 0x50, 0x4a,
- 0xb7, 0xf5, 0x47, 0x05, 0xdc, 0x29, 0x5c, 0x28, 0x11, 0x72, 0x26, 0xc8, 0xff, 0x72, 0xa2, 0xee,
- 0x83, 0x1a, 0x9e, 0x4c, 0xf8, 0x29, 0x49, 0xae, 0x54, 0x3d, 0x6f, 0x62, 0x2f, 0x71, 0xa3, 0x34,
- 0x0e, 0x8f, 0x40, 0x55, 0x48, 0x2c, 0x63, 0x61, 0x2e, 0xce, 0x83, 0xeb, 0xad, 0xd7, 0x33, 0x8d,
- 0x49, 0x04, 0x43, 0x44, 0xc4, 0x13, 0x89, 0x0c, 0x0f, 0xec, 0x80, 0x8d, 0x10, 0xcb, 0xfe, 0x48,
- 0x5f, 0x95, 0xcd, 0xc0, 0x5e, 0x2e, 0x3a, 0x1b, 0x47, 0xca, 0x81, 0x12, 0x3f, 0xdc, 0x05, 0xb6,
- 0x7e, 0x78, 0x3e, 0x0f, 0xd3, 0xc5, 0x68, 0xab, 0x11, 0x1d, 0xa5, 0xce, 0x8b, 0xa2, 0x81, 0xf2,
- 0x64, 0xf8, 0xab, 0x05, 0x5a, 0x38, 0x1e, 0x50, 0xb9, 0xc7, 0x18, 0x97, 0x38, 0x99, 0x4a, 0xb5,
- 0x5b, 0xee, 0x35, 0x76, 0xbe, 0xf0, 0xd6, 0x7c, 0x04, 0xbd, 0x2b, 0x12, 0x7b, 0x7b, 0x97, 0x28,
- 0x9e, 0x30, 0x19, 0xcd, 0x03, 0xc7, 0x68, 0xd4, 0xba, 0x1c, 0x46, 0x57, 0x6a, 0xc2, 0x1e, 0xa8,
- 0x9f, 0xe2, 0x88, 0x51, 0x36, 0x14, 0x4e, 0xad, 0x5b, 0x56, 0xab, 0xad, 0x36, 0xe3, 0x3b, 0xe3,
- 0x43, 0x59, 0xb4, 0xfd, 0x25, 0x78, 0x6f, 0x65, 0x39, 0xd8, 0x02, 0xe5, 0x31, 0x99, 0x27, 0x73,
- 0x46, 0xea, 0x11, 0xbe, 0x0b, 0x36, 0x66, 0x78, 0x12, 0x13, 0x3d, 0x33, 0x1b, 0x25, 0xc6, 0xe3,
- 0x5b, 0xbb, 0xd6, 0xd6, 0x9f, 0x16, 0x68, 0x16, 0xfe, 0x8d, 0x19, 0x25, 0xa7, 0xf0, 0x08, 0xd4,
- 0xcc, 0xbd, 0xd1, 0x1c, 0x8d, 0x9d, 0xfb, 0xd7, 0x51, 0x40, 0x03, 0x82, 0x86, 0x7a, 0x15, 0xd2,
- 0x3b, 0x98, 0xd2, 0xa8, 0xd3, 0x10, 0x19, 0x89, 0xcc, 0xc7, 0xed, 0xa3, 0xeb, 0x8b, 0x9a, 0x08,
- 0x90, 0x5a, 0x28, 0x63, 0x0a, 0x3e, 0x3f, 0x3b, 0x77, 0x4b, 0xaf, 0xce, 0xdd, 0xd2, 0xeb, 0x73,
- 0xb7, 0xf4, 0xcb, 0xd2, 0xb5, 0xce, 0x96, 0xae, 0xf5, 0x6a, 0xe9, 0x5a, 0xaf, 0x97, 0xae, 0xf5,
- 0xd7, 0xd2, 0xb5, 0x7e, 0xfb, 0xdb, 0x2d, 0x7d, 0x7f, 0x6f, 0xcd, 0x6f, 0x9d, 0x7f, 0x02, 0x00,
- 0x00, 0xff, 0xff, 0x5c, 0x49, 0x23, 0x22, 0x05, 0x09, 0x00, 0x00,
-}
+func (m *AdmissionReview) Reset() { *m = AdmissionReview{} }
func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -384,7 +221,7 @@ func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
baseI := i
@@ -640,7 +477,7 @@ func (this *AdmissionResponse) String() string {
for k := range this.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
mapStringForAuditAnnotations := "map[string]string{"
for _, k := range keysForAuditAnnotations {
mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
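
The generated admission code above now sorts map keys with the standard library instead of gogo/protobuf's sortkeys helper. A short equivalent sketch of that deterministic iteration (the annotation key is taken from the comments in this diff; the second key is illustrative):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	auditAnnotations := map[string]string{
		"imagepolicy.example.com/error": "image-blacklisted",
		"aaa.example.com/note":          "checked",
	}

	// Collect the keys and sort them with sort.Strings so marshaling and
	// String() output stay deterministic, matching the replacement above.
	keys := make([]string, 0, len(auditAnnotations))
	for k := range auditAnnotations {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s=%s\n", k, auditAnnotations[k])
	}
}
```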
diff --git a/operator/vendor/k8s.io/api/admission/v1/generated.proto b/operator/vendor/k8s.io/api/admission/v1/generated.proto
index 9648aa58..cd5c88ba 100644
--- a/operator/vendor/k8s.io/api/admission/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/admission/v1/generated.proto
@@ -31,23 +31,23 @@ option go_package = "k8s.io/api/admission/v1";
// AdmissionRequest describes the admission.Attributes for the admission request.
message AdmissionRequest {
- // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
optional string uid = 1;
- // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ // kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
- // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ // resource is the fully-qualified resource being requested (for example, v1.pods)
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
- // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // subResource is the subresource being requested, if any (for example, "status" or "scale")
// +optional
optional string subResource = 4;
- // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -60,7 +60,7 @@ message AdmissionRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
- // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // requestResource is the fully-qualified resource of the original API request (for example, v1.pods).
// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -73,42 +73,42 @@ message AdmissionRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
- // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // requestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
optional string requestSubResource = 15;
- // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will contain an empty string.
// +optional
optional string name = 5;
- // Namespace is the namespace associated with the request (if any).
+ // namespace is the namespace associated with the request (if any).
// +optional
optional string namespace = 6;
- // Operation is the operation being performed. This may be different than the operation
+ // operation is the operation being performed. This may be different than the operation
// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
optional string operation = 7;
- // UserInfo is information about the requesting user
+ // userInfo is information about the requesting user
optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
- // Object is the object from the incoming request.
+ // object is the object from the incoming request.
// +optional
optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
- // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // oldObject is the existing object. Only populated for DELETE and UPDATE requests.
// +optional
optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
- // DryRun indicates that modifications will definitely not be persisted for this request.
+ // dryRun indicates that modifications will definitely not be persisted for this request.
// Defaults to false.
// +optional
optional bool dryRun = 11;
- // Options is the operation option structure of the operation being performed.
+ // options is the operation option structure of the operation being performed.
// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
// different than the options the caller provided. e.g. for a patch request the performed
// Operation might be a CREATE, in which case the Options will a
@@ -119,27 +119,27 @@ message AdmissionRequest {
// AdmissionResponse describes an admission response.
message AdmissionResponse {
- // UID is an identifier for the individual request/response.
+ // uid is an identifier for the individual request/response.
// This must be copied over from the corresponding AdmissionRequest.
optional string uid = 1;
- // Allowed indicates whether or not the admission request was permitted.
+ // allowed indicates whether or not the admission request was permitted.
optional bool allowed = 2;
- // Result contains extra details into why an admission request was denied.
+ // status is the result contains extra details into why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
- // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // patch is the patch body. Currently we only support "JSONPatch" which implements RFC 6902.
// +optional
optional bytes patch = 4;
- // The type of Patch. Currently we only allow "JSONPatch".
+ // patchType is the type of Patch. Currently we only allow "JSONPatch".
// +optional
optional string patchType = 5;
- // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
// the admission webhook to add additional context to the audit log for this request.
@@ -151,16 +151,17 @@ message AdmissionResponse {
// Limit warnings to 120 characters if possible.
// Warnings over 256 characters and large numbers of warnings may be truncated.
// +optional
+ // +listType=atomic
repeated string warnings = 7;
}
// AdmissionReview describes an admission review request/response.
message AdmissionReview {
- // Request describes the attributes for the admission request.
+ // request describes the attributes for the admission request.
// +optional
optional AdmissionRequest request = 1;
- // Response describes the attributes for the admission response.
+ // response describes the attributes for the admission response.
// +optional
optional AdmissionResponse response = 2;
}
diff --git a/operator/vendor/k8s.io/api/admission/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/admission/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..4e1ec547
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admission/v1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*AdmissionRequest) ProtoMessage() {}
+
+func (*AdmissionResponse) ProtoMessage() {}
+
+func (*AdmissionReview) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/admission/v1/types.go b/operator/vendor/k8s.io/api/admission/v1/types.go
index 2def92da..395672c3 100644
--- a/operator/vendor/k8s.io/api/admission/v1/types.go
+++ b/operator/vendor/k8s.io/api/admission/v1/types.go
@@ -29,30 +29,30 @@ import (
// AdmissionReview describes an admission review request/response.
type AdmissionReview struct {
metav1.TypeMeta `json:",inline"`
- // Request describes the attributes for the admission request.
+ // request describes the attributes for the admission request.
// +optional
Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
- // Response describes the attributes for the admission response.
+ // response describes the attributes for the admission response.
// +optional
Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
}
// AdmissionRequest describes the admission.Attributes for the admission request.
type AdmissionRequest struct {
- // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
- // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ // kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
- // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ // resource is the fully-qualified resource being requested (for example, v1.pods)
Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
- // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // subResource is the subresource being requested, if any (for example, "status" or "scale")
// +optional
SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
- // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -64,7 +64,7 @@ type AdmissionRequest struct {
// See documentation for the "matchPolicy" field in the webhook configuration type for more details.
// +optional
RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
- // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // requestResource is the fully-qualified resource of the original API request (for example, v1.pods).
// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -76,35 +76,35 @@ type AdmissionRequest struct {
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
- // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // requestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
- // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will contain an empty string.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
- // Namespace is the namespace associated with the request (if any).
+ // namespace is the namespace associated with the request (if any).
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
- // Operation is the operation being performed. This may be different than the operation
+ // operation is the operation being performed. This may be different than the operation
// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
- // UserInfo is information about the requesting user
+ // userInfo is information about the requesting user
UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
- // Object is the object from the incoming request.
+ // object is the object from the incoming request.
// +optional
Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
- // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // oldObject is the existing object. Only populated for DELETE and UPDATE requests.
// +optional
OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
- // DryRun indicates that modifications will definitely not be persisted for this request.
+ // dryRun indicates that modifications will definitely not be persisted for this request.
// Defaults to false.
// +optional
DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
- // Options is the operation option structure of the operation being performed.
+ // options is the operation option structure of the operation being performed.
// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
// different than the options the caller provided. e.g. for a patch request the performed
// Operation might be a CREATE, in which case the Options will a
@@ -115,27 +115,27 @@ type AdmissionRequest struct {
// AdmissionResponse describes an admission response.
type AdmissionResponse struct {
- // UID is an identifier for the individual request/response.
+ // uid is an identifier for the individual request/response.
// This must be copied over from the corresponding AdmissionRequest.
UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
- // Allowed indicates whether or not the admission request was permitted.
+ // allowed indicates whether or not the admission request was permitted.
Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
- // Result contains extra details into why an admission request was denied.
+ // status is the result contains extra details into why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
- // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // patch is the patch body. Currently we only support "JSONPatch" which implements RFC 6902.
// +optional
Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
- // The type of Patch. Currently we only allow "JSONPatch".
+ // patchType is the type of Patch. Currently we only allow "JSONPatch".
// +optional
PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
- // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
// the admission webhook to add additional context to the audit log for this request.
@@ -147,6 +147,7 @@ type AdmissionResponse struct {
// Limit warnings to 120 characters if possible.
// Warnings over 256 characters and large numbers of warnings may be truncated.
// +optional
+ // +listType=atomic
Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"`
}
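
For orientation, a minimal sketch of how the AdmissionResponse fields documented in the hunk above fit together: the response must echo the request's uid, and warnings and auditAnnotations are optional context. The UID value, warning text, and annotation key here are illustrative, not taken from this diff:

```go
package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildReview assembles an allow reply for a given AdmissionRequest.
func buildReview(req *admissionv1.AdmissionRequest) *admissionv1.AdmissionReview {
	return &admissionv1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "admission.k8s.io/v1",
			Kind:       "AdmissionReview",
		},
		Response: &admissionv1.AdmissionResponse{
			UID:     req.UID, // must be copied from the corresponding AdmissionRequest
			Allowed: true,
			Warnings: []string{
				"metadata.annotations: deprecated key in use", // keep warnings short (<120 chars)
			},
			AuditAnnotations: map[string]string{
				"example.com/decision": "allowed", // webhook name is prefixed by the apiserver
			},
		},
	}
}

func main() {
	req := &admissionv1.AdmissionRequest{UID: "705ab4f5-6393-11e8-b7cc-42010a800002"}
	review := buildReview(req)
	fmt.Println(review.Response.UID, review.Response.Allowed)
}
```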
diff --git a/operator/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
index 1395a7e1..0fe90aca 100644
--- a/operator/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
@@ -29,21 +29,21 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AdmissionRequest = map[string]string{
"": "AdmissionRequest describes the admission.Attributes for the admission request.",
- "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
- "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
- "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)",
- "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
- "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
- "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
- "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
- "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
- "namespace": "Namespace is the namespace associated with the request (if any).",
- "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
- "userInfo": "UserInfo is information about the requesting user",
- "object": "Object is the object from the incoming request.",
- "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
- "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
- "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+ "uid": "uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+ "kind": "kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+ "resource": "resource is the fully-qualified resource being requested (for example, v1.pods)",
+ "subResource": "subResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+ "requestKind": "requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+ "requestResource": "requestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "requestSubResource": "requestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "name": "name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
+ "namespace": "namespace is the namespace associated with the request (if any).",
+ "operation": "operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+ "userInfo": "userInfo is information about the requesting user",
+ "object": "object is the object from the incoming request.",
+ "oldObject": "oldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+ "dryRun": "dryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+ "options": "options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
}
func (AdmissionRequest) SwaggerDoc() map[string]string {
@@ -52,12 +52,12 @@ func (AdmissionRequest) SwaggerDoc() map[string]string {
var map_AdmissionResponse = map[string]string{
"": "AdmissionResponse describes an admission response.",
- "uid": "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.",
- "allowed": "Allowed indicates whether or not the admission request was permitted.",
- "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
- "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
- "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".",
- "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+ "uid": "uid is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.",
+ "allowed": "allowed indicates whether or not the admission request was permitted.",
+ "status": "status is the result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+ "patch": "patch is the patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+ "patchType": "patchType is the type of Patch. Currently we only allow \"JSONPatch\".",
+ "auditAnnotations": "auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
"warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.",
}
@@ -67,8 +67,8 @@ func (AdmissionResponse) SwaggerDoc() map[string]string {
var map_AdmissionReview = map[string]string{
"": "AdmissionReview describes an admission review request/response.",
- "request": "Request describes the attributes for the admission request.",
- "response": "Response describes the attributes for the admission response.",
+ "request": "request describes the attributes for the admission request.",
+ "response": "response describes the attributes for the admission response.",
}
func (AdmissionReview) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/admission/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/admission/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..b36acc05
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admission/v1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionRequest) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1.AdmissionRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionResponse) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1.AdmissionResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionReview) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1.AdmissionReview"
+}
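
The new zz_generated.model_name.go file above adds an OpenAPIModelName() method per type, returning the fully-qualified OpenAPI model name. A minimal sketch of how a caller could use it, assuming only the vendored package shown above (the modelNamer interface and main function are illustrative, not part of the generated code):

package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admission/v1"
)

// modelNamer is satisfied by the generated OpenAPIModelName() methods.
type modelNamer interface {
	OpenAPIModelName() string
}

func main() {
	// Look up the OpenAPI model name for an AdmissionReview value.
	var review modelNamer = admissionv1.AdmissionReview{}
	fmt.Println(review.OpenAPIModelName()) // io.k8s.api.admission.v1.AdmissionReview
}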
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/doc.go b/operator/vendor/k8s.io/api/admission/v1beta1/doc.go
index 44749568..db856da1 100644
--- a/operator/vendor/k8s.io/api/admission/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=false
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.admission.v1beta1
// +groupName=admission.k8s.io
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
index 22147cbe..e8bb2c06 100644
--- a/operator/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -36,172 +34,11 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} }
-func (m *AdmissionRequest) Reset() { *m = AdmissionRequest{} }
-func (*AdmissionRequest) ProtoMessage() {}
-func (*AdmissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_d8f147b43c61e73e, []int{0}
-}
-func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionRequest.Merge(m, src)
-}
-func (m *AdmissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo
-
-func (m *AdmissionResponse) Reset() { *m = AdmissionResponse{} }
-func (*AdmissionResponse) ProtoMessage() {}
-func (*AdmissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_d8f147b43c61e73e, []int{1}
-}
-func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionResponse.Merge(m, src)
-}
-func (m *AdmissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo
-
-func (m *AdmissionReview) Reset() { *m = AdmissionReview{} }
-func (*AdmissionReview) ProtoMessage() {}
-func (*AdmissionReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_d8f147b43c61e73e, []int{2}
-}
-func (m *AdmissionReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AdmissionReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AdmissionReview.Merge(m, src)
-}
-func (m *AdmissionReview) XXX_Size() int {
- return m.Size()
-}
-func (m *AdmissionReview) XXX_DiscardUnknown() {
- xxx_messageInfo_AdmissionReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest")
- proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry")
- proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_d8f147b43c61e73e)
-}
-
-var fileDescriptor_d8f147b43c61e73e = []byte{
- // 911 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
- 0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xaa, 0xb5, 0xc9, 0x01,
- 0x19, 0xa9, 0x9d, 0x25, 0x11, 0x54, 0x51, 0xc5, 0x25, 0x4b, 0x22, 0x14, 0x90, 0x9a, 0x68, 0x5a,
- 0x43, 0xe1, 0x80, 0x34, 0xb6, 0xa7, 0xf6, 0x60, 0x7b, 0x66, 0xd9, 0x99, 0x4d, 0xf0, 0x8d, 0x3b,
- 0x17, 0xbe, 0x01, 0x5f, 0x80, 0x6f, 0xc1, 0x25, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39,
- 0xa1, 0x99, 0x9d, 0xf5, 0x3a, 0x4e, 0x52, 0xfa, 0xef, 0x94, 0x7d, 0x7f, 0x7e, 0xbf, 0xf7, 0xf2,
- 0x7b, 0xfb, 0xde, 0x1a, 0xdc, 0x1f, 0xef, 0x4a, 0xc4, 0x44, 0x40, 0x22, 0x16, 0x90, 0xc1, 0x94,
- 0x49, 0xc9, 0x04, 0x0f, 0x4e, 0xb6, 0x7b, 0x54, 0x91, 0xed, 0x60, 0x48, 0x39, 0x8d, 0x89, 0xa2,
- 0x03, 0x14, 0xc5, 0x42, 0x09, 0x78, 0x2f, 0xcd, 0x46, 0x24, 0x62, 0x68, 0x99, 0x8d, 0x6c, 0x76,
- 0xf3, 0xc1, 0x90, 0xa9, 0x51, 0xd2, 0x43, 0x7d, 0x31, 0x0d, 0x86, 0x62, 0x28, 0x02, 0x03, 0xea,
- 0x25, 0xcf, 0x8d, 0x65, 0x0c, 0xf3, 0x94, 0x92, 0x35, 0x2f, 0x95, 0x4e, 0xd4, 0x88, 0x72, 0xc5,
- 0xfa, 0x44, 0xa5, 0xf5, 0xd7, 0x4b, 0x37, 0x3f, 0xcf, 0xb3, 0xa7, 0xa4, 0x3f, 0x62, 0x9c, 0xc6,
- 0xb3, 0x20, 0x1a, 0x0f, 0xb5, 0x43, 0x06, 0x53, 0xaa, 0xc8, 0x75, 0xa8, 0xe0, 0x26, 0x54, 0x9c,
- 0x70, 0xc5, 0xa6, 0xf4, 0x0a, 0xe0, 0xe1, 0xff, 0x01, 0x64, 0x7f, 0x44, 0xa7, 0x64, 0x1d, 0xb7,
- 0xf5, 0xa7, 0x0b, 0x1a, 0x7b, 0x99, 0x22, 0x98, 0xfe, 0x92, 0x50, 0xa9, 0x60, 0x08, 0x8a, 0x09,
- 0x1b, 0x78, 0x4e, 0xdb, 0xe9, 0xb8, 0xe1, 0x67, 0x67, 0xf3, 0x56, 0x61, 0x31, 0x6f, 0x15, 0xbb,
- 0x87, 0xfb, 0x17, 0xf3, 0xd6, 0xc7, 0x37, 0x15, 0x52, 0xb3, 0x88, 0x4a, 0xd4, 0x3d, 0xdc, 0xc7,
- 0x1a, 0x0c, 0x9f, 0x81, 0xd2, 0x98, 0xf1, 0x81, 0x77, 0xab, 0xed, 0x74, 0x6a, 0x3b, 0x0f, 0x51,
- 0x3e, 0x81, 0x25, 0x0c, 0x45, 0xe3, 0xa1, 0x76, 0x48, 0xa4, 0x65, 0x40, 0x27, 0xdb, 0xe8, 0xeb,
- 0x58, 0x24, 0xd1, 0x77, 0x34, 0xd6, 0xcd, 0x7c, 0xcb, 0xf8, 0x20, 0xdc, 0xb4, 0xc5, 0x4b, 0xda,
- 0xc2, 0x86, 0x11, 0x8e, 0x40, 0x35, 0xa6, 0x52, 0x24, 0x71, 0x9f, 0x7a, 0x45, 0xc3, 0xfe, 0xe8,
- 0xcd, 0xd9, 0xb1, 0x65, 0x08, 0x1b, 0xb6, 0x42, 0x35, 0xf3, 0xe0, 0x25, 0x3b, 0xfc, 0x02, 0xd4,
- 0x64, 0xd2, 0xcb, 0x02, 0x5e, 0xc9, 0xe8, 0x71, 0xd7, 0x02, 0x6a, 0x4f, 0xf2, 0x10, 0x5e, 0xcd,
- 0x83, 0x0c, 0xd4, 0xe2, 0x54, 0x49, 0xdd, 0xb5, 0xf7, 0xc1, 0x3b, 0x29, 0x50, 0xd7, 0xa5, 0x70,
- 0x4e, 0x87, 0x57, 0xb9, 0xe1, 0x0c, 0xd4, 0xad, 0xb9, 0xec, 0xf2, 0xf6, 0x3b, 0x4b, 0x72, 0x77,
- 0x31, 0x6f, 0xd5, 0xf1, 0x65, 0x5a, 0xbc, 0x5e, 0x07, 0x7e, 0x03, 0xa0, 0x75, 0xad, 0x08, 0xe1,
- 0xd5, 0x8d, 0x46, 0x4d, 0xab, 0x11, 0xc4, 0x57, 0x32, 0xf0, 0x35, 0x28, 0xd8, 0x06, 0x25, 0x4e,
- 0xa6, 0xd4, 0xdb, 0x30, 0xe8, 0xe5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x26, 0x02, 0x03, 0xe0, 0xea,
- 0xbf, 0x32, 0x22, 0x7d, 0xea, 0x95, 0x4d, 0xda, 0x1d, 0x9b, 0xe6, 0x3e, 0xce, 0x02, 0x38, 0xcf,
- 0x81, 0x5f, 0x02, 0x57, 0x44, 0xfa, 0x55, 0x67, 0x82, 0x7b, 0x15, 0x03, 0xf0, 0x33, 0xc0, 0x51,
- 0x16, 0xb8, 0x58, 0x35, 0x70, 0x0e, 0x80, 0x4f, 0x41, 0x35, 0x91, 0x34, 0x3e, 0xe4, 0xcf, 0x85,
- 0x57, 0x35, 0x82, 0x7e, 0x82, 0x56, 0x6f, 0xc8, 0xa5, 0xb5, 0xd7, 0x42, 0x76, 0x6d, 0x76, 0xfe,
- 0x3e, 0x65, 0x1e, 0xbc, 0x64, 0x82, 0x5d, 0x50, 0x16, 0xbd, 0x9f, 0x69, 0x5f, 0x79, 0xae, 0xe1,
- 0x7c, 0x70, 0xe3, 0x90, 0xec, 0xd6, 0x22, 0x4c, 0x4e, 0x0f, 0x7e, 0x55, 0x94, 0xeb, 0xf9, 0x84,
- 0xb7, 0x2d, 0x75, 0xf9, 0xc8, 0x90, 0x60, 0x4b, 0x06, 0x7f, 0x02, 0xae, 0x98, 0x0c, 0x52, 0xa7,
- 0x07, 0xde, 0x86, 0x79, 0x29, 0xe5, 0x51, 0xc6, 0x83, 0x73, 0x4a, 0xb8, 0x05, 0xca, 0x83, 0x78,
- 0x86, 0x13, 0xee, 0xd5, 0xda, 0x4e, 0xa7, 0x1a, 0x02, 0xdd, 0xc3, 0xbe, 0xf1, 0x60, 0x1b, 0x81,
- 0xcf, 0x40, 0x45, 0x44, 0x5a, 0x0c, 0xe9, 0x6d, 0xbe, 0x4d, 0x07, 0x75, 0xdb, 0x41, 0xe5, 0x28,
- 0x65, 0xc1, 0x19, 0xdd, 0xd6, 0x5f, 0x25, 0x70, 0x67, 0xe5, 0x42, 0xc9, 0x48, 0x70, 0x49, 0xdf,
- 0xcb, 0x89, 0xfa, 0x14, 0x54, 0xc8, 0x64, 0x22, 0x4e, 0x69, 0x7a, 0xa5, 0xaa, 0x79, 0x13, 0x7b,
- 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x0c, 0xca, 0x52, 0x11, 0x95, 0x48, 0x7b, 0x71, 0xee, 0xbf, 0xde,
- 0x7a, 0x3d, 0x31, 0x98, 0x54, 0x30, 0x4c, 0x65, 0x32, 0x51, 0xd8, 0xf2, 0xc0, 0x16, 0xd8, 0x88,
- 0x88, 0xea, 0x8f, 0xcc, 0x55, 0xd9, 0x0c, 0xdd, 0xc5, 0xbc, 0xb5, 0x71, 0xac, 0x1d, 0x38, 0xf5,
- 0xc3, 0x5d, 0xe0, 0x9a, 0x87, 0xa7, 0xb3, 0x28, 0x5b, 0x8c, 0xa6, 0x1e, 0xd1, 0x71, 0xe6, 0xbc,
- 0x58, 0x35, 0x70, 0x9e, 0x0c, 0x7f, 0x77, 0x40, 0x83, 0x24, 0x03, 0xa6, 0xf6, 0x38, 0x17, 0x8a,
- 0xa4, 0x53, 0x29, 0xb7, 0x8b, 0x9d, 0xda, 0xce, 0x01, 0x7a, 0xd5, 0x97, 0x10, 0x5d, 0xd1, 0x19,
- 0xed, 0xad, 0xf1, 0x1c, 0x70, 0x15, 0xcf, 0x42, 0xcf, 0x0a, 0xd5, 0x58, 0x0f, 0xe3, 0x2b, 0x85,
- 0x61, 0x07, 0x54, 0x4f, 0x49, 0xcc, 0x19, 0x1f, 0x4a, 0xaf, 0xd2, 0x2e, 0xea, 0xfd, 0xd6, 0xeb,
- 0xf1, 0xbd, 0xf5, 0xe1, 0x65, 0xb4, 0xf9, 0x15, 0xf8, 0xe8, 0xda, 0x72, 0xb0, 0x01, 0x8a, 0x63,
- 0x3a, 0x4b, 0x87, 0x8d, 0xf5, 0x23, 0xfc, 0x10, 0x6c, 0x9c, 0x90, 0x49, 0x42, 0xcd, 0xe0, 0x5c,
- 0x9c, 0x1a, 0x8f, 0x6e, 0xed, 0x3a, 0x5b, 0x7f, 0x3b, 0xa0, 0xbe, 0xf2, 0x6f, 0x9c, 0x30, 0x7a,
- 0x0a, 0xbb, 0xa0, 0x62, 0x8f, 0x8e, 0xe1, 0xa8, 0xed, 0xa0, 0xd7, 0x96, 0xc1, 0xa0, 0xc2, 0x9a,
- 0x7e, 0x29, 0xb2, 0x8b, 0x98, 0x71, 0xc1, 0x1f, 0xcc, 0x87, 0xc8, 0xe8, 0x64, 0x3f, 0x73, 0xc1,
- 0x1b, 0xca, 0x9b, 0x4a, 0x91, 0x59, 0x78, 0x49, 0x17, 0x86, 0x67, 0xe7, 0x7e, 0xe1, 0xc5, 0xb9,
- 0x5f, 0x78, 0x79, 0xee, 0x17, 0x7e, 0x5b, 0xf8, 0xce, 0xd9, 0xc2, 0x77, 0x5e, 0x2c, 0x7c, 0xe7,
- 0xe5, 0xc2, 0x77, 0xfe, 0x59, 0xf8, 0xce, 0x1f, 0xff, 0xfa, 0x85, 0x1f, 0xef, 0xbd, 0xea, 0x47,
- 0xd0, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0x6e, 0x31, 0x41, 0x23, 0x09, 0x00, 0x00,
-}
+func (m *AdmissionReview) Reset() { *m = AdmissionReview{} }
func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -384,7 +221,7 @@ func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
baseI := i
@@ -640,7 +477,7 @@ func (this *AdmissionResponse) String() string {
for k := range this.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
mapStringForAuditAnnotations := "map[string]string{"
for _, k := range keysForAuditAnnotations {
mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
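
Both sortkeys call sites above now use the standard library: the keys of the AuditAnnotations map are collected and ordered with sort.Strings so marshalling and String() output stay deterministic without the gogo helper. A self-contained sketch of the same pattern (map contents and names are illustrative):

package main

import (
	"fmt"
	"sort"
)

func main() {
	auditAnnotations := map[string]string{"b.example.com/reason": "denied", "a.example.com/reason": "mutated"}

	// Collect the keys, then sort them so iteration order is deterministic.
	keys := make([]string, 0, len(auditAnnotations))
	for k := range auditAnnotations {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s=%s\n", k, auditAnnotations[k])
	}
}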
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/generated.proto b/operator/vendor/k8s.io/api/admission/v1beta1/generated.proto
index d27c05b7..5af23499 100644
--- a/operator/vendor/k8s.io/api/admission/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/generated.proto
@@ -31,23 +31,23 @@ option go_package = "k8s.io/api/admission/v1beta1";
// AdmissionRequest describes the admission.Attributes for the admission request.
message AdmissionRequest {
- // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
optional string uid = 1;
- // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ // kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
- // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ // resource is the fully-qualified resource being requested (for example, v1.pods)
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
- // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // subResource is the subresource being requested, if any (for example, "status" or "scale")
// +optional
optional string subResource = 4;
- // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -60,7 +60,7 @@ message AdmissionRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
- // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // requestResource is the fully-qualified resource of the original API request (for example, v1.pods).
// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -73,42 +73,42 @@ message AdmissionRequest {
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
- // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // requestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
optional string requestSubResource = 15;
- // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will contain an empty string.
// +optional
optional string name = 5;
- // Namespace is the namespace associated with the request (if any).
+ // namespace is the namespace associated with the request (if any).
// +optional
optional string namespace = 6;
- // Operation is the operation being performed. This may be different than the operation
+ // operation is the operation being performed. This may be different than the operation
// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
optional string operation = 7;
- // UserInfo is information about the requesting user
+ // userInfo is information about the requesting user
optional .k8s.io.api.authentication.v1.UserInfo userInfo = 8;
- // Object is the object from the incoming request.
+ // object is the object from the incoming request.
// +optional
optional .k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
- // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // oldObject is the existing object. Only populated for DELETE and UPDATE requests.
// +optional
optional .k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
- // DryRun indicates that modifications will definitely not be persisted for this request.
+ // dryRun indicates that modifications will definitely not be persisted for this request.
// Defaults to false.
// +optional
optional bool dryRun = 11;
- // Options is the operation option structure of the operation being performed.
+ // options is the operation option structure of the operation being performed.
// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
// different than the options the caller provided. e.g. for a patch request the performed
// Operation might be a CREATE, in which case the Options will a
@@ -119,27 +119,27 @@ message AdmissionRequest {
// AdmissionResponse describes an admission response.
message AdmissionResponse {
- // UID is an identifier for the individual request/response.
+ // uid is an identifier for the individual request/response.
// This should be copied over from the corresponding AdmissionRequest.
optional string uid = 1;
- // Allowed indicates whether or not the admission request was permitted.
+ // allowed indicates whether or not the admission request was permitted.
optional bool allowed = 2;
- // Result contains extra details into why an admission request was denied.
+ // status is the result that contains extra details into why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
- // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // patch is the patch body. Currently we only support "JSONPatch" which implements RFC 6902.
// +optional
optional bytes patch = 4;
- // The type of Patch. Currently we only allow "JSONPatch".
+ // patchType is the type of Patch. Currently we only allow "JSONPatch".
// +optional
optional string patchType = 5;
- // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
// the admission webhook to add additional context to the audit log for this request.
@@ -151,16 +151,17 @@ message AdmissionResponse {
// Limit warnings to 120 characters if possible.
// Warnings over 256 characters and large numbers of warnings may be truncated.
// +optional
+ // +listType=atomic
repeated string warnings = 7;
}
// AdmissionReview describes an admission review request/response.
message AdmissionReview {
- // Request describes the attributes for the admission request.
+ // request describes the attributes for the admission request.
// +optional
optional AdmissionRequest request = 1;
- // Response describes the attributes for the admission response.
+ // response describes the attributes for the admission response.
// +optional
optional AdmissionResponse response = 2;
}
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/admission/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..95c70229
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*AdmissionRequest) ProtoMessage() {}
+
+func (*AdmissionResponse) ProtoMessage() {}
+
+func (*AdmissionReview) ProtoMessage() {}
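
The ProtoMessage() stubs removed from generated.pb.go reappear here behind the kubernetes_protomessage_one_more_release build tag, so they compile only when that tag is supplied. A self-contained sketch of what that gating means for callers that check for the marker interface (the demo types below are illustrative, not the vendored ones):

package main

import "fmt"

// protoMessage mirrors the marker interface the tag-gated file provides.
type protoMessage interface{ ProtoMessage() }

// withStub stands in for a type whose ProtoMessage() stub was compiled in
// (i.e. built with -tags kubernetes_protomessage_one_more_release).
type withStub struct{}

func (*withStub) ProtoMessage() {}

// withoutStub stands in for the same type built without the tag.
type withoutStub struct{}

func main() {
	_, ok := interface{}(&withStub{}).(protoMessage)
	fmt.Println("marker present:", ok) // true

	_, ok = interface{}(&withoutStub{}).(protoMessage)
	fmt.Println("marker present:", ok) // false
}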
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/types.go b/operator/vendor/k8s.io/api/admission/v1beta1/types.go
index 00c619d9..81941eb3 100644
--- a/operator/vendor/k8s.io/api/admission/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/types.go
@@ -33,30 +33,30 @@ import (
// AdmissionReview describes an admission review request/response.
type AdmissionReview struct {
metav1.TypeMeta `json:",inline"`
- // Request describes the attributes for the admission request.
+ // request describes the attributes for the admission request.
// +optional
Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
- // Response describes the attributes for the admission response.
+ // response describes the attributes for the admission response.
// +optional
Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
}
// AdmissionRequest describes the admission.Attributes for the admission request.
type AdmissionRequest struct {
- // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+ // uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
- // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+ // kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
- // Resource is the fully-qualified resource being requested (for example, v1.pods)
+ // resource is the fully-qualified resource being requested (for example, v1.pods)
Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
- // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+ // subResource is the subresource being requested, if any (for example, "status" or "scale")
// +optional
SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
- // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+ // requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -68,7 +68,7 @@ type AdmissionRequest struct {
// See documentation for the "matchPolicy" field in the webhook configuration type for more details.
// +optional
RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
- // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+ // requestResource is the fully-qualified resource of the original API request (for example, v1.pods).
// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
//
// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
@@ -80,35 +80,35 @@ type AdmissionRequest struct {
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
- // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+ // requestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
// See documentation for the "matchPolicy" field in the webhook configuration type.
// +optional
RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
- // Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
+ // name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will contain an empty string.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
- // Namespace is the namespace associated with the request (if any).
+ // namespace is the namespace associated with the request (if any).
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
- // Operation is the operation being performed. This may be different than the operation
+ // operation is the operation being performed. This may be different than the operation
// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
- // UserInfo is information about the requesting user
+ // userInfo is information about the requesting user
UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
- // Object is the object from the incoming request.
+ // object is the object from the incoming request.
// +optional
Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
- // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+ // oldObject is the existing object. Only populated for DELETE and UPDATE requests.
// +optional
OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
- // DryRun indicates that modifications will definitely not be persisted for this request.
+ // dryRun indicates that modifications will definitely not be persisted for this request.
// Defaults to false.
// +optional
DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
- // Options is the operation option structure of the operation being performed.
+ // options is the operation option structure of the operation being performed.
// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
// different than the options the caller provided. e.g. for a patch request the performed
// Operation might be a CREATE, in which case the Options will a
@@ -119,27 +119,27 @@ type AdmissionRequest struct {
// AdmissionResponse describes an admission response.
type AdmissionResponse struct {
- // UID is an identifier for the individual request/response.
+ // uid is an identifier for the individual request/response.
// This should be copied over from the corresponding AdmissionRequest.
UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
- // Allowed indicates whether or not the admission request was permitted.
+ // allowed indicates whether or not the admission request was permitted.
Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
- // Result contains extra details into why an admission request was denied.
+ // status is the result that contains extra details into why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
- // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+ // patch is the patch body. Currently we only support "JSONPatch" which implements RFC 6902.
// +optional
Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
- // The type of Patch. Currently we only allow "JSONPatch".
+ // patchType is the type of Patch. Currently we only allow "JSONPatch".
// +optional
PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
- // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+ // auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
// the admission webhook to add additional context to the audit log for this request.
@@ -151,6 +151,7 @@ type AdmissionResponse struct {
// Limit warnings to 120 characters if possible.
// Warnings over 256 characters and large numbers of warnings may be truncated.
// +optional
+ // +listType=atomic
Warnings []string `json:"warnings,omitempty" protobuf:"bytes,7,rep,name=warnings"`
}
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
index 82598ed5..25039cf2 100644
--- a/operator/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
@@ -29,21 +29,21 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AdmissionRequest = map[string]string{
"": "AdmissionRequest describes the admission.Attributes for the admission request.",
- "uid": "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
- "kind": "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
- "resource": "Resource is the fully-qualified resource being requested (for example, v1.pods)",
- "subResource": "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
- "requestKind": "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
- "requestResource": "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
- "requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
- "name": "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
- "namespace": "Namespace is the namespace associated with the request (if any).",
- "operation": "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
- "userInfo": "UserInfo is information about the requesting user",
- "object": "Object is the object from the incoming request.",
- "oldObject": "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
- "dryRun": "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
- "options": "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+ "uid": "uid is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+ "kind": "kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+ "resource": "resource is the fully-qualified resource being requested (for example, v1.pods)",
+ "subResource": "subResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+ "requestKind": "requestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+ "requestResource": "requestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "requestSubResource": "requestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+ "name": "name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this field will contain an empty string.",
+ "namespace": "namespace is the namespace associated with the request (if any).",
+ "operation": "operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+ "userInfo": "userInfo is information about the requesting user",
+ "object": "object is the object from the incoming request.",
+ "oldObject": "oldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+ "dryRun": "dryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+ "options": "options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
}
func (AdmissionRequest) SwaggerDoc() map[string]string {
@@ -52,12 +52,12 @@ func (AdmissionRequest) SwaggerDoc() map[string]string {
var map_AdmissionResponse = map[string]string{
"": "AdmissionResponse describes an admission response.",
- "uid": "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
- "allowed": "Allowed indicates whether or not the admission request was permitted.",
- "status": "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
- "patch": "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
- "patchType": "The type of Patch. Currently we only allow \"JSONPatch\".",
- "auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+ "uid": "uid is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
+ "allowed": "allowed indicates whether or not the admission request was permitted.",
+ "status": "status is the result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+ "patch": "patch is the patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+ "patchType": "patchType is the type of Patch. Currently we only allow \"JSONPatch\".",
+ "auditAnnotations": "auditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
"warnings": "warnings is a list of warning messages to return to the requesting API client. Warning messages describe a problem the client making the API request should correct or be aware of. Limit warnings to 120 characters if possible. Warnings over 256 characters and large numbers of warnings may be truncated.",
}
@@ -67,8 +67,8 @@ func (AdmissionResponse) SwaggerDoc() map[string]string {
var map_AdmissionReview = map[string]string{
"": "AdmissionReview describes an admission review request/response.",
- "request": "Request describes the attributes for the admission request.",
- "response": "Response describes the attributes for the admission response.",
+ "request": "request describes the attributes for the admission request.",
+ "response": "response describes the attributes for the admission response.",
}
func (AdmissionReview) SwaggerDoc() map[string]string {
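
The generated SwaggerDoc() maps key each field's description by its JSON field name, with the type-level description stored under the empty key. A small sketch of reading one of these maps, assuming the vendored package above (the fieldDoc helper is illustrative):

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

// fieldDoc returns the generated description for a JSON field name,
// falling back to the type-level description stored under the empty key.
func fieldDoc(docs map[string]string, field string) string {
	if d, ok := docs[field]; ok {
		return d
	}
	return docs[""]
}

func main() {
	docs := admissionv1beta1.AdmissionResponse{}.SwaggerDoc()
	fmt.Println(fieldDoc(docs, "allowed"))
}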
diff --git a/operator/vendor/k8s.io/api/admission/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/admission/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..e67adcd7
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admission/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionRequest) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1beta1.AdmissionRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionResponse) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1beta1.AdmissionResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AdmissionReview) OpenAPIModelName() string {
+ return "io.k8s.api.admission.v1beta1.AdmissionReview"
+}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1/doc.go b/operator/vendor/k8s.io/api/admissionregistration/v1/doc.go
index ec0ebb9c..0bcaeaa5 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1/doc.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.admissionregistration.v1
+
// +groupName=admissionregistration.k8s.io
// Package v1 is the v1 version of the API.
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
index 09295734..91b2f1cb 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
@@ -24,950 +24,67 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-func (*AuditAnnotation) ProtoMessage() {}
-func (*AuditAnnotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{0}
-}
-func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AuditAnnotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuditAnnotation.Merge(m, src)
-}
-func (m *AuditAnnotation) XXX_Size() int {
- return m.Size()
-}
-func (m *AuditAnnotation) XXX_DiscardUnknown() {
- xxx_messageInfo_AuditAnnotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
-
-func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-func (*ExpressionWarning) ProtoMessage() {}
-func (*ExpressionWarning) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{1}
-}
-func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExpressionWarning) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExpressionWarning.Merge(m, src)
-}
-func (m *ExpressionWarning) XXX_Size() int {
- return m.Size()
-}
-func (m *ExpressionWarning) XXX_DiscardUnknown() {
- xxx_messageInfo_ExpressionWarning.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
-
-func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-func (*MatchCondition) ProtoMessage() {}
-func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{2}
-}
-func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchCondition.Merge(m, src)
-}
-func (m *MatchCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
-
-func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (*MatchResources) ProtoMessage() {}
-func (*MatchResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{3}
-}
-func (m *MatchResources) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchResources) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchResources.Merge(m, src)
-}
-func (m *MatchResources) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchResources) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchResources.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchResources proto.InternalMessageInfo
-
-func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
-func (*MutatingWebhook) ProtoMessage() {}
-func (*MutatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{4}
-}
-func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhook) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhook.Merge(m, src)
-}
-func (m *MutatingWebhook) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhook) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhook.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
-
-func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
-func (*MutatingWebhookConfiguration) ProtoMessage() {}
-func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{5}
-}
-func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src)
-}
-func (m *MutatingWebhookConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
-
-func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
-func (*MutatingWebhookConfigurationList) ProtoMessage() {}
-func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{6}
-}
-func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src)
-}
-func (m *MutatingWebhookConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
-
-func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-func (*NamedRuleWithOperations) ProtoMessage() {}
-func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{7}
-}
-func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedRuleWithOperations.Merge(m, src)
-}
-func (m *NamedRuleWithOperations) XXX_Size() int {
- return m.Size()
-}
-func (m *NamedRuleWithOperations) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
-
-func (m *ParamKind) Reset() { *m = ParamKind{} }
-func (*ParamKind) ProtoMessage() {}
-func (*ParamKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{8}
-}
-func (m *ParamKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamKind.Merge(m, src)
-}
-func (m *ParamKind) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamKind) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamKind.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ParamKind proto.InternalMessageInfo
-
-func (m *ParamRef) Reset() { *m = ParamRef{} }
-func (*ParamRef) ProtoMessage() {}
-func (*ParamRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{9}
-}
-func (m *ParamRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamRef.Merge(m, src)
-}
-func (m *ParamRef) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamRef) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ParamRef proto.InternalMessageInfo
-
-func (m *Rule) Reset() { *m = Rule{} }
-func (*Rule) ProtoMessage() {}
-func (*Rule) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{10}
-}
-func (m *Rule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Rule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Rule.Merge(m, src)
-}
-func (m *Rule) XXX_Size() int {
- return m.Size()
-}
-func (m *Rule) XXX_DiscardUnknown() {
- xxx_messageInfo_Rule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Rule proto.InternalMessageInfo
-
-func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} }
-func (*RuleWithOperations) ProtoMessage() {}
-func (*RuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{11}
-}
-func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuleWithOperations) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuleWithOperations.Merge(m, src)
-}
-func (m *RuleWithOperations) XXX_Size() int {
- return m.Size()
-}
-func (m *RuleWithOperations) XXX_DiscardUnknown() {
- xxx_messageInfo_RuleWithOperations.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo
-
-func (m *ServiceReference) Reset() { *m = ServiceReference{} }
-func (*ServiceReference) ProtoMessage() {}
-func (*ServiceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{12}
-}
-func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceReference.Merge(m, src)
-}
-func (m *ServiceReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceReference.DiscardUnknown(m)
-}
+func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
+func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-func (*TypeChecking) ProtoMessage() {}
-func (*TypeChecking) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{13}
-}
-func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeChecking) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeChecking.Merge(m, src)
-}
-func (m *TypeChecking) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeChecking) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeChecking.DiscardUnknown(m)
-}
+func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
-var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
+func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
-func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-func (*ValidatingAdmissionPolicy) ProtoMessage() {}
-func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{14}
-}
-func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m)
-}
+func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
-var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
+func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{15}
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m)
-}
+func (m *ParamKind) Reset() { *m = ParamKind{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
+func (m *ParamRef) Reset() { *m = ParamRef{} }
-func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{16}
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m)
-}
+func (m *Rule) Reset() { *m = Rule{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo
+func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} }
-func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{17}
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{18}
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{19}
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m)
-}
+func (m *ServiceReference) Reset() { *m = ServiceReference{} }
-var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
+func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{20}
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
-func (*ValidatingWebhook) ProtoMessage() {}
-func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{21}
-}
-func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhook) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhook.Merge(m, src)
-}
-func (m *ValidatingWebhook) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhook) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
-func (*ValidatingWebhookConfiguration) ProtoMessage() {}
-func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{22}
-}
-func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src)
-}
-func (m *ValidatingWebhookConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
-func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
-func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{23}
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src)
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
+func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
-func (m *Validation) Reset() { *m = Validation{} }
-func (*Validation) ProtoMessage() {}
-func (*Validation) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{24}
-}
-func (m *Validation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Validation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Validation.Merge(m, src)
-}
-func (m *Validation) XXX_Size() int {
- return m.Size()
-}
-func (m *Validation) XXX_DiscardUnknown() {
- xxx_messageInfo_Validation.DiscardUnknown(m)
-}
+func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
-var xxx_messageInfo_Validation proto.InternalMessageInfo
+func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
-func (m *Variable) Reset() { *m = Variable{} }
-func (*Variable) ProtoMessage() {}
-func (*Variable) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{25}
-}
-func (m *Variable) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Variable) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Variable.Merge(m, src)
-}
-func (m *Variable) XXX_Size() int {
- return m.Size()
-}
-func (m *Variable) XXX_DiscardUnknown() {
- xxx_messageInfo_Variable.DiscardUnknown(m)
-}
+func (m *Validation) Reset() { *m = Validation{} }
-var xxx_messageInfo_Variable proto.InternalMessageInfo
+func (m *Variable) Reset() { *m = Variable{} }
-func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
-func (*WebhookClientConfig) ProtoMessage() {}
-func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_3205c7dc5bf0c9bf, []int{26}
-}
-func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WebhookClientConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WebhookClientConfig.Merge(m, src)
-}
-func (m *WebhookClientConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *WebhookClientConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1.AuditAnnotation")
- proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1.ExpressionWarning")
- proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1.MatchCondition")
- proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1.MatchResources")
- proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhook")
- proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfiguration")
- proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.MutatingWebhookConfigurationList")
- proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.NamedRuleWithOperations")
- proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1.ParamKind")
- proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1.ParamRef")
- proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1.Rule")
- proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1.RuleWithOperations")
- proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1.ServiceReference")
- proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1.TypeChecking")
- proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicy")
- proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec")
- proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyList")
- proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicySpec")
- proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus")
- proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhook")
- proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfiguration")
- proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1.ValidatingWebhookConfigurationList")
- proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1.Validation")
- proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1.Variable")
- proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1.WebhookClientConfig")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/admissionregistration/v1/generated.proto", fileDescriptor_3205c7dc5bf0c9bf)
-}
-
-var fileDescriptor_3205c7dc5bf0c9bf = []byte{
- // 2075 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xf7, 0x8a, 0x94, 0x44, 0x3e, 0xea, 0x8b, 0x13, 0x27, 0xa2, 0x1d, 0x87, 0x2b, 0x6c, 0x82,
- 0xc2, 0x46, 0x63, 0x32, 0xb2, 0x53, 0x27, 0x08, 0x8a, 0x06, 0xa2, 0xfc, 0x01, 0xc5, 0x96, 0x2d,
- 0x8c, 0x12, 0xa9, 0x68, 0xdd, 0x22, 0xab, 0xdd, 0x21, 0xb9, 0x11, 0xb9, 0xbb, 0xd8, 0xd9, 0x65,
- 0xac, 0x9e, 0x8a, 0xf6, 0x5e, 0x14, 0xe8, 0x5f, 0xd0, 0xfe, 0x09, 0xbd, 0xb4, 0x40, 0x4f, 0xbd,
- 0xf9, 0x52, 0x20, 0x3d, 0xd5, 0x87, 0x62, 0x51, 0xb3, 0x97, 0x1e, 0x7a, 0x68, 0xaf, 0x02, 0x8a,
- 0x16, 0x33, 0x3b, 0xfb, 0xc9, 0xa5, 0xb5, 0x96, 0x6d, 0xf5, 0xe2, 0x9b, 0xf6, 0x7d, 0xfc, 0xde,
- 0xbc, 0x37, 0x6f, 0xe6, 0xbd, 0x79, 0x14, 0x5c, 0x3f, 0xfc, 0x98, 0xb6, 0x0c, 0xab, 0xad, 0xda,
- 0x46, 0x5b, 0xd5, 0x87, 0x06, 0xa5, 0x86, 0x65, 0x3a, 0xa4, 0x67, 0x50, 0xd7, 0x51, 0x5d, 0xc3,
- 0x32, 0xdb, 0xa3, 0xf5, 0x76, 0x8f, 0x98, 0xc4, 0x51, 0x5d, 0xa2, 0xb7, 0x6c, 0xc7, 0x72, 0x2d,
- 0xf4, 0x6e, 0xa0, 0xd4, 0x52, 0x6d, 0xa3, 0x95, 0xab, 0xd4, 0x1a, 0xad, 0x5f, 0xbc, 0xda, 0x33,
- 0xdc, 0xbe, 0x77, 0xd0, 0xd2, 0xac, 0x61, 0xbb, 0x67, 0xf5, 0xac, 0x36, 0xd7, 0x3d, 0xf0, 0xba,
- 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xcc, 0x8b, 0x1f, 0xc6, 0x0b, 0x19, 0xaa, 0x5a, 0xdf, 0x30,
- 0x89, 0x73, 0xd4, 0xb6, 0x0f, 0x7b, 0x8c, 0x40, 0xdb, 0x43, 0xe2, 0xaa, 0x39, 0x2b, 0xb9, 0xd8,
- 0x9e, 0xa6, 0xe5, 0x78, 0xa6, 0x6b, 0x0c, 0xc9, 0x84, 0xc2, 0x8d, 0x93, 0x14, 0xa8, 0xd6, 0x27,
- 0x43, 0x35, 0xab, 0xa7, 0x50, 0x58, 0xde, 0xf0, 0x74, 0xc3, 0xdd, 0x30, 0x4d, 0xcb, 0xe5, 0x3e,
- 0xa2, 0x77, 0xa0, 0x74, 0x48, 0x8e, 0x1a, 0xd2, 0x9a, 0x74, 0xb9, 0xda, 0xa9, 0x3d, 0xf6, 0xe5,
- 0x73, 0x63, 0x5f, 0x2e, 0xdd, 0x25, 0x47, 0x98, 0xd1, 0xd1, 0x06, 0x2c, 0x8f, 0xd4, 0x81, 0x47,
- 0x6e, 0x3d, 0xb2, 0x1d, 0xc2, 0x23, 0xd4, 0x98, 0xe1, 0xa2, 0xab, 0x42, 0x74, 0x79, 0x2f, 0xcd,
- 0xc6, 0x59, 0x79, 0x65, 0x00, 0xf5, 0xf8, 0x6b, 0x5f, 0x75, 0x4c, 0xc3, 0xec, 0xa1, 0xf7, 0xa1,
- 0xd2, 0x35, 0xc8, 0x40, 0xc7, 0xa4, 0x2b, 0x00, 0x57, 0x04, 0x60, 0xe5, 0xb6, 0xa0, 0xe3, 0x48,
- 0x02, 0x5d, 0x81, 0xf9, 0xaf, 0x03, 0xc5, 0x46, 0x89, 0x0b, 0x2f, 0x0b, 0xe1, 0x79, 0x81, 0x87,
- 0x43, 0xbe, 0xd2, 0x85, 0xa5, 0x6d, 0xd5, 0xd5, 0xfa, 0x9b, 0x96, 0xa9, 0x1b, 0xdc, 0xc3, 0x35,
- 0x28, 0x9b, 0xea, 0x90, 0x08, 0x17, 0x17, 0x84, 0x66, 0xf9, 0xbe, 0x3a, 0x24, 0x98, 0x73, 0xd0,
- 0x35, 0x00, 0x92, 0xf5, 0x0f, 0x09, 0x39, 0x48, 0xb8, 0x96, 0x90, 0x52, 0xfe, 0x54, 0x16, 0x86,
- 0x30, 0xa1, 0x96, 0xe7, 0x68, 0x84, 0xa2, 0x47, 0x50, 0x67, 0x70, 0xd4, 0x56, 0x35, 0xb2, 0x4b,
- 0x06, 0x44, 0x73, 0x2d, 0x87, 0x5b, 0xad, 0x5d, 0xbb, 0xde, 0x8a, 0x93, 0x2d, 0xda, 0xb1, 0x96,
- 0x7d, 0xd8, 0x63, 0x04, 0xda, 0x62, 0x89, 0xd1, 0x1a, 0xad, 0xb7, 0xee, 0xa9, 0x07, 0x64, 0x10,
- 0xaa, 0x76, 0xde, 0x1c, 0xfb, 0x72, 0xfd, 0x7e, 0x16, 0x11, 0x4f, 0x1a, 0x41, 0x16, 0x2c, 0x59,
- 0x07, 0x5f, 0x11, 0xcd, 0x8d, 0xcc, 0xce, 0x9c, 0xde, 0x2c, 0x1a, 0xfb, 0xf2, 0xd2, 0x83, 0x14,
- 0x1c, 0xce, 0xc0, 0xa3, 0x23, 0x58, 0x74, 0x84, 0xdf, 0xd8, 0x1b, 0x10, 0xda, 0x28, 0xad, 0x95,
- 0x2e, 0xd7, 0xae, 0x7d, 0xb7, 0x55, 0xe0, 0x4c, 0xb5, 0x98, 0x4b, 0x3a, 0x53, 0xdb, 0x37, 0xdc,
- 0xfe, 0x03, 0x9b, 0x04, 0x1c, 0xda, 0x79, 0x53, 0x84, 0x7c, 0x11, 0x27, 0xa1, 0x71, 0xda, 0x12,
- 0xfa, 0x85, 0x04, 0xe7, 0xc9, 0x23, 0x6d, 0xe0, 0xe9, 0x24, 0x25, 0xd7, 0x28, 0xbf, 0x84, 0x25,
- 0x5c, 0x12, 0x4b, 0x38, 0x7f, 0x2b, 0xc7, 0x02, 0xce, 0xb5, 0x8b, 0x6e, 0x42, 0x6d, 0xc8, 0x12,
- 0x61, 0xc7, 0x1a, 0x18, 0xda, 0x51, 0x63, 0x9e, 0xa7, 0x8f, 0x32, 0xf6, 0xe5, 0xda, 0x76, 0x4c,
- 0x3e, 0xf6, 0xe5, 0xe5, 0xc4, 0xe7, 0xe7, 0x47, 0x36, 0xc1, 0x49, 0x35, 0xe5, 0x77, 0x15, 0x58,
- 0xde, 0xf6, 0xd8, 0xa1, 0x34, 0x7b, 0xfb, 0xe4, 0xa0, 0x6f, 0x59, 0x87, 0x05, 0x32, 0xd7, 0x81,
- 0x05, 0x6d, 0x60, 0x10, 0xd3, 0xdd, 0xb4, 0xcc, 0xae, 0xd1, 0x13, 0xdb, 0xfe, 0x71, 0xa1, 0x18,
- 0x08, 0x2b, 0x9b, 0x09, 0xfd, 0xce, 0x79, 0x61, 0x63, 0x21, 0x49, 0xc5, 0x29, 0x1b, 0xe8, 0x21,
- 0xcc, 0x3a, 0x89, 0x3d, 0xff, 0xa8, 0x90, 0xb1, 0x9c, 0x58, 0x2f, 0x0a, 0x5b, 0xb3, 0x41, 0x70,
- 0x03, 0x50, 0x74, 0x0f, 0x16, 0xbb, 0xaa, 0x31, 0xf0, 0x1c, 0x22, 0xe2, 0x59, 0xe6, 0xce, 0x7f,
- 0x8b, 0xe5, 0xc5, 0xed, 0x24, 0xe3, 0xd8, 0x97, 0xeb, 0x29, 0x02, 0x8f, 0x69, 0x5a, 0x39, 0xbb,
- 0x37, 0xd5, 0x53, 0xed, 0x4d, 0xfe, 0xc1, 0x9e, 0xfd, 0xff, 0x1c, 0xec, 0xda, 0xab, 0x3d, 0xd8,
- 0x37, 0xa1, 0x46, 0x0d, 0x9d, 0xdc, 0xea, 0x76, 0x89, 0xe6, 0xd2, 0xc6, 0x5c, 0x1c, 0xb0, 0xdd,
- 0x98, 0xcc, 0x02, 0x16, 0x7f, 0x6e, 0x0e, 0x54, 0x4a, 0x71, 0x52, 0x0d, 0x7d, 0x02, 0x4b, 0xac,
- 0x0c, 0x59, 0x9e, 0xbb, 0x4b, 0x34, 0xcb, 0xd4, 0x29, 0x3f, 0x15, 0xb3, 0xc1, 0x0a, 0x3e, 0x4f,
- 0x71, 0x70, 0x46, 0x12, 0x7d, 0x01, 0xab, 0x51, 0x16, 0x61, 0x32, 0x32, 0xc8, 0xd7, 0x7b, 0xc4,
- 0x61, 0x1f, 0xb4, 0x51, 0x59, 0x2b, 0x5d, 0xae, 0x76, 0xde, 0x1e, 0xfb, 0xf2, 0xea, 0x46, 0xbe,
- 0x08, 0x9e, 0xa6, 0x8b, 0xbe, 0x04, 0xe4, 0x10, 0xc3, 0x1c, 0x59, 0x1a, 0x4f, 0x3f, 0x91, 0x10,
- 0xc0, 0xfd, 0xfb, 0x60, 0xec, 0xcb, 0x08, 0x4f, 0x70, 0x8f, 0x7d, 0xf9, 0xad, 0x49, 0x2a, 0x4f,
- 0x8f, 0x1c, 0x2c, 0x34, 0x82, 0xe5, 0x61, 0xaa, 0xf2, 0xd0, 0xc6, 0x02, 0x3f, 0x21, 0xd7, 0x0b,
- 0x9d, 0x90, 0x74, 0xd5, 0x8a, 0xeb, 0x6b, 0x9a, 0x4e, 0x71, 0xd6, 0x88, 0xf2, 0x44, 0x82, 0x4b,
- 0x99, 0x9b, 0x23, 0x38, 0xa9, 0x5e, 0x00, 0x8e, 0xbe, 0x84, 0x0a, 0x4b, 0x08, 0x5d, 0x75, 0x55,
- 0x51, 0x8e, 0x3e, 0x28, 0x96, 0x3e, 0x41, 0xae, 0x6c, 0x13, 0x57, 0x8d, 0xcb, 0x61, 0x4c, 0xc3,
- 0x11, 0x2a, 0xda, 0x83, 0x8a, 0xb0, 0x4c, 0x1b, 0x33, 0xdc, 0xe7, 0x0f, 0x8b, 0xf9, 0x9c, 0x5e,
- 0x76, 0xa7, 0xcc, 0xac, 0xe0, 0x08, 0x4b, 0xf9, 0x87, 0x04, 0x6b, 0xcf, 0x72, 0xed, 0x9e, 0x41,
- 0x5d, 0xf4, 0x70, 0xc2, 0xbd, 0x56, 0xc1, 0xd3, 0x61, 0xd0, 0xc0, 0xb9, 0xa8, 0xf5, 0x08, 0x29,
- 0x09, 0xd7, 0xba, 0x30, 0x6b, 0xb8, 0x64, 0x18, 0xfa, 0xb5, 0x71, 0x1a, 0xbf, 0x52, 0x6b, 0x8e,
- 0xef, 0xbd, 0x2d, 0x86, 0x8b, 0x03, 0x78, 0xb6, 0x8b, 0xab, 0x53, 0xaa, 0x12, 0xfa, 0x28, 0xae,
- 0xb6, 0xfc, 0xd6, 0x68, 0x48, 0xfc, 0x20, 0xd4, 0x93, 0xb5, 0x92, 0x33, 0x70, 0x5a, 0x0e, 0xfd,
- 0x5c, 0x02, 0xe4, 0x4c, 0xe0, 0x89, 0x2a, 0x71, 0xea, 0x8b, 0xfb, 0xa2, 0x70, 0x00, 0x4d, 0xf2,
- 0x70, 0x8e, 0x39, 0x45, 0x85, 0xea, 0x8e, 0xea, 0xa8, 0xc3, 0xbb, 0x86, 0xa9, 0xb3, 0x5e, 0x4b,
- 0xb5, 0x0d, 0x71, 0x2c, 0x45, 0x65, 0x8b, 0x92, 0x6b, 0x63, 0x67, 0x4b, 0x70, 0x70, 0x42, 0x8a,
- 0xd5, 0xc1, 0x43, 0xc3, 0xd4, 0x45, 0x67, 0x16, 0xd5, 0x41, 0x86, 0x87, 0x39, 0x47, 0xf9, 0xed,
- 0x0c, 0x54, 0xb8, 0x0d, 0xd6, 0x2d, 0x9e, 0x5c, 0x36, 0xdb, 0x50, 0x8d, 0xee, 0x5a, 0x81, 0x5a,
- 0x17, 0x62, 0xd5, 0xe8, 0x5e, 0xc6, 0xb1, 0x0c, 0xfa, 0x11, 0x54, 0x68, 0x78, 0x03, 0x97, 0x4e,
- 0x7f, 0x03, 0x2f, 0xb0, 0x24, 0x8b, 0xee, 0xde, 0x08, 0x12, 0xb9, 0xb0, 0x6a, 0xb3, 0xd5, 0x13,
- 0x97, 0x38, 0xf7, 0x2d, 0xf7, 0xb6, 0xe5, 0x99, 0xfa, 0x86, 0xc6, 0xa2, 0x27, 0xca, 0xdf, 0x27,
- 0xec, 0xce, 0xdb, 0xc9, 0x17, 0x39, 0xf6, 0xe5, 0xb7, 0xa7, 0xb0, 0xf8, 0x5d, 0x35, 0x0d, 0x5a,
- 0xf9, 0xa3, 0x04, 0x65, 0xb6, 0x85, 0xe8, 0xdb, 0x50, 0x55, 0x6d, 0xe3, 0x8e, 0x63, 0x79, 0x76,
- 0x98, 0x5b, 0x8b, 0x2c, 0x14, 0x1b, 0x3b, 0x5b, 0x01, 0x11, 0xc7, 0x7c, 0xb4, 0x0e, 0xb5, 0x78,
- 0x6b, 0x82, 0x63, 0x51, 0xed, 0x2c, 0xb3, 0x0a, 0x11, 0xef, 0x1e, 0xc5, 0x49, 0x19, 0x86, 0x1f,
- 0xe6, 0x65, 0xd0, 0x35, 0x08, 0xfc, 0xa8, 0x75, 0xc6, 0x31, 0x1f, 0xbd, 0x0f, 0xb3, 0x54, 0xb3,
- 0x6c, 0x22, 0x3c, 0x7f, 0x8b, 0x9d, 0x94, 0x5d, 0x46, 0x38, 0xf6, 0xe5, 0x2a, 0xff, 0x83, 0x7b,
- 0x15, 0x08, 0x29, 0xbf, 0x91, 0x20, 0x27, 0x0d, 0xd1, 0xa7, 0x00, 0x56, 0x9c, 0xef, 0x81, 0x4b,
- 0x32, 0xbf, 0xbe, 0x22, 0xea, 0xb1, 0x2f, 0x2f, 0x46, 0x5f, 0x1c, 0x32, 0xa1, 0x82, 0xee, 0x42,
- 0x99, 0x65, 0xb2, 0x38, 0x2a, 0x57, 0x0a, 0x1f, 0x95, 0x38, 0xdd, 0xd8, 0x17, 0xe6, 0x20, 0xca,
- 0xaf, 0x25, 0x58, 0xd9, 0x25, 0xce, 0xc8, 0xd0, 0x08, 0x26, 0x5d, 0xe2, 0x10, 0x53, 0xcb, 0xe4,
- 0xa0, 0x54, 0x20, 0x07, 0xc3, 0xb4, 0x9e, 0x99, 0x9a, 0xd6, 0x97, 0xa0, 0x6c, 0xab, 0x6e, 0x5f,
- 0xbc, 0x91, 0x2a, 0x8c, 0xbb, 0xa3, 0xba, 0x7d, 0xcc, 0xa9, 0x9c, 0x6b, 0x39, 0x2e, 0x8f, 0xeb,
- 0xac, 0xe0, 0x5a, 0x8e, 0x8b, 0x39, 0x55, 0xf9, 0x95, 0x04, 0x0b, 0x2c, 0x0a, 0x9b, 0x7d, 0xa2,
- 0x1d, 0xb2, 0x17, 0xda, 0xcf, 0x24, 0x40, 0x24, 0xfb, 0x6e, 0x0b, 0x62, 0x59, 0xbb, 0x76, 0xa3,
- 0x50, 0x40, 0x26, 0x9e, 0x7d, 0xf1, 0xd5, 0x31, 0xc1, 0xa2, 0x38, 0xc7, 0x9a, 0xf2, 0xe7, 0x19,
- 0xb8, 0xb0, 0xa7, 0x0e, 0x0c, 0x9d, 0x5f, 0xa7, 0x51, 0xd1, 0x17, 0x15, 0xf7, 0xd5, 0x17, 0x36,
- 0x1d, 0xca, 0xd4, 0x26, 0x9a, 0x48, 0x83, 0x4e, 0x21, 0xaf, 0xa7, 0xae, 0x77, 0xd7, 0x26, 0x5a,
- 0xbc, 0x6f, 0xec, 0x0b, 0x73, 0x74, 0x34, 0x80, 0x39, 0xea, 0xaa, 0xae, 0x47, 0xc5, 0xdd, 0x72,
- 0xf3, 0x05, 0xed, 0x70, 0xac, 0xce, 0x92, 0xb0, 0x34, 0x17, 0x7c, 0x63, 0x61, 0x43, 0xf9, 0xb7,
- 0x04, 0x6b, 0x53, 0x75, 0x3b, 0x86, 0xa9, 0xb3, 0xdd, 0x7f, 0xf5, 0xa1, 0x3d, 0x4c, 0x85, 0x76,
- 0xeb, 0xc5, 0x5c, 0x16, 0xcb, 0x9e, 0x16, 0x61, 0xe5, 0x5f, 0x12, 0xbc, 0x77, 0x92, 0xf2, 0x19,
- 0x34, 0x13, 0x5f, 0xa5, 0x9b, 0x89, 0x5b, 0x2f, 0xc5, 0xe9, 0x29, 0x0d, 0xc5, 0x7f, 0x66, 0x4e,
- 0x76, 0x99, 0x45, 0x88, 0x55, 0x64, 0x9b, 0x13, 0xef, 0xc7, 0x45, 0x33, 0xda, 0xba, 0x9d, 0x88,
- 0x83, 0x13, 0x52, 0x68, 0x1f, 0x2a, 0xb6, 0x28, 0xb7, 0x62, 0x03, 0xaf, 0x16, 0xf2, 0x25, 0xac,
- 0xd1, 0x41, 0x25, 0x0c, 0xbf, 0x70, 0x04, 0xc6, 0x1e, 0x3c, 0xc3, 0xd4, 0x54, 0x25, 0xa7, 0xdc,
- 0x9e, 0xd0, 0x43, 0x47, 0xaa, 0xc1, 0x73, 0x23, 0x4d, 0xc3, 0x19, 0x78, 0xb4, 0x0f, 0xf5, 0x91,
- 0x88, 0x92, 0x65, 0x06, 0x85, 0x31, 0x18, 0x25, 0x54, 0x3b, 0x57, 0xd8, 0x33, 0x6d, 0x2f, 0xcb,
- 0x3c, 0xf6, 0xe5, 0x95, 0x2c, 0x11, 0x4f, 0x62, 0x28, 0x63, 0x09, 0xde, 0x99, 0x1a, 0xff, 0x33,
- 0xc8, 0x35, 0x2d, 0x9d, 0x6b, 0xdf, 0x7b, 0xc1, 0x5c, 0x9b, 0x92, 0x64, 0xb3, 0xcf, 0x70, 0x92,
- 0x67, 0xd7, 0x0f, 0xa1, 0x6a, 0x87, 0xcd, 0x5f, 0x8e, 0x97, 0x27, 0xa4, 0x0a, 0xd3, 0x0a, 0x7a,
- 0x85, 0xe8, 0x13, 0xc7, 0x78, 0xc8, 0x83, 0x95, 0xf0, 0x35, 0xc4, 0x54, 0x0d, 0xd3, 0xa5, 0x39,
- 0x93, 0xaf, 0xc2, 0xf9, 0x72, 0x7e, 0xec, 0xcb, 0x2b, 0xdb, 0x19, 0x40, 0x3c, 0x61, 0x02, 0x75,
- 0xa1, 0x16, 0xef, 0x77, 0x38, 0x07, 0x69, 0x3f, 0x57, 0x80, 0x2d, 0xb3, 0xf3, 0x86, 0x88, 0x68,
- 0x2d, 0xa6, 0x51, 0x9c, 0x04, 0x7e, 0xc9, 0xb3, 0x90, 0x9f, 0xc0, 0x8a, 0x9a, 0x1e, 0xfe, 0xd2,
- 0xc6, 0xec, 0x73, 0x3c, 0xd6, 0x32, 0x93, 0xe3, 0x4e, 0x43, 0xac, 0x7f, 0x25, 0xc3, 0xa0, 0x78,
- 0xc2, 0x4e, 0xde, 0xdb, 0x78, 0xee, 0x0c, 0xde, 0xc6, 0xe8, 0xc7, 0x50, 0x1d, 0xa9, 0x8e, 0xa1,
- 0x1e, 0x0c, 0x08, 0x6d, 0xcc, 0x73, 0x8b, 0x57, 0x0b, 0xee, 0x53, 0xa0, 0x15, 0xf7, 0x64, 0x21,
- 0x85, 0xe2, 0x18, 0x52, 0xf9, 0xc3, 0x0c, 0xc8, 0x27, 0xd4, 0x61, 0xf4, 0x19, 0x20, 0xeb, 0x80,
- 0x12, 0x67, 0x44, 0xf4, 0x3b, 0xc1, 0x3c, 0x3e, 0x7c, 0xf9, 0x94, 0xe2, 0x7e, 0xe8, 0xc1, 0x84,
- 0x04, 0xce, 0xd1, 0x42, 0x3d, 0x58, 0x70, 0x13, 0x4d, 0x9a, 0x48, 0xf6, 0xf5, 0x42, 0x2e, 0x25,
- 0xbb, 0xbb, 0xce, 0xca, 0xd8, 0x97, 0x53, 0xfd, 0x1e, 0x4e, 0x01, 0x23, 0x0d, 0x40, 0x8b, 0xf7,
- 0x6a, 0x32, 0xc3, 0x9f, 0x71, 0x3b, 0xc5, 0xfb, 0x14, 0x55, 0x91, 0xc4, 0x16, 0x25, 0x60, 0x95,
- 0xbf, 0xcc, 0x43, 0x3d, 0x8e, 0xde, 0xeb, 0xa9, 0xe7, 0xeb, 0xa9, 0xe7, 0xb4, 0xa9, 0x27, 0xbc,
- 0x9e, 0x7a, 0x9e, 0x6a, 0xea, 0x99, 0x73, 0xef, 0xd6, 0xce, 0x62, 0x26, 0xf9, 0x57, 0x09, 0x9a,
- 0x13, 0x27, 0xfb, 0xac, 0xa7, 0x92, 0xdf, 0x9f, 0x98, 0x4a, 0xde, 0x78, 0xce, 0x26, 0x68, 0xda,
- 0x5c, 0xf2, 0x9f, 0x12, 0x28, 0xcf, 0x76, 0xef, 0x0c, 0x1a, 0xbc, 0x7e, 0xba, 0xc1, 0xdb, 0x3c,
- 0x9d, 0x6f, 0x45, 0x66, 0x93, 0xff, 0x95, 0x00, 0xe2, 0x26, 0x05, 0xbd, 0x07, 0x89, 0x1f, 0x45,
- 0xc5, 0x35, 0x1d, 0x44, 0x28, 0x41, 0x47, 0x57, 0x60, 0x7e, 0x48, 0x28, 0x55, 0x7b, 0xe1, 0xc4,
- 0x22, 0xfa, 0xcd, 0x76, 0x3b, 0x20, 0xe3, 0x90, 0x8f, 0xf6, 0x61, 0xce, 0x21, 0x2a, 0xb5, 0x4c,
- 0x31, 0xb9, 0xf8, 0x94, 0xbd, 0x5a, 0x31, 0xa7, 0x1c, 0xfb, 0xf2, 0x7a, 0x91, 0xdf, 0xd4, 0x5b,
- 0xe2, 0x91, 0xcb, 0x95, 0xb0, 0x80, 0x43, 0x77, 0xa0, 0x2e, 0x6c, 0x24, 0x16, 0x1c, 0x5c, 0xad,
- 0x17, 0xc4, 0x6a, 0xea, 0xdb, 0x59, 0x01, 0x3c, 0xa9, 0xa3, 0x7c, 0x06, 0x95, 0xb0, 0xfe, 0xa3,
- 0x06, 0x94, 0x13, 0x2f, 0xa5, 0xc0, 0x71, 0x4e, 0xc9, 0x04, 0x66, 0x26, 0x3f, 0x30, 0xca, 0xef,
- 0x25, 0x78, 0x23, 0xa7, 0x0a, 0xa1, 0x0b, 0x50, 0xf2, 0x9c, 0x81, 0x08, 0xc1, 0xfc, 0xd8, 0x97,
- 0x4b, 0x5f, 0xe0, 0x7b, 0x98, 0xd1, 0xd0, 0x43, 0x98, 0xa7, 0xc1, 0xfc, 0x48, 0xe4, 0xd1, 0x77,
- 0x0a, 0x6d, 0x76, 0x76, 0xe6, 0xd4, 0xa9, 0xb1, 0xf0, 0x87, 0xd4, 0x10, 0x12, 0x5d, 0x86, 0x8a,
- 0xa6, 0x76, 0x3c, 0x53, 0x17, 0xf3, 0xae, 0x85, 0xe0, 0x75, 0xb6, 0xb9, 0x11, 0xd0, 0x70, 0xc4,
- 0xed, 0x6c, 0x3d, 0x7e, 0xda, 0x3c, 0xf7, 0xcd, 0xd3, 0xe6, 0xb9, 0x27, 0x4f, 0x9b, 0xe7, 0x7e,
- 0x3a, 0x6e, 0x4a, 0x8f, 0xc7, 0x4d, 0xe9, 0x9b, 0x71, 0x53, 0x7a, 0x32, 0x6e, 0x4a, 0x7f, 0x1b,
- 0x37, 0xa5, 0x5f, 0xfe, 0xbd, 0x79, 0xee, 0x07, 0xef, 0x16, 0xf8, 0x6f, 0x8c, 0xff, 0x05, 0x00,
- 0x00, 0xff, 0xff, 0x1e, 0x59, 0xab, 0xd9, 0xb3, 0x21, 0x00, 0x00,
-}
+func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..04a23c59
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1/generated.protomessage.pb.go
@@ -0,0 +1,76 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*AuditAnnotation) ProtoMessage() {}
+
+func (*ExpressionWarning) ProtoMessage() {}
+
+func (*MatchCondition) ProtoMessage() {}
+
+func (*MatchResources) ProtoMessage() {}
+
+func (*MutatingWebhook) ProtoMessage() {}
+
+func (*MutatingWebhookConfiguration) ProtoMessage() {}
+
+func (*MutatingWebhookConfigurationList) ProtoMessage() {}
+
+func (*NamedRuleWithOperations) ProtoMessage() {}
+
+func (*ParamKind) ProtoMessage() {}
+
+func (*ParamRef) ProtoMessage() {}
+
+func (*Rule) ProtoMessage() {}
+
+func (*RuleWithOperations) ProtoMessage() {}
+
+func (*ServiceReference) ProtoMessage() {}
+
+func (*TypeChecking) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicy) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
+
+func (*ValidatingWebhook) ProtoMessage() {}
+
+func (*ValidatingWebhookConfiguration) ProtoMessage() {}
+
+func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
+
+func (*Validation) ProtoMessage() {}
+
+func (*Variable) ProtoMessage() {}
+
+func (*WebhookClientConfig) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1/types.go b/operator/vendor/k8s.io/api/admissionregistration/v1/types.go
index 4efeb267..311c05c0 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1/types.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1/types.go
@@ -1098,17 +1098,18 @@ type MutatingWebhook struct {
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"`
}
-// ReinvocationPolicyType specifies what type of policy the admission hook uses.
+// ReinvocationPolicyType specifies what type of policy is used when other admission plugins also perform
+// modifications.
// +enum
type ReinvocationPolicyType string

const (
- // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
+ // NeverReinvocationPolicy indicates that the mutation must not be called more than once in a
// single admission evaluation.
NeverReinvocationPolicy ReinvocationPolicyType = "Never"

-	// IfNeededReinvocationPolicy indicates that the webhook may be called at least one
+ // IfNeededReinvocationPolicy indicates that the mutation may be called at least one
// additional time as part of the admission evaluation if the object being admitted is
- // modified by other admission plugins after the initial webhook call.
+ // modified by other admission plugins after the initial mutation call.
IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded"
)
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/admissionregistration/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..3264285c
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1/zz_generated.model_name.go
@@ -0,0 +1,157 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AuditAnnotation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.AuditAnnotation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExpressionWarning) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ExpressionWarning"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchCondition) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.MatchCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchResources) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.MatchResources"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhook) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.MutatingWebhook"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhookConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhookConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamedRuleWithOperations) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.NamedRuleWithOperations"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamKind) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ParamKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamRef) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ParamRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Rule) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.Rule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuleWithOperations) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.RuleWithOperations"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceReference) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ServiceReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeChecking) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.TypeChecking"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBinding) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingSpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBindingSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyStatus) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhook) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingWebhook"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhookConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhookConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Validation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.Validation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Variable) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.Variable"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WebhookClientConfig) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1.WebhookClientConfig"
+}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
index 344af9ae..8cac29df 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.admissionregistration.v1alpha1
+
// +groupName=admissionregistration.k8s.io

// Package v1alpha1 is the v1alpha1 version of the API.
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
index 993ff6f2..82290439 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
@@ -24,904 +24,66 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
-func (*ApplyConfiguration) ProtoMessage() {}
-func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{0}
-}
-func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplyConfiguration.Merge(m, src)
-}
-func (m *ApplyConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ApplyConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
-
-func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-func (*AuditAnnotation) ProtoMessage() {}
-func (*AuditAnnotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{1}
-}
-func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AuditAnnotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuditAnnotation.Merge(m, src)
-}
-func (m *AuditAnnotation) XXX_Size() int {
- return m.Size()
-}
-func (m *AuditAnnotation) XXX_DiscardUnknown() {
- xxx_messageInfo_AuditAnnotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
-
-func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-func (*ExpressionWarning) ProtoMessage() {}
-func (*ExpressionWarning) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{2}
-}
-func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExpressionWarning) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExpressionWarning.Merge(m, src)
-}
-func (m *ExpressionWarning) XXX_Size() int {
- return m.Size()
-}
-func (m *ExpressionWarning) XXX_DiscardUnknown() {
- xxx_messageInfo_ExpressionWarning.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
-
-func (m *JSONPatch) Reset() { *m = JSONPatch{} }
-func (*JSONPatch) ProtoMessage() {}
-func (*JSONPatch) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{3}
-}
-func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JSONPatch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JSONPatch.Merge(m, src)
-}
-func (m *JSONPatch) XXX_Size() int {
- return m.Size()
-}
-func (m *JSONPatch) XXX_DiscardUnknown() {
- xxx_messageInfo_JSONPatch.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
-
-func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-func (*MatchCondition) ProtoMessage() {}
-func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{4}
-}
-func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchCondition.Merge(m, src)
-}
-func (m *MatchCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
-
-func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (*MatchResources) ProtoMessage() {}
-func (*MatchResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{5}
-}
-func (m *MatchResources) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchResources) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchResources.Merge(m, src)
-}
-func (m *MatchResources) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchResources) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchResources.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchResources proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
-func (*MutatingAdmissionPolicy) ProtoMessage() {}
-func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{6}
-}
-func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
-func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{7}
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
-func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{8}
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
-func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{9}
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
-func (*MutatingAdmissionPolicyList) ProtoMessage() {}
-func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{10}
-}
-func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
-func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
-func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{11}
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
-}
+func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
+func (m *JSONPatch) Reset() { *m = JSONPatch{} }
-func (m *Mutation) Reset() { *m = Mutation{} }
-func (*Mutation) ProtoMessage() {}
-func (*Mutation) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{12}
-}
-func (m *Mutation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Mutation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Mutation.Merge(m, src)
-}
-func (m *Mutation) XXX_Size() int {
- return m.Size()
-}
-func (m *Mutation) XXX_DiscardUnknown() {
- xxx_messageInfo_Mutation.DiscardUnknown(m)
-}
+func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-var xxx_messageInfo_Mutation proto.InternalMessageInfo
+func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-func (*NamedRuleWithOperations) ProtoMessage() {}
-func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{13}
-}
-func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedRuleWithOperations.Merge(m, src)
-}
-func (m *NamedRuleWithOperations) XXX_Size() int {
- return m.Size()
-}
-func (m *NamedRuleWithOperations) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
-var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
-func (m *ParamKind) Reset() { *m = ParamKind{} }
-func (*ParamKind) ProtoMessage() {}
-func (*ParamKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{14}
-}
-func (m *ParamKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamKind.Merge(m, src)
-}
-func (m *ParamKind) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamKind) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamKind.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
-var xxx_messageInfo_ParamKind proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
-func (m *ParamRef) Reset() { *m = ParamRef{} }
-func (*ParamRef) ProtoMessage() {}
-func (*ParamRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{15}
-}
-func (m *ParamRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamRef.Merge(m, src)
-}
-func (m *ParamRef) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamRef) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamRef.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
-var xxx_messageInfo_ParamRef proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
-func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-func (*TypeChecking) ProtoMessage() {}
-func (*TypeChecking) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{16}
-}
-func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeChecking) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeChecking.Merge(m, src)
-}
-func (m *TypeChecking) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeChecking) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeChecking.DiscardUnknown(m)
-}
+func (m *Mutation) Reset() { *m = Mutation{} }
-var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
+func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-func (*ValidatingAdmissionPolicy) ProtoMessage() {}
-func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{17}
-}
-func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m)
-}
+func (m *ParamKind) Reset() { *m = ParamKind{} }
-var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
+func (m *ParamRef) Reset() { *m = ParamRef{} }
-func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{18}
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m)
-}
+func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{19}
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{20}
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{21}
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{22}
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{23}
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (m *Validation) Reset() { *m = Validation{} }
-func (*Validation) ProtoMessage() {}
-func (*Validation) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{24}
-}
-func (m *Validation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Validation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Validation.Merge(m, src)
-}
-func (m *Validation) XXX_Size() int {
- return m.Size()
-}
-func (m *Validation) XXX_DiscardUnknown() {
- xxx_messageInfo_Validation.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-var xxx_messageInfo_Validation proto.InternalMessageInfo
+func (m *Validation) Reset() { *m = Validation{} }
-func (m *Variable) Reset() { *m = Variable{} }
-func (*Variable) ProtoMessage() {}
-func (*Variable) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c49182728ae0af5, []int{25}
-}
-func (m *Variable) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Variable) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Variable.Merge(m, src)
-}
-func (m *Variable) XXX_Size() int {
- return m.Size()
-}
-func (m *Variable) XXX_DiscardUnknown() {
- xxx_messageInfo_Variable.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Variable proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ApplyConfiguration")
- proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1alpha1.AuditAnnotation")
- proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExpressionWarning")
- proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1alpha1.JSONPatch")
- proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchCondition")
- proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1alpha1.MatchResources")
- proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy")
- proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding")
- proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingList")
- proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec")
- proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyList")
- proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec")
- proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Mutation")
- proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.NamedRuleWithOperations")
- proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamKind")
- proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1alpha1.ParamRef")
- proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1alpha1.TypeChecking")
- proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy")
- proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec")
- proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList")
- proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec")
- proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus")
- proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1alpha1.Validation")
- proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1alpha1.Variable")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptor_2c49182728ae0af5)
-}
-
-var fileDescriptor_2c49182728ae0af5 = []byte{
- // 1783 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xdd, 0x6f, 0x1b, 0x4b,
- 0x15, 0xcf, 0xda, 0xce, 0x87, 0xc7, 0xf9, 0xf2, 0xd0, 0x12, 0x37, 0xa5, 0xde, 0x68, 0x55, 0xa1,
- 0x46, 0x82, 0x35, 0x49, 0x0b, 0xa5, 0x55, 0x51, 0x95, 0x6d, 0x9b, 0xb6, 0x69, 0x9d, 0x44, 0x53,
- 0x94, 0x20, 0x04, 0x12, 0x93, 0xf5, 0xc4, 0xde, 0xc6, 0xfb, 0xc1, 0xce, 0x3a, 0x34, 0x02, 0x89,
- 0x4a, 0x08, 0x09, 0xde, 0x78, 0xe0, 0x85, 0x37, 0xc4, 0x1f, 0xc0, 0x03, 0xfc, 0x05, 0xbc, 0xf5,
- 0xb1, 0x8f, 0xe5, 0x81, 0x15, 0x35, 0x20, 0xf1, 0x0c, 0xd2, 0xbd, 0x52, 0x5e, 0xee, 0xd5, 0xcc,
- 0xce, 0x7e, 0x79, 0xed, 0xc6, 0x4e, 0xd3, 0xf4, 0xe1, 0xde, 0x37, 0xcf, 0xf9, 0xf8, 0x9d, 0x39,
- 0x67, 0xce, 0x9c, 0x39, 0xc7, 0x0b, 0x6e, 0x1d, 0x7c, 0x97, 0xaa, 0x86, 0x5d, 0xc3, 0x8e, 0x51,
- 0xc3, 0x0d, 0xd3, 0xa0, 0xd4, 0xb0, 0x2d, 0x97, 0x34, 0x0d, 0xea, 0xb9, 0xd8, 0x33, 0x6c, 0xab,
- 0x76, 0xb8, 0x82, 0xdb, 0x4e, 0x0b, 0xaf, 0xd4, 0x9a, 0xc4, 0x22, 0x2e, 0xf6, 0x48, 0x43, 0x75,
- 0x5c, 0xdb, 0xb3, 0xe1, 0x72, 0xa0, 0xaa, 0x62, 0xc7, 0x50, 0xfb, 0xaa, 0xaa, 0xa1, 0xea, 0xe2,
- 0x37, 0x9b, 0x86, 0xd7, 0xea, 0xec, 0xa9, 0xba, 0x6d, 0xd6, 0x9a, 0x76, 0xd3, 0xae, 0x71, 0x84,
- 0xbd, 0xce, 0x3e, 0x5f, 0xf1, 0x05, 0xff, 0x15, 0x20, 0x2f, 0x5e, 0x1f, 0x62, 0x53, 0xbd, 0xdb,
- 0x59, 0xbc, 0x11, 0x2b, 0x99, 0x58, 0x6f, 0x19, 0x16, 0x71, 0x8f, 0x6a, 0xce, 0x41, 0x93, 0x11,
- 0x68, 0xcd, 0x24, 0x1e, 0xee, 0xa7, 0x55, 0x1b, 0xa4, 0xe5, 0x76, 0x2c, 0xcf, 0x30, 0x49, 0x46,
- 0xe1, 0x3b, 0x27, 0x29, 0x50, 0xbd, 0x45, 0x4c, 0xdc, 0xab, 0xa7, 0x3c, 0x02, 0x70, 0xcd, 0x71,
- 0xda, 0x47, 0xf7, 0x6c, 0x6b, 0xdf, 0x68, 0x76, 0x02, 0x3f, 0xe0, 0x2a, 0x00, 0xe4, 0x85, 0xe3,
- 0x12, 0xee, 0x61, 0x45, 0x5a, 0x92, 0xae, 0x15, 0x35, 0xf8, 0xca, 0x97, 0xc7, 0xba, 0xbe, 0x0c,
- 0x1e, 0x44, 0x1c, 0x94, 0x90, 0x52, 0x28, 0x98, 0x5b, 0xeb, 0x34, 0x0c, 0x6f, 0xcd, 0xb2, 0x6c,
- 0x2f, 0x80, 0xb9, 0x02, 0xf2, 0x07, 0xe4, 0x48, 0xe8, 0x97, 0x84, 0x7e, 0xfe, 0x09, 0x39, 0x42,
- 0x8c, 0x0e, 0xd7, 0xc0, 0xdc, 0x21, 0x6e, 0x77, 0x48, 0x0c, 0x58, 0xc9, 0x71, 0xd1, 0x05, 0x21,
- 0x3a, 0xb7, 0x93, 0x66, 0xa3, 0x5e, 0x79, 0xa5, 0x0d, 0xca, 0xf1, 0x6a, 0x17, 0xbb, 0x96, 0x61,
- 0x35, 0xe1, 0x37, 0xc0, 0xd4, 0xbe, 0x41, 0xda, 0x0d, 0x44, 0xf6, 0x05, 0xe0, 0xbc, 0x00, 0x9c,
- 0x5a, 0x17, 0x74, 0x14, 0x49, 0xc0, 0x65, 0x30, 0xf9, 0xb3, 0x40, 0xb1, 0x92, 0xe7, 0xc2, 0x73,
- 0x42, 0x78, 0x52, 0xe0, 0xa1, 0x90, 0xaf, 0xdc, 0x05, 0xc5, 0x8d, 0x67, 0x5b, 0x9b, 0xdb, 0xd8,
- 0xd3, 0x5b, 0xa7, 0x8a, 0xd1, 0x3e, 0x98, 0xad, 0x33, 0xe5, 0x7b, 0xb6, 0xd5, 0x30, 0x78, 0x88,
- 0x96, 0x40, 0xc1, 0xc2, 0x26, 0x11, 0xfa, 0xd3, 0x42, 0xbf, 0xb0, 0x89, 0x4d, 0x82, 0x38, 0xa7,
- 0xc7, 0x4e, 0x6e, 0x28, 0x3b, 0x7f, 0x2f, 0x08, 0x43, 0x88, 0x50, 0xbb, 0xe3, 0xea, 0x84, 0xc2,
- 0x17, 0xa0, 0xcc, 0xe0, 0xa8, 0x83, 0x75, 0xf2, 0x8c, 0xb4, 0x89, 0xee, 0xd9, 0x2e, 0xb7, 0x5a,
- 0x5a, 0xbd, 0xae, 0xc6, 0x57, 0x26, 0x4a, 0x1e, 0xd5, 0x39, 0x68, 0x32, 0x02, 0x55, 0x59, 0x8e,
- 0xaa, 0x87, 0x2b, 0xea, 0x53, 0xbc, 0x47, 0xda, 0xa1, 0xaa, 0x76, 0xb1, 0xeb, 0xcb, 0xe5, 0xcd,
- 0x5e, 0x44, 0x94, 0x35, 0x02, 0x6d, 0x30, 0x6b, 0xef, 0x3d, 0x27, 0xba, 0x17, 0x99, 0xcd, 0x9d,
- 0xde, 0x2c, 0xec, 0xfa, 0xf2, 0xec, 0x56, 0x0a, 0x0e, 0xf5, 0xc0, 0xc3, 0x5f, 0x82, 0x19, 0x57,
- 0xf8, 0x8d, 0x3a, 0x6d, 0x42, 0x2b, 0xf9, 0xa5, 0xfc, 0xb5, 0xd2, 0xaa, 0xa6, 0x0e, 0x5d, 0x19,
- 0x54, 0xe6, 0x58, 0x83, 0x29, 0xef, 0x1a, 0x5e, 0x6b, 0xcb, 0x21, 0x01, 0x9f, 0x6a, 0x17, 0x45,
- 0xe0, 0x67, 0x50, 0xd2, 0x00, 0x4a, 0xdb, 0x83, 0xbf, 0x97, 0xc0, 0x05, 0xf2, 0x42, 0x6f, 0x77,
- 0x1a, 0x24, 0x25, 0x57, 0x29, 0x9c, 0xd9, 0x46, 0xbe, 0x26, 0x36, 0x72, 0xe1, 0x41, 0x1f, 0x3b,
- 0xa8, 0xaf, 0x75, 0x78, 0x1f, 0x94, 0x4c, 0x96, 0x14, 0xdb, 0x76, 0xdb, 0xd0, 0x8f, 0x2a, 0x93,
- 0x3c, 0x95, 0x94, 0xae, 0x2f, 0x97, 0xea, 0x31, 0xf9, 0xd8, 0x97, 0xe7, 0x12, 0xcb, 0xef, 0x1f,
- 0x39, 0x04, 0x25, 0xd5, 0x94, 0xff, 0x48, 0x60, 0xa1, 0xde, 0x61, 0x37, 0xdc, 0x6a, 0xae, 0x85,
- 0x9b, 0x0f, 0x78, 0xf0, 0x27, 0x60, 0x8a, 0x1d, 0x5b, 0x03, 0x7b, 0x58, 0xe4, 0xd6, 0xb7, 0x86,
- 0x3b, 0xe4, 0xe0, 0x44, 0xeb, 0xc4, 0xc3, 0x71, 0x6e, 0xc7, 0x34, 0x14, 0xa1, 0xc2, 0x16, 0x28,
- 0x50, 0x87, 0xe8, 0x22, 0x85, 0xd6, 0x47, 0x88, 0xe4, 0x80, 0x3d, 0x3f, 0x73, 0x88, 0x1e, 0xdf,
- 0x3b, 0xb6, 0x42, 0xdc, 0x82, 0xf2, 0x7f, 0x09, 0x54, 0x07, 0xe8, 0x68, 0x86, 0xd5, 0x60, 0x85,
- 0xe6, 0xc3, 0xbb, 0x6b, 0xa7, 0xdc, 0xad, 0xbf, 0xbf, 0xbb, 0x62, 0xeb, 0x03, 0xbd, 0xfe, 0x9f,
- 0x04, 0x94, 0x77, 0xab, 0x3e, 0x35, 0xa8, 0x07, 0x7f, 0x94, 0xf1, 0x5c, 0x1d, 0xf2, 0x36, 0x1b,
- 0x34, 0xf0, 0x3b, 0x2a, 0xc9, 0x21, 0x25, 0xe1, 0xb5, 0x05, 0xc6, 0x0d, 0x8f, 0x98, 0xb4, 0x92,
- 0xe3, 0xf7, 0xe5, 0xf1, 0x99, 0xb9, 0xad, 0xcd, 0x08, 0xab, 0xe3, 0x8f, 0x19, 0x3e, 0x0a, 0xcc,
- 0x28, 0x7f, 0xce, 0x9d, 0xe4, 0x34, 0x8b, 0x10, 0xab, 0xc4, 0x0e, 0x27, 0x6e, 0xc6, 0x15, 0x3b,
- 0x3a, 0xbe, 0xed, 0x88, 0x83, 0x12, 0x52, 0xf0, 0xc7, 0x60, 0xca, 0xc1, 0x2e, 0x36, 0xc3, 0xb7,
- 0x28, 0x5d, 0xf6, 0x4e, 0xf2, 0x66, 0x5b, 0xa8, 0x6a, 0xd3, 0x2c, 0x52, 0xe1, 0x0a, 0x45, 0x90,
- 0xb0, 0x03, 0x66, 0xcd, 0x54, 0x9d, 0xe7, 0x6f, 0x58, 0x69, 0xf5, 0xd6, 0x28, 0x21, 0x4b, 0x01,
- 0x04, 0x15, 0x36, 0x4d, 0x43, 0x3d, 0x46, 0x94, 0x7f, 0x4b, 0xe0, 0xf2, 0x80, 0x80, 0x9d, 0x43,
- 0x7a, 0x34, 0xd3, 0xe9, 0xa1, 0x9d, 0x41, 0x7a, 0xf4, 0xcf, 0x8b, 0x3f, 0x4e, 0x0c, 0x74, 0x93,
- 0x27, 0x04, 0x06, 0x45, 0x7e, 0x12, 0x4f, 0x0c, 0xab, 0x21, 0xfc, 0xbc, 0x31, 0xea, 0xe9, 0x32,
- 0x5d, 0x6d, 0xa6, 0xeb, 0xcb, 0xc5, 0x68, 0x89, 0x62, 0x54, 0xf8, 0x73, 0x30, 0x6f, 0x8a, 0x8e,
- 0x81, 0x01, 0x18, 0x96, 0x47, 0x45, 0x1e, 0xbd, 0xc7, 0x11, 0x5f, 0xe8, 0xfa, 0xf2, 0x7c, 0xbd,
- 0x07, 0x16, 0x65, 0x0c, 0xc1, 0x06, 0x28, 0x1e, 0x62, 0xd7, 0xc0, 0x7b, 0xf1, 0x23, 0x3a, 0x4a,
- 0xf6, 0xee, 0x08, 0x5d, 0xad, 0x2c, 0xa2, 0x5b, 0x0c, 0x29, 0x14, 0xc5, 0xc0, 0xcc, 0x8a, 0xd9,
- 0x09, 0x3a, 0xc6, 0xf0, 0x85, 0xbc, 0x3e, 0xf2, 0x91, 0xda, 0x56, 0x6c, 0x25, 0xa4, 0x50, 0x14,
- 0x03, 0xc3, 0xa7, 0x60, 0x66, 0x1f, 0x1b, 0xed, 0x8e, 0x4b, 0xc4, 0xf3, 0x37, 0xce, 0xef, 0xef,
- 0xd7, 0xd9, 0x63, 0xbe, 0x9e, 0x64, 0x1c, 0xfb, 0x72, 0x39, 0x45, 0xe0, 0x4f, 0x60, 0x5a, 0x19,
- 0xfe, 0x02, 0xcc, 0x99, 0xa9, 0x46, 0x8e, 0x56, 0x26, 0xf8, 0xce, 0x47, 0x3e, 0x95, 0x08, 0x21,
- 0xee, 0x7a, 0xd3, 0x74, 0x8a, 0x7a, 0x4d, 0xc1, 0xdf, 0x48, 0x00, 0xba, 0xc4, 0xb0, 0x0e, 0x6d,
- 0x9d, 0x43, 0xa6, 0x1e, 0xf4, 0x1f, 0x08, 0x18, 0x88, 0x32, 0x12, 0xc7, 0xbe, 0x7c, 0x7b, 0x88,
- 0x19, 0x46, 0xcd, 0x6a, 0xf2, 0x18, 0xf4, 0xb1, 0xa9, 0xfc, 0x35, 0x07, 0xa6, 0xc2, 0x78, 0xc3,
- 0x3b, 0xec, 0x3e, 0x78, 0x7a, 0x8b, 0x49, 0x8b, 0x4e, 0xb5, 0x1a, 0x1e, 0xca, 0x76, 0xc8, 0x38,
- 0x4e, 0x2e, 0x50, 0xac, 0x00, 0x7f, 0x2d, 0x01, 0x88, 0x33, 0xb3, 0x88, 0x28, 0x68, 0xdf, 0x1b,
- 0x21, 0xae, 0xd9, 0x81, 0x46, 0xfb, 0x2a, 0x0b, 0x48, 0x96, 0x8e, 0xfa, 0x18, 0x64, 0xb7, 0xfa,
- 0x39, 0xb5, 0x2d, 0xbe, 0xc7, 0x4a, 0x61, 0xe4, 0x5b, 0x1d, 0x4d, 0x08, 0xc1, 0xad, 0x8e, 0x96,
- 0x28, 0x46, 0x55, 0xde, 0x48, 0x60, 0x61, 0x40, 0x67, 0x07, 0x6f, 0xc6, 0xdd, 0x2b, 0x6f, 0xaf,
- 0x2b, 0xd2, 0x52, 0xfe, 0x5a, 0x51, 0x2b, 0x27, 0xbb, 0x4e, 0xce, 0x40, 0x69, 0x39, 0xf8, 0x2b,
- 0x96, 0x15, 0x19, 0x3c, 0x51, 0x2d, 0x6e, 0x0e, 0xe3, 0x81, 0xda, 0xa7, 0xd1, 0x5c, 0x8c, 0xd2,
- 0x29, 0xc3, 0x43, 0x7d, 0xcc, 0x29, 0x18, 0xc4, 0x85, 0x8c, 0xbd, 0x98, 0xd8, 0x31, 0x76, 0x88,
- 0xdb, 0x6f, 0x46, 0x5a, 0xdb, 0x7e, 0x2c, 0x38, 0x28, 0x21, 0xc5, 0x26, 0xa2, 0x03, 0x56, 0x4f,
- 0x73, 0xe9, 0x89, 0x88, 0x17, 0x46, 0xce, 0x51, 0xfe, 0x92, 0x03, 0xd1, 0x5b, 0x38, 0xc4, 0x00,
- 0x55, 0x03, 0xc5, 0x68, 0x28, 0x11, 0xa8, 0x51, 0xa9, 0x88, 0x06, 0x18, 0x14, 0xcb, 0xb0, 0x37,
- 0x9b, 0x86, 0xa3, 0x4a, 0xfe, 0xf4, 0xa3, 0x0a, 0x7f, 0xb3, 0xa3, 0x21, 0x25, 0x82, 0x84, 0x1e,
- 0x58, 0xe0, 0xf5, 0x9d, 0x78, 0xc4, 0xdd, 0xb4, 0xbd, 0x75, 0xbb, 0x63, 0x35, 0xd6, 0x74, 0x9e,
- 0xeb, 0x05, 0xbe, 0xbb, 0xdb, 0x5d, 0x5f, 0x5e, 0xd8, 0xee, 0x2f, 0x72, 0xec, 0xcb, 0x97, 0x07,
- 0xb0, 0xf8, 0x7d, 0x1a, 0x04, 0xad, 0xfc, 0x41, 0x02, 0xd3, 0x4c, 0xe2, 0x5e, 0x8b, 0xe8, 0x07,
- 0xac, 0x79, 0x65, 0x45, 0x84, 0xf4, 0xce, 0xce, 0x41, 0xb6, 0x95, 0x56, 0xef, 0x8c, 0x90, 0xf0,
- 0x99, 0x01, 0x3c, 0xce, 0x99, 0x0c, 0x8b, 0xa2, 0x3e, 0x36, 0x95, 0x7f, 0xe4, 0xc0, 0xa5, 0x1d,
- 0xdc, 0x36, 0x1a, 0x1f, 0x69, 0xa8, 0x78, 0x9e, 0xea, 0xb2, 0x1f, 0x8d, 0xf4, 0xc4, 0x0d, 0xd8,
- 0xf5, 0xa0, 0x06, 0x1b, 0xba, 0x60, 0x82, 0x7a, 0xd8, 0xeb, 0x84, 0x9d, 0xda, 0xc6, 0x99, 0x58,
- 0xe3, 0x88, 0xda, 0xac, 0xb0, 0x37, 0x11, 0xac, 0x91, 0xb0, 0xa4, 0x7c, 0x2a, 0x81, 0xa5, 0x81,
- 0xba, 0xe7, 0x37, 0xcc, 0xfc, 0x34, 0x15, 0xe6, 0xad, 0xb3, 0x70, 0xfc, 0xa4, 0x71, 0xe6, 0x13,
- 0x09, 0x5c, 0x3d, 0x49, 0xf9, 0x1c, 0x3a, 0x56, 0x27, 0xdd, 0xb1, 0x3e, 0x39, 0x43, 0xd7, 0x07,
- 0xb4, 0xae, 0xbf, 0xcd, 0x9f, 0xec, 0xf8, 0x97, 0x43, 0x4d, 0xea, 0x1f, 0xb2, 0x5d, 0x50, 0x3e,
- 0x14, 0x11, 0xb3, 0xad, 0xa0, 0x6a, 0x06, 0xfd, 0x68, 0x51, 0x5b, 0xee, 0xfa, 0x72, 0x79, 0xa7,
- 0x97, 0x79, 0xec, 0xcb, 0xf3, 0xbd, 0x44, 0x94, 0xc5, 0x50, 0xfe, 0x2b, 0x81, 0x2b, 0x03, 0xcf,
- 0xe2, 0x1c, 0xb2, 0xcf, 0x48, 0x67, 0xdf, 0xfd, 0x33, 0xc9, 0xbe, 0xfe, 0x69, 0xf7, 0xa7, 0x89,
- 0x77, 0xb8, 0xfa, 0x85, 0x98, 0x99, 0xda, 0xa0, 0x14, 0x67, 0x40, 0x38, 0x35, 0x7d, 0xfb, 0x14,
- 0x21, 0xb7, 0x2d, 0xed, 0x2b, 0x22, 0xc6, 0xa5, 0x98, 0x46, 0x51, 0x12, 0x3e, 0x3b, 0xd5, 0x14,
- 0xde, 0x67, 0xaa, 0x79, 0x29, 0x81, 0x79, 0x9c, 0xfe, 0x0f, 0x9f, 0x56, 0xc6, 0xb9, 0x07, 0xb7,
- 0x47, 0xe9, 0xbf, 0xd3, 0x10, 0x5a, 0x45, 0xb8, 0x31, 0xdf, 0xc3, 0xa0, 0x28, 0x63, 0xed, 0x23,
- 0x0f, 0x56, 0xa9, 0x81, 0x77, 0xf2, 0x03, 0x0d, 0xbc, 0xca, 0xdf, 0x72, 0x40, 0x3e, 0xe1, 0x29,
- 0x87, 0x1b, 0x00, 0xda, 0x7b, 0x94, 0xb8, 0x87, 0xa4, 0xf1, 0x30, 0xf8, 0x64, 0x13, 0x76, 0xd0,
- 0xf9, 0xb8, 0xbd, 0xda, 0xca, 0x48, 0xa0, 0x3e, 0x5a, 0xd0, 0x04, 0xd3, 0x5e, 0xa2, 0xf3, 0x1b,
- 0x65, 0x22, 0x10, 0x8e, 0x25, 0x1b, 0x47, 0x6d, 0xbe, 0xeb, 0xcb, 0xa9, 0x56, 0x12, 0xa5, 0xe0,
- 0xa1, 0x0e, 0x80, 0x1e, 0x9f, 0x5e, 0x70, 0x01, 0x6a, 0xc3, 0x95, 0xb3, 0xf8, 0xcc, 0xa2, 0x27,
- 0x28, 0x71, 0x5c, 0x09, 0x58, 0xe5, 0x33, 0x09, 0x80, 0xf8, 0x56, 0xc0, 0xab, 0x20, 0xf1, 0x29,
- 0x44, 0xbc, 0x62, 0x05, 0x06, 0x81, 0x12, 0x74, 0xb8, 0x0c, 0x26, 0x4d, 0x42, 0x29, 0x6e, 0x86,
- 0x73, 0x40, 0xf4, 0xa9, 0xa7, 0x1e, 0x90, 0x51, 0xc8, 0x87, 0xbb, 0x60, 0xc2, 0x25, 0x98, 0x8a,
- 0xf9, 0xb3, 0xa8, 0xdd, 0x65, 0x6d, 0x15, 0xe2, 0x94, 0x63, 0x5f, 0x5e, 0x19, 0xe6, 0xa3, 0x9e,
- 0x2a, 0xba, 0x30, 0xae, 0x84, 0x04, 0x1c, 0x7c, 0x08, 0xca, 0xc2, 0x46, 0x62, 0xc3, 0xc1, 0xad,
- 0xbd, 0x24, 0x76, 0x53, 0xae, 0xf7, 0x0a, 0xa0, 0xac, 0x8e, 0xb2, 0x01, 0xa6, 0xc2, 0xec, 0x82,
- 0x15, 0x50, 0x48, 0x3c, 0xdf, 0x81, 0xe3, 0x9c, 0xd2, 0x13, 0x98, 0x5c, 0xff, 0xc0, 0x68, 0x5b,
- 0xaf, 0xde, 0x56, 0xc7, 0x5e, 0xbf, 0xad, 0x8e, 0xbd, 0x79, 0x5b, 0x1d, 0x7b, 0xd9, 0xad, 0x4a,
- 0xaf, 0xba, 0x55, 0xe9, 0x75, 0xb7, 0x2a, 0xbd, 0xe9, 0x56, 0xa5, 0x7f, 0x76, 0xab, 0xd2, 0xef,
- 0xfe, 0x55, 0x1d, 0xfb, 0xe1, 0xf2, 0xd0, 0x1f, 0x65, 0x3f, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xac,
- 0xc8, 0x8c, 0x78, 0xc0, 0x1d, 0x00, 0x00,
-}
+func (m *Variable) Reset() { *m = Variable{} }
func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..651a01f0
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,74 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*ApplyConfiguration) ProtoMessage() {}
+
+func (*AuditAnnotation) ProtoMessage() {}
+
+func (*ExpressionWarning) ProtoMessage() {}
+
+func (*JSONPatch) ProtoMessage() {}
+
+func (*MatchCondition) ProtoMessage() {}
+
+func (*MatchResources) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+
+func (*Mutation) ProtoMessage() {}
+
+func (*NamedRuleWithOperations) ProtoMessage() {}
+
+func (*ParamKind) ProtoMessage() {}
+
+func (*ParamRef) ProtoMessage() {}
+
+func (*TypeChecking) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicy) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
+
+func (*Validation) ProtoMessage() {}
+
+func (*Variable) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
index f183498a..459f7944 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
@@ -930,7 +930,8 @@ type JSONPatch struct {
Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
}
-// ReinvocationPolicyType specifies what type of policy the admission mutation uses.
+// ReinvocationPolicyType specifies what type of policy is used when other admission plugins also perform
+// modifications.
// +enum
type ReinvocationPolicyType = v1.ReinvocationPolicyType
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..ea43d464
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,152 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ApplyConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ApplyConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AuditAnnotation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExpressionWarning) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JSONPatch) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.JSONPatch"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchCondition) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MatchCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchResources) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MatchResources"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBinding) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBindingSpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBindingSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Mutation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.Mutation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamedRuleWithOperations) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamKind) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ParamKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamRef) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ParamRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeChecking) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.TypeChecking"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBinding) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingSpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyStatus) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Validation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.Validation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Variable) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1alpha1.Variable"
+}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
index 40d83157..016a81fa 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.admissionregistration.v1beta1
+
// +groupName=admissionregistration.k8s.io
// Package v1beta1 is the v1beta1 version of the API.
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index bf1ae594..8e79eae3 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -24,1164 +24,83 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_admissionregistration_v1 "k8s.io/api/admissionregistration/v1"
v11 "k8s.io/api/admissionregistration/v1"
k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-func (m *ApplyConfiguration) Reset() { *m = ApplyConfiguration{} }
-func (*ApplyConfiguration) ProtoMessage() {}
-func (*ApplyConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{0}
-}
-func (m *ApplyConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ApplyConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ApplyConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplyConfiguration.Merge(m, src)
-}
-func (m *ApplyConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ApplyConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplyConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyConfiguration proto.InternalMessageInfo
-
-func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
-func (*AuditAnnotation) ProtoMessage() {}
-func (*AuditAnnotation) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{1}
-}
-func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuditAnnotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AuditAnnotation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuditAnnotation.Merge(m, src)
-}
-func (m *AuditAnnotation) XXX_Size() int {
- return m.Size()
-}
-func (m *AuditAnnotation) XXX_DiscardUnknown() {
- xxx_messageInfo_AuditAnnotation.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
-
-func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-func (*ExpressionWarning) ProtoMessage() {}
-func (*ExpressionWarning) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{2}
-}
-func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExpressionWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExpressionWarning) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExpressionWarning.Merge(m, src)
-}
-func (m *ExpressionWarning) XXX_Size() int {
- return m.Size()
-}
-func (m *ExpressionWarning) XXX_DiscardUnknown() {
- xxx_messageInfo_ExpressionWarning.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
-
-func (m *JSONPatch) Reset() { *m = JSONPatch{} }
-func (*JSONPatch) ProtoMessage() {}
-func (*JSONPatch) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{3}
-}
-func (m *JSONPatch) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JSONPatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JSONPatch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JSONPatch.Merge(m, src)
-}
-func (m *JSONPatch) XXX_Size() int {
- return m.Size()
-}
-func (m *JSONPatch) XXX_DiscardUnknown() {
- xxx_messageInfo_JSONPatch.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JSONPatch proto.InternalMessageInfo
-
-func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-func (*MatchCondition) ProtoMessage() {}
-func (*MatchCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{4}
-}
-func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchCondition.Merge(m, src)
-}
-func (m *MatchCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
-
-func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (*MatchResources) ProtoMessage() {}
-func (*MatchResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{5}
-}
-func (m *MatchResources) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MatchResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MatchResources) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MatchResources.Merge(m, src)
-}
-func (m *MatchResources) XXX_Size() int {
- return m.Size()
-}
-func (m *MatchResources) XXX_DiscardUnknown() {
- xxx_messageInfo_MatchResources.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MatchResources proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
-func (*MutatingAdmissionPolicy) ProtoMessage() {}
-func (*MutatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{6}
-}
-func (m *MutatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicy.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicy proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
-func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{7}
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBinding.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBinding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBinding proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
-func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{8}
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBindingList.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBindingList proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
-func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
-func (*MutatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{9}
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
-func (*MutatingAdmissionPolicyList) ProtoMessage() {}
-func (*MutatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{10}
-}
-func (m *MutatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicyList.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicyList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicyList proto.InternalMessageInfo
-
-func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
-func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
-func (*MutatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{11}
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingAdmissionPolicySpec.Merge(m, src)
-}
-func (m *MutatingAdmissionPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingAdmissionPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingAdmissionPolicySpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingAdmissionPolicySpec proto.InternalMessageInfo
-
-func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
-func (*MutatingWebhook) ProtoMessage() {}
-func (*MutatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{12}
-}
-func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhook) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhook.Merge(m, src)
-}
-func (m *MutatingWebhook) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhook) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhook.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
-
-func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
-func (*MutatingWebhookConfiguration) ProtoMessage() {}
-func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{13}
-}
-func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src)
-}
-func (m *MutatingWebhookConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
-
-func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
-func (*MutatingWebhookConfigurationList) ProtoMessage() {}
-func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{14}
-}
-func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src)
-}
-func (m *MutatingWebhookConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
-
-func (m *Mutation) Reset() { *m = Mutation{} }
-func (*Mutation) ProtoMessage() {}
-func (*Mutation) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{15}
-}
-func (m *Mutation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Mutation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Mutation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Mutation.Merge(m, src)
-}
-func (m *Mutation) XXX_Size() int {
- return m.Size()
-}
-func (m *Mutation) XXX_DiscardUnknown() {
- xxx_messageInfo_Mutation.DiscardUnknown(m)
-}
+func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
-var xxx_messageInfo_Mutation proto.InternalMessageInfo
+func (m *JSONPatch) Reset() { *m = JSONPatch{} }
-func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-func (*NamedRuleWithOperations) ProtoMessage() {}
-func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{16}
-}
-func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamedRuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamedRuleWithOperations) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamedRuleWithOperations.Merge(m, src)
-}
-func (m *NamedRuleWithOperations) XXX_Size() int {
- return m.Size()
-}
-func (m *NamedRuleWithOperations) XXX_DiscardUnknown() {
- xxx_messageInfo_NamedRuleWithOperations.DiscardUnknown(m)
-}
+func (m *MatchCondition) Reset() { *m = MatchCondition{} }
-var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
+func (m *MatchResources) Reset() { *m = MatchResources{} }
-func (m *ParamKind) Reset() { *m = ParamKind{} }
-func (*ParamKind) ProtoMessage() {}
-func (*ParamKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{17}
-}
-func (m *ParamKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamKind.Merge(m, src)
-}
-func (m *ParamKind) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamKind) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamKind.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicy) Reset() { *m = MutatingAdmissionPolicy{} }
-var xxx_messageInfo_ParamKind proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicyBinding) Reset() { *m = MutatingAdmissionPolicyBinding{} }
-func (m *ParamRef) Reset() { *m = ParamRef{} }
-func (*ParamRef) ProtoMessage() {}
-func (*ParamRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{18}
-}
-func (m *ParamRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParamRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParamRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParamRef.Merge(m, src)
-}
-func (m *ParamRef) XXX_Size() int {
- return m.Size()
-}
-func (m *ParamRef) XXX_DiscardUnknown() {
- xxx_messageInfo_ParamRef.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicyBindingList) Reset() { *m = MutatingAdmissionPolicyBindingList{} }
-var xxx_messageInfo_ParamRef proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicyBindingSpec) Reset() { *m = MutatingAdmissionPolicyBindingSpec{} }
-func (m *ServiceReference) Reset() { *m = ServiceReference{} }
-func (*ServiceReference) ProtoMessage() {}
-func (*ServiceReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{19}
-}
-func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceReference.Merge(m, src)
-}
-func (m *ServiceReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceReference.DiscardUnknown(m)
-}
+func (m *MutatingAdmissionPolicyList) Reset() { *m = MutatingAdmissionPolicyList{} }
-var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
+func (m *MutatingAdmissionPolicySpec) Reset() { *m = MutatingAdmissionPolicySpec{} }
-func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-func (*TypeChecking) ProtoMessage() {}
-func (*TypeChecking) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{20}
-}
-func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeChecking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeChecking) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeChecking.Merge(m, src)
-}
-func (m *TypeChecking) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeChecking) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeChecking.DiscardUnknown(m)
-}
+func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
-var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
+func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
-func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-func (*ValidatingAdmissionPolicy) ProtoMessage() {}
-func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{21}
-}
-func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicy.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicy.DiscardUnknown(m)
-}
+func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
-var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
+func (m *Mutation) Reset() { *m = Mutation{} }
-func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{22}
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBinding.DiscardUnknown(m)
-}
+func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
+func (m *ParamKind) Reset() { *m = ParamKind{} }
-func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{23}
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingList.DiscardUnknown(m)
-}
+func (m *ParamRef) Reset() { *m = ParamRef{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageInfo
+func (m *ServiceReference) Reset() { *m = ServiceReference{} }
-func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{24}
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyBindingSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{25}
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyList.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
-
-func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
-func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{26}
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicySpec.DiscardUnknown(m)
-}
+func (m *TypeChecking) Reset() { *m = TypeChecking{} }
-var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
-func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
-func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{27}
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.Merge(m, src)
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingAdmissionPolicyStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingAdmissionPolicyStatus.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
-var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
-
-func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
-func (*ValidatingWebhook) ProtoMessage() {}
-func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{28}
-}
-func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhook) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhook.Merge(m, src)
-}
-func (m *ValidatingWebhook) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhook) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
-var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
-func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
-func (*ValidatingWebhookConfiguration) ProtoMessage() {}
-func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{29}
-}
-func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src)
-}
-func (m *ValidatingWebhookConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
-var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
+func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
-func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
-func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
-func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{30}
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src)
-}
-func (m *ValidatingWebhookConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m)
-}
+func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
-var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
+func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
-func (m *Validation) Reset() { *m = Validation{} }
-func (*Validation) ProtoMessage() {}
-func (*Validation) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{31}
-}
-func (m *Validation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Validation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Validation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Validation.Merge(m, src)
-}
-func (m *Validation) XXX_Size() int {
- return m.Size()
-}
-func (m *Validation) XXX_DiscardUnknown() {
- xxx_messageInfo_Validation.DiscardUnknown(m)
-}
+func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
-var xxx_messageInfo_Validation proto.InternalMessageInfo
+func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
-func (m *Variable) Reset() { *m = Variable{} }
-func (*Variable) ProtoMessage() {}
-func (*Variable) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{32}
-}
-func (m *Variable) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Variable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Variable) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Variable.Merge(m, src)
-}
-func (m *Variable) XXX_Size() int {
- return m.Size()
-}
-func (m *Variable) XXX_DiscardUnknown() {
- xxx_messageInfo_Variable.DiscardUnknown(m)
-}
+func (m *Validation) Reset() { *m = Validation{} }
-var xxx_messageInfo_Variable proto.InternalMessageInfo
+func (m *Variable) Reset() { *m = Variable{} }
-func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
-func (*WebhookClientConfig) ProtoMessage() {}
-func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_7f7c65a4f012fb19, []int{33}
-}
-func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WebhookClientConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WebhookClientConfig.Merge(m, src)
-}
-func (m *WebhookClientConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *WebhookClientConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ApplyConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ApplyConfiguration")
- proto.RegisterType((*AuditAnnotation)(nil), "k8s.io.api.admissionregistration.v1beta1.AuditAnnotation")
- proto.RegisterType((*ExpressionWarning)(nil), "k8s.io.api.admissionregistration.v1beta1.ExpressionWarning")
- proto.RegisterType((*JSONPatch)(nil), "k8s.io.api.admissionregistration.v1beta1.JSONPatch")
- proto.RegisterType((*MatchCondition)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchCondition")
- proto.RegisterType((*MatchResources)(nil), "k8s.io.api.admissionregistration.v1beta1.MatchResources")
- proto.RegisterType((*MutatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicy")
- proto.RegisterType((*MutatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding")
- proto.RegisterType((*MutatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList")
- proto.RegisterType((*MutatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec")
- proto.RegisterType((*MutatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList")
- proto.RegisterType((*MutatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec")
- proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook")
- proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration")
- proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList")
- proto.RegisterType((*Mutation)(nil), "k8s.io.api.admissionregistration.v1beta1.Mutation")
- proto.RegisterType((*NamedRuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.NamedRuleWithOperations")
- proto.RegisterType((*ParamKind)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamKind")
- proto.RegisterType((*ParamRef)(nil), "k8s.io.api.admissionregistration.v1beta1.ParamRef")
- proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference")
- proto.RegisterType((*TypeChecking)(nil), "k8s.io.api.admissionregistration.v1beta1.TypeChecking")
- proto.RegisterType((*ValidatingAdmissionPolicy)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy")
- proto.RegisterType((*ValidatingAdmissionPolicyBinding)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList")
- proto.RegisterType((*ValidatingAdmissionPolicyBindingSpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec")
- proto.RegisterType((*ValidatingAdmissionPolicyList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList")
- proto.RegisterType((*ValidatingAdmissionPolicySpec)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec")
- proto.RegisterType((*ValidatingAdmissionPolicyStatus)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus")
- proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook")
- proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration")
- proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList")
- proto.RegisterType((*Validation)(nil), "k8s.io.api.admissionregistration.v1beta1.Validation")
- proto.RegisterType((*Variable)(nil), "k8s.io.api.admissionregistration.v1beta1.Variable")
- proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_7f7c65a4f012fb19)
-}
-
-var fileDescriptor_7f7c65a4f012fb19 = []byte{
- // 2215 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7,
- 0x15, 0xf6, 0x92, 0x92, 0x45, 0x3e, 0xca, 0x92, 0x38, 0x71, 0x2a, 0xfa, 0x8f, 0x14, 0x16, 0x41,
- 0x21, 0x03, 0x2d, 0x59, 0x2b, 0x41, 0xe2, 0x3a, 0x29, 0x02, 0xae, 0x62, 0x3b, 0x76, 0x24, 0x59,
- 0x18, 0x39, 0x52, 0xd1, 0x26, 0x40, 0x56, 0xcb, 0x21, 0xb9, 0x11, 0xb9, 0xcb, 0xee, 0x2c, 0x65,
- 0xab, 0x05, 0xda, 0x02, 0x2d, 0x90, 0x1e, 0x0b, 0xf4, 0x52, 0xa0, 0xa7, 0xde, 0x7b, 0x69, 0xef,
- 0x05, 0x7a, 0xf4, 0x31, 0xb7, 0x1a, 0x28, 0x4a, 0x54, 0x4c, 0xd1, 0x9e, 0x7a, 0x48, 0x81, 0xf6,
- 0xa0, 0x4b, 0x8b, 0x99, 0x9d, 0xfd, 0xdf, 0x95, 0x56, 0xb2, 0x2c, 0x17, 0x85, 0x6f, 0xda, 0xf7,
- 0xe6, 0xbd, 0x37, 0xef, 0xcd, 0x9b, 0xf7, 0xbe, 0x79, 0x22, 0xdc, 0xdc, 0xb9, 0x49, 0xeb, 0xba,
- 0xd9, 0x50, 0x07, 0x7a, 0x43, 0x6d, 0xf5, 0x75, 0x4a, 0x75, 0xd3, 0xb0, 0x48, 0x47, 0xa7, 0xb6,
- 0xa5, 0xda, 0xba, 0x69, 0x34, 0x76, 0x6f, 0x6c, 0x13, 0x5b, 0xbd, 0xd1, 0xe8, 0x10, 0x83, 0x58,
- 0xaa, 0x4d, 0x5a, 0xf5, 0x81, 0x65, 0xda, 0x26, 0x5a, 0x74, 0x24, 0xeb, 0xea, 0x40, 0xaf, 0x27,
- 0x4a, 0xd6, 0x85, 0xe4, 0xe5, 0xaf, 0x77, 0x74, 0xbb, 0x3b, 0xdc, 0xae, 0x6b, 0x66, 0xbf, 0xd1,
- 0x31, 0x3b, 0x66, 0x83, 0x2b, 0xd8, 0x1e, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x39, 0x8a, 0x2f,
- 0xbf, 0x9e, 0x61, 0x4b, 0xd1, 0xdd, 0x5c, 0x7e, 0xc3, 0x17, 0xea, 0xab, 0x5a, 0x57, 0x37, 0x88,
- 0xb5, 0xd7, 0x18, 0xec, 0x74, 0x18, 0x81, 0x36, 0xfa, 0xc4, 0x56, 0x93, 0xa4, 0x1a, 0x69, 0x52,
- 0xd6, 0xd0, 0xb0, 0xf5, 0x3e, 0x89, 0x09, 0xbc, 0x79, 0x94, 0x00, 0xd5, 0xba, 0xa4, 0xaf, 0x46,
- 0xe5, 0xe4, 0xf7, 0x01, 0x35, 0x07, 0x83, 0xde, 0xde, 0xb2, 0x69, 0xb4, 0xf5, 0xce, 0xd0, 0xf1,
- 0x03, 0x2d, 0x01, 0x90, 0xc7, 0x03, 0x8b, 0x70, 0x0f, 0x2b, 0xd2, 0x82, 0xb4, 0x58, 0x54, 0xd0,
- 0x93, 0x51, 0xed, 0xdc, 0x78, 0x54, 0x83, 0xdb, 0x1e, 0x07, 0x07, 0x56, 0xc9, 0x14, 0x66, 0x9b,
- 0xc3, 0x96, 0x6e, 0x37, 0x0d, 0xc3, 0xb4, 0x1d, 0x35, 0xd7, 0x20, 0xbf, 0x43, 0xf6, 0x84, 0x7c,
- 0x49, 0xc8, 0xe7, 0x3f, 0x20, 0x7b, 0x98, 0xd1, 0x51, 0x13, 0x66, 0x77, 0xd5, 0xde, 0x90, 0xf8,
- 0x0a, 0x2b, 0x39, 0xbe, 0x74, 0x5e, 0x2c, 0x9d, 0xdd, 0x0c, 0xb3, 0x71, 0x74, 0xbd, 0xdc, 0x83,
- 0xb2, 0xff, 0xb5, 0xa5, 0x5a, 0x86, 0x6e, 0x74, 0xd0, 0xd7, 0xa0, 0xd0, 0xd6, 0x49, 0xaf, 0x85,
- 0x49, 0x5b, 0x28, 0x9c, 0x13, 0x0a, 0x0b, 0x77, 0x04, 0x1d, 0x7b, 0x2b, 0xd0, 0x75, 0x98, 0x7a,
- 0xe4, 0x08, 0x56, 0xf2, 0x7c, 0xf1, 0xac, 0x58, 0x3c, 0x25, 0xf4, 0x61, 0x97, 0x2f, 0xbf, 0x0b,
- 0xc5, 0xfb, 0x1b, 0x0f, 0xd6, 0xd6, 0x55, 0x5b, 0xeb, 0x9e, 0x28, 0x46, 0x6d, 0x98, 0x59, 0x65,
- 0xc2, 0xcb, 0xa6, 0xd1, 0xd2, 0x79, 0x88, 0x16, 0x60, 0xc2, 0x50, 0xfb, 0x44, 0xc8, 0x4f, 0x0b,
- 0xf9, 0x89, 0x35, 0xb5, 0x4f, 0x30, 0xe7, 0x44, 0xec, 0xe4, 0x32, 0xd9, 0xf9, 0xe3, 0x84, 0x30,
- 0x84, 0x09, 0x35, 0x87, 0x96, 0x46, 0x28, 0x7a, 0x0c, 0x65, 0xa6, 0x8e, 0x0e, 0x54, 0x8d, 0x6c,
- 0x90, 0x1e, 0xd1, 0x6c, 0xd3, 0xe2, 0x56, 0x4b, 0x4b, 0xaf, 0xd7, 0xfd, 0x1b, 0xe3, 0x25, 0x4f,
- 0x7d, 0xb0, 0xd3, 0x61, 0x04, 0x5a, 0x67, 0x39, 0x5a, 0xdf, 0xbd, 0x51, 0x5f, 0x51, 0xb7, 0x49,
- 0xcf, 0x15, 0x55, 0x5e, 0x1d, 0x8f, 0x6a, 0xe5, 0xb5, 0xa8, 0x46, 0x1c, 0x37, 0x82, 0x4c, 0x98,
- 0x31, 0xb7, 0x3f, 0x25, 0x9a, 0xed, 0x99, 0xcd, 0x9d, 0xdc, 0x2c, 0x1a, 0x8f, 0x6a, 0x33, 0x0f,
- 0x42, 0xea, 0x70, 0x44, 0x3d, 0xfa, 0x21, 0x5c, 0xb0, 0x84, 0xdf, 0x78, 0xd8, 0x23, 0xb4, 0x92,
- 0x5f, 0xc8, 0x2f, 0x96, 0x96, 0x9a, 0xf5, 0xac, 0x85, 0xa1, 0xce, 0xfc, 0x6a, 0x31, 0xd9, 0x2d,
- 0xdd, 0xee, 0x3e, 0x18, 0x10, 0x87, 0x4d, 0x95, 0x57, 0x45, 0xdc, 0x2f, 0xe0, 0xa0, 0x7e, 0x1c,
- 0x36, 0x87, 0x7e, 0x21, 0xc1, 0x45, 0xf2, 0x58, 0xeb, 0x0d, 0x5b, 0x24, 0xb4, 0xae, 0x32, 0x71,
- 0x5a, 0xfb, 0xb8, 0x2a, 0xf6, 0x71, 0xf1, 0x76, 0x82, 0x19, 0x9c, 0x68, 0x1c, 0xbd, 0x07, 0xa5,
- 0x3e, 0x4b, 0x89, 0x75, 0xb3, 0xa7, 0x6b, 0x7b, 0x95, 0x29, 0x9e, 0x48, 0xf2, 0x78, 0x54, 0x2b,
- 0xad, 0xfa, 0xe4, 0x83, 0x51, 0x6d, 0x36, 0xf0, 0xf9, 0x70, 0x6f, 0x40, 0x70, 0x50, 0x4c, 0xfe,
- 0xab, 0x04, 0xf3, 0xab, 0x43, 0x76, 0xbf, 0x8d, 0x4e, 0xd3, 0xdd, 0xbb, 0xc3, 0x43, 0x9f, 0x40,
- 0x81, 0x1d, 0x5a, 0x4b, 0xb5, 0x55, 0x91, 0x59, 0xdf, 0xc8, 0x76, 0xc4, 0xce, 0x79, 0xae, 0x12,
- 0x5b, 0xf5, 0x33, 0xdb, 0xa7, 0x61, 0x4f, 0x2b, 0xea, 0xc0, 0x04, 0x1d, 0x10, 0x4d, 0x24, 0xd0,
- 0xed, 0xec, 0x81, 0x4c, 0xd9, 0xf2, 0xc6, 0x80, 0x68, 0xfe, 0xa5, 0x63, 0x5f, 0x98, 0x1b, 0x90,
- 0xff, 0x29, 0x41, 0x35, 0x45, 0x46, 0xd1, 0x8d, 0x16, 0xab, 0x32, 0xcf, 0xdf, 0x5b, 0x23, 0xe4,
- 0xed, 0xca, 0x33, 0x7b, 0x2b, 0x76, 0x9e, 0xea, 0xf4, 0x97, 0x12, 0xc8, 0x87, 0x8b, 0xae, 0xe8,
- 0xd4, 0x46, 0x1f, 0xc5, 0x1c, 0xaf, 0x67, 0xbc, 0xc9, 0x3a, 0x75, 0xdc, 0xf6, 0xca, 0xb1, 0x4b,
- 0x09, 0x38, 0xdd, 0x87, 0x49, 0xdd, 0x26, 0x7d, 0x5a, 0xc9, 0xf1, 0xcb, 0xf2, 0xfe, 0x69, 0x79,
- 0xad, 0x5c, 0x10, 0x46, 0x27, 0xef, 0x31, 0xf5, 0xd8, 0xb1, 0x22, 0xff, 0x26, 0x77, 0x94, 0xcf,
- 0x2c, 0x40, 0xac, 0x08, 0x0f, 0x38, 0x71, 0xcd, 0x2f, 0xd6, 0xde, 0xe1, 0xad, 0x7b, 0x1c, 0x1c,
- 0x58, 0xc5, 0xe2, 0x34, 0x50, 0x2d, 0xb5, 0xef, 0xb6, 0xa1, 0xd2, 0xd2, 0x52, 0x76, 0x67, 0xd6,
- 0x85, 0xa4, 0x32, 0xcd, 0xe2, 0xe4, 0x7e, 0x61, 0x4f, 0x23, 0xb2, 0x61, 0xa6, 0x1f, 0xaa, 0xf0,
- 0xbc, 0x7b, 0x95, 0x96, 0x6e, 0x1e, 0x23, 0x60, 0x21, 0x79, 0xa7, 0xb4, 0x86, 0x69, 0x38, 0x62,
- 0x43, 0xfe, 0x42, 0x82, 0x2b, 0x29, 0xe1, 0x3a, 0x83, 0xdc, 0x68, 0x87, 0x73, 0xa3, 0xf9, 0xec,
- 0xb9, 0x91, 0x9c, 0x14, 0xbf, 0x3a, 0x9f, 0xea, 0x25, 0xcf, 0x86, 0x4f, 0xa0, 0xc8, 0xcf, 0xe1,
- 0x03, 0xdd, 0x68, 0x25, 0xf4, 0xd0, 0x2c, 0x47, 0xcb, 0x44, 0x95, 0x0b, 0xe3, 0x51, 0xad, 0xe8,
- 0x7d, 0x62, 0x5f, 0x29, 0xfa, 0x3e, 0xcc, 0xf5, 0x05, 0x50, 0x60, 0xf2, 0xba, 0x61, 0x53, 0x91,
- 0x43, 0x27, 0x3f, 0xdf, 0x8b, 0xe3, 0x51, 0x6d, 0x6e, 0x35, 0xa2, 0x15, 0xc7, 0xec, 0x20, 0x0d,
- 0x8a, 0xbb, 0xaa, 0xa5, 0xab, 0xdb, 0x7e, 0xeb, 0x3c, 0x46, 0xe2, 0x6e, 0x0a, 0x51, 0xa5, 0x2c,
- 0x42, 0x5b, 0x74, 0x29, 0x14, 0xfb, 0x7a, 0x99, 0x91, 0xfe, 0xd0, 0x81, 0x89, 0x6e, 0x5f, 0x5c,
- 0x3a, 0xee, 0x71, 0x9a, 0x86, 0x6f, 0xc4, 0xa5, 0x50, 0xec, 0xeb, 0x45, 0x2b, 0x70, 0xa1, 0xad,
- 0xea, 0xbd, 0xa1, 0x45, 0x44, 0xd3, 0x9b, 0xe4, 0x17, 0xf7, 0xab, 0xac, 0x83, 0xdf, 0x09, 0x32,
- 0x0e, 0x46, 0xb5, 0x72, 0x88, 0xc0, 0x1b, 0x5f, 0x58, 0x18, 0xfd, 0x00, 0x66, 0xfb, 0x21, 0xf0,
- 0x46, 0x2b, 0xe7, 0xf9, 0xc6, 0x8f, 0x7b, 0x24, 0x9e, 0x02, 0x1f, 0xe8, 0x86, 0xe9, 0x14, 0x47,
- 0x2d, 0xa1, 0x9f, 0x49, 0x80, 0x2c, 0xa2, 0x1b, 0xbb, 0xa6, 0xc6, 0x35, 0x86, 0xba, 0xf8, 0xb7,
- 0x85, 0x1a, 0x84, 0x63, 0x2b, 0x0e, 0x46, 0xb5, 0x5b, 0x19, 0x9e, 0x2d, 0xf5, 0xb8, 0x24, 0x0f,
- 0x41, 0x82, 0x4d, 0xf9, 0x6f, 0x05, 0x98, 0x75, 0x6f, 0xc7, 0x16, 0xd9, 0xee, 0x9a, 0xe6, 0x4e,
- 0x06, 0x18, 0xfb, 0x08, 0xa6, 0xb5, 0x9e, 0x4e, 0x0c, 0xdb, 0x79, 0x69, 0x88, 0x6c, 0xfe, 0x56,
- 0xf6, 0xd0, 0x09, 0x53, 0xcb, 0x01, 0x25, 0xca, 0x45, 0x61, 0x68, 0x3a, 0x48, 0xc5, 0x21, 0x43,
- 0xe8, 0x23, 0x98, 0xb4, 0x02, 0x28, 0xf0, 0xad, 0x2c, 0x16, 0xeb, 0x09, 0x98, 0xcb, 0x2b, 0x15,
- 0x0e, 0xc8, 0x72, 0x94, 0xc6, 0x53, 0x6c, 0xe2, 0x59, 0x52, 0x2c, 0x82, 0xd1, 0x8a, 0x27, 0xc2,
- 0x68, 0xc9, 0x50, 0x7f, 0xf2, 0xc5, 0x40, 0xfd, 0xd2, 0xf3, 0x85, 0xfa, 0xef, 0x41, 0x89, 0xea,
- 0x2d, 0x72, 0xbb, 0xdd, 0x26, 0x9a, 0xcd, 0xee, 0xa3, 0x17, 0xb0, 0x0d, 0x9f, 0xcc, 0x02, 0xe6,
- 0x7f, 0x2e, 0xf7, 0x54, 0x4a, 0x71, 0x50, 0x0c, 0xdd, 0x82, 0x19, 0xf6, 0x46, 0x36, 0x87, 0xf6,
- 0x06, 0xd1, 0x4c, 0xa3, 0x45, 0xf9, 0xbd, 0x9a, 0x74, 0x76, 0xf0, 0x30, 0xc4, 0xc1, 0x91, 0x95,
- 0xe8, 0x43, 0x98, 0xf7, 0xb2, 0x08, 0x93, 0x5d, 0x9d, 0x3c, 0xda, 0x24, 0x16, 0xe5, 0xd5, 0xa1,
- 0xb0, 0x90, 0x5f, 0x2c, 0x2a, 0x57, 0xc6, 0xa3, 0xda, 0x7c, 0x33, 0x79, 0x09, 0x4e, 0x93, 0x45,
- 0x3f, 0x4d, 0xbe, 0xef, 0xc0, 0x1d, 0x7c, 0x78, 0x56, 0x77, 0x3d, 0xa9, 0xe6, 0x4d, 0x9f, 0x55,
- 0xcd, 0x93, 0xff, 0x2c, 0xc1, 0xd5, 0x48, 0xa1, 0x09, 0x8f, 0x29, 0x9e, 0x3f, 0x04, 0xff, 0x2e,
- 0x14, 0x84, 0x65, 0x17, 0x74, 0x7c, 0xf3, 0xf8, 0xa0, 0x43, 0x68, 0x50, 0x26, 0x98, 0x29, 0xec,
- 0x29, 0x94, 0xff, 0x21, 0xc1, 0xc2, 0x61, 0xfe, 0x9d, 0x01, 0xa2, 0xda, 0x09, 0x23, 0xaa, 0x3b,
- 0x27, 0x76, 0x2e, 0xb4, 0xf1, 0x14, 0x58, 0xf5, 0xdb, 0x1c, 0x14, 0xdc, 0x3e, 0x8d, 0xde, 0x61,
- 0x18, 0xca, 0xd6, 0xba, 0x2c, 0xf5, 0xc4, 0x54, 0xa3, 0xea, 0x36, 0xf3, 0x75, 0x97, 0x71, 0x10,
- 0xfc, 0xc0, 0xbe, 0x00, 0xbf, 0x1e, 0x6a, 0x6c, 0x6e, 0x25, 0x20, 0xf0, 0x3b, 0xd9, 0xbd, 0x88,
- 0xcf, 0xbe, 0x94, 0xaf, 0xb0, 0xcb, 0x15, 0xa7, 0xe3, 0x04, 0x7b, 0x0c, 0x08, 0x7e, 0x4a, 0x4d,
- 0x83, 0x6f, 0x91, 0x57, 0xfe, 0x63, 0x01, 0x41, 0x6f, 0x96, 0xe4, 0x00, 0x41, 0xef, 0x13, 0xfb,
- 0x4a, 0xe5, 0xa7, 0x12, 0xcc, 0xa7, 0x4c, 0x01, 0xd0, 0x5b, 0xfe, 0x9c, 0x83, 0x57, 0xe7, 0x8a,
- 0xc4, 0x0b, 0x4e, 0x39, 0x38, 0xa0, 0xe0, 0x0c, 0x1c, 0x5e, 0x87, 0x7e, 0xc2, 0x8a, 0x4b, 0x4c,
- 0x9f, 0x68, 0xc9, 0x27, 0x6e, 0x90, 0x97, 0x3d, 0x14, 0x12, 0xe3, 0xe1, 0x04, 0x73, 0xb2, 0x0a,
- 0x3e, 0xf6, 0x65, 0x0f, 0x2c, 0x75, 0xa0, 0x8b, 0xf2, 0x17, 0x7d, 0x60, 0x35, 0xd7, 0xef, 0x09,
- 0x0e, 0x0e, 0xac, 0x62, 0xa0, 0x63, 0x87, 0x21, 0xf0, 0x5c, 0x18, 0x74, 0x70, 0x2c, 0xcd, 0x39,
- 0xf2, 0xef, 0x72, 0xe0, 0xbd, 0x9d, 0x32, 0x60, 0x94, 0x06, 0x14, 0xbd, 0x9e, 0x26, 0xb4, 0x7a,
- 0x00, 0xd3, 0xeb, 0x7f, 0xd8, 0x5f, 0x83, 0x3e, 0x86, 0x02, 0x75, 0x3b, 0x5d, 0xfe, 0xe4, 0x9d,
- 0x8e, 0xbf, 0xf1, 0xbc, 0x1e, 0xe7, 0xa9, 0x44, 0x36, 0xcc, 0xf3, 0x27, 0x01, 0xb1, 0x89, 0xb5,
- 0x66, 0xda, 0x77, 0xcc, 0xa1, 0xd1, 0x6a, 0x6a, 0x3c, 0xd3, 0x1d, 0x98, 0x71, 0x8b, 0xf5, 0x96,
- 0xf5, 0xe4, 0x25, 0x07, 0xa3, 0xda, 0x95, 0x14, 0x16, 0xbf, 0x4d, 0x69, 0xaa, 0xe5, 0x5f, 0x4b,
- 0x30, 0xb7, 0x41, 0xac, 0x5d, 0x5d, 0x23, 0x98, 0xb4, 0x89, 0x45, 0x0c, 0x2d, 0x12, 0x1a, 0x29,
- 0x43, 0x68, 0xdc, 0x68, 0xe7, 0x52, 0xa3, 0x7d, 0x15, 0x26, 0x06, 0xaa, 0xdd, 0x15, 0x53, 0xd7,
- 0x02, 0xe3, 0xae, 0xab, 0x76, 0x17, 0x73, 0x2a, 0xe7, 0x9a, 0x96, 0xcd, 0x1d, 0x9d, 0x14, 0x5c,
- 0xd3, 0xb2, 0x31, 0xa7, 0xca, 0xbf, 0x94, 0x60, 0x9a, 0x79, 0xb1, 0xdc, 0x25, 0xda, 0x8e, 0x6e,
- 0x74, 0xd0, 0x67, 0x12, 0x20, 0x12, 0x9d, 0x04, 0x3b, 0x37, 0xa2, 0xb4, 0xf4, 0x76, 0xf6, 0x3b,
- 0x19, 0x9b, 0x26, 0xfb, 0x69, 0x1d, 0x63, 0x51, 0x9c, 0x60, 0x52, 0xfe, 0x53, 0x0e, 0x2e, 0x6d,
- 0xaa, 0x3d, 0xbd, 0xf5, 0x82, 0x66, 0x64, 0x7a, 0x68, 0x6a, 0x74, 0xf7, 0x38, 0x2f, 0xb7, 0x94,
- 0x4d, 0xa7, 0x0d, 0x8c, 0xd0, 0xf7, 0xe0, 0x3c, 0xb5, 0x55, 0x7b, 0xe8, 0xce, 0x1e, 0xee, 0x9d,
- 0x86, 0x31, 0xae, 0x50, 0x99, 0x11, 0xe6, 0xce, 0x3b, 0xdf, 0x58, 0x18, 0x92, 0xff, 0x2d, 0xc1,
- 0x42, 0xaa, 0xec, 0xd9, 0x8d, 0xe6, 0x06, 0xa1, 0x20, 0xaf, 0x9d, 0x82, 0xdf, 0x47, 0x0d, 0xe7,
- 0xfe, 0x25, 0xc1, 0x6b, 0x47, 0x09, 0x9f, 0x01, 0x60, 0x30, 0xc3, 0x80, 0xe1, 0xfe, 0xe9, 0x79,
- 0x9e, 0x02, 0x1a, 0x3e, 0xcb, 0x1f, 0xed, 0xf7, 0xcb, 0x11, 0x5d, 0xe0, 0x1f, 0x3d, 0x5b, 0x50,
- 0xde, 0x15, 0xf1, 0x32, 0x0d, 0xa7, 0xa4, 0x3b, 0x13, 0x96, 0xa2, 0x72, 0x9d, 0x3d, 0xe4, 0x36,
- 0xa3, 0xcc, 0x83, 0x51, 0x6d, 0x2e, 0x4a, 0xc4, 0x71, 0x1d, 0xf2, 0xdf, 0x25, 0xb8, 0x96, 0x7a,
- 0x12, 0x67, 0x90, 0x7a, 0xdd, 0x70, 0xea, 0x2d, 0x9f, 0x46, 0xea, 0xa5, 0xce, 0xff, 0xae, 0x1d,
- 0x5a, 0x0d, 0xff, 0xcf, 0x27, 0x80, 0x3b, 0x50, 0xf2, 0x8f, 0xdf, 0x1d, 0x9c, 0xbc, 0x71, 0xfc,
- 0x78, 0x9b, 0x86, 0xf2, 0x8a, 0x08, 0x70, 0xc9, 0xa7, 0x51, 0x1c, 0xd4, 0x7e, 0xca, 0x13, 0x94,
- 0x1f, 0xc1, 0x9c, 0x1a, 0xfe, 0x2f, 0x34, 0xad, 0x4c, 0x1e, 0xf7, 0xe1, 0x16, 0xf9, 0x3f, 0xb6,
- 0x52, 0x11, 0x4e, 0xcc, 0x45, 0x18, 0x14, 0xc7, 0x8c, 0xbd, 0xd8, 0x29, 0x61, 0x68, 0x74, 0x3b,
- 0xf5, 0x7c, 0x46, 0xb7, 0xf2, 0x1f, 0x72, 0x50, 0x3b, 0xa2, 0x7d, 0xa3, 0xfb, 0x80, 0xcc, 0x6d,
- 0x4a, 0xac, 0x5d, 0xd2, 0xba, 0xeb, 0xfc, 0xe2, 0xc0, 0x85, 0xf5, 0x79, 0x1f, 0x50, 0x3d, 0x88,
- 0xad, 0xc0, 0x09, 0x52, 0xa8, 0x07, 0xd3, 0x76, 0x00, 0xea, 0x89, 0x5b, 0xf0, 0x66, 0x76, 0xbf,
- 0x82, 0x40, 0x51, 0x99, 0x1b, 0x8f, 0x6a, 0x21, 0xe8, 0x88, 0x43, 0xda, 0x91, 0x06, 0xa0, 0xf9,
- 0x47, 0xe7, 0xa4, 0x7e, 0x23, 0x5b, 0x15, 0xf3, 0x4f, 0xcc, 0xeb, 0x3b, 0x81, 0xc3, 0x0a, 0xa8,
- 0x95, 0xf7, 0xa7, 0xa0, 0xec, 0x87, 0xf0, 0xe5, 0x10, 0xf5, 0xe5, 0x10, 0xf5, 0xd0, 0x21, 0x2a,
- 0xbc, 0x1c, 0xa2, 0x9e, 0x68, 0x88, 0x9a, 0x50, 0x8b, 0x4b, 0x67, 0x36, 0xbd, 0xdc, 0x97, 0xa0,
- 0x1a, 0xbb, 0xe3, 0x67, 0x3d, 0xbf, 0xfc, 0x38, 0x36, 0xbf, 0x7c, 0xfb, 0x24, 0xb0, 0x29, 0x6d,
- 0x82, 0xf9, 0xa5, 0x04, 0xf2, 0xe1, 0x3e, 0xfe, 0x4f, 0xff, 0x62, 0xe0, 0xf0, 0xad, 0xa7, 0x80,
- 0xc3, 0xff, 0x48, 0x00, 0x3e, 0x98, 0x41, 0xaf, 0x41, 0xe0, 0x47, 0x58, 0xa2, 0x74, 0x3b, 0x61,
- 0x0a, 0xd0, 0xd1, 0x75, 0x98, 0xea, 0x13, 0x4a, 0xd5, 0x8e, 0x3b, 0x10, 0xf1, 0x7e, 0x64, 0xb6,
- 0xea, 0x90, 0xb1, 0xcb, 0x47, 0x5b, 0x70, 0xde, 0x22, 0x2a, 0x15, 0xd3, 0xcc, 0xa2, 0xf2, 0x2e,
- 0x7b, 0x05, 0x63, 0x4e, 0x39, 0x18, 0xd5, 0x6e, 0x64, 0xf9, 0x39, 0x61, 0x5d, 0x3c, 0x9a, 0xb9,
- 0x10, 0x16, 0xea, 0xd0, 0x5d, 0x28, 0x0b, 0x1b, 0x81, 0x0d, 0x3b, 0x95, 0xf6, 0x92, 0xd8, 0x4d,
- 0x79, 0x35, 0xba, 0x00, 0xc7, 0x65, 0xe4, 0xfb, 0x50, 0x70, 0x81, 0x01, 0xaa, 0xc0, 0x44, 0xe0,
- 0xbd, 0xe5, 0x38, 0xce, 0x29, 0x91, 0xc0, 0xe4, 0x92, 0x03, 0x23, 0xff, 0x5e, 0x82, 0x57, 0x12,
- 0x9a, 0x12, 0xba, 0x04, 0xf9, 0xa1, 0xd5, 0x13, 0x21, 0x98, 0x1a, 0x8f, 0x6a, 0xf9, 0x0f, 0xf1,
- 0x0a, 0x66, 0x34, 0xa4, 0xc2, 0x14, 0x75, 0xc6, 0x53, 0x22, 0x99, 0x6e, 0x65, 0x3f, 0xf1, 0xe8,
- 0x5c, 0x4b, 0x29, 0xb1, 0x33, 0x70, 0xa9, 0xae, 0x5e, 0xb4, 0x08, 0x05, 0x4d, 0x55, 0x86, 0x46,
- 0xab, 0xe7, 0x9c, 0xd7, 0xb4, 0xf3, 0xc6, 0x5b, 0x6e, 0x3a, 0x34, 0xec, 0x71, 0x95, 0xb5, 0x27,
- 0xfb, 0xd5, 0x73, 0x9f, 0xef, 0x57, 0xcf, 0x3d, 0xdd, 0xaf, 0x9e, 0xfb, 0xf1, 0xb8, 0x2a, 0x3d,
- 0x19, 0x57, 0xa5, 0xcf, 0xc7, 0x55, 0xe9, 0xe9, 0xb8, 0x2a, 0xfd, 0x65, 0x5c, 0x95, 0x7e, 0xfe,
- 0x45, 0xf5, 0xdc, 0x77, 0x16, 0xb3, 0xfe, 0x98, 0xf5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x13,
- 0x7c, 0x49, 0xa4, 0xf7, 0x2a, 0x00, 0x00,
-}
+func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
func (m *ApplyConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
index fb47a200..d184664e 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
@@ -815,7 +815,6 @@ message TypeChecking {
repeated ExpressionWarning expressionWarnings = 1;
}
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..67b85ac6
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,90 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*ApplyConfiguration) ProtoMessage() {}
+
+func (*AuditAnnotation) ProtoMessage() {}
+
+func (*ExpressionWarning) ProtoMessage() {}
+
+func (*JSONPatch) ProtoMessage() {}
+
+func (*MatchCondition) ProtoMessage() {}
+
+func (*MatchResources) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicy) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBinding) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBindingList) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicyList) ProtoMessage() {}
+
+func (*MutatingAdmissionPolicySpec) ProtoMessage() {}
+
+func (*MutatingWebhook) ProtoMessage() {}
+
+func (*MutatingWebhookConfiguration) ProtoMessage() {}
+
+func (*MutatingWebhookConfigurationList) ProtoMessage() {}
+
+func (*Mutation) ProtoMessage() {}
+
+func (*NamedRuleWithOperations) ProtoMessage() {}
+
+func (*ParamKind) ProtoMessage() {}
+
+func (*ParamRef) ProtoMessage() {}
+
+func (*ServiceReference) ProtoMessage() {}
+
+func (*TypeChecking) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicy) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
+
+func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
+
+func (*ValidatingWebhook) ProtoMessage() {}
+
+func (*ValidatingWebhookConfiguration) ProtoMessage() {}
+
+func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
+
+func (*Validation) ProtoMessage() {}
+
+func (*Variable) ProtoMessage() {}
+
+func (*WebhookClientConfig) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
index cffdda82..c7259d3d 100644
--- a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
@@ -26,6 +26,7 @@ import (
type Rule = v1.Rule
// ScopeType specifies a scope for a Rule.
+// +enum
type ScopeType = v1.ScopeType
const (
@@ -87,7 +88,6 @@ const (
SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun"
)
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -1072,16 +1072,18 @@ type MutatingWebhook struct {
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"`
}
-// ReinvocationPolicyType specifies what type of policy the admission hook uses.
+// ReinvocationPolicyType specifies what type of policy is used when other admission plugins also perform
+// modifications.
+// +enum
type ReinvocationPolicyType = v1.ReinvocationPolicyType
const (
- // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a
+ // NeverReinvocationPolicy indicates that the mutation must not be called more than once in a
// single admission evaluation.
NeverReinvocationPolicy ReinvocationPolicyType = "Never"
- // IfNeededReinvocationPolicy indicates that the webhook may be called at least one
+ // IfNeededReinvocationPolicy indicates that the mutation may be called at least one
// additional time as part of the admission evaluation if the object being admitted is
- // modified by other admission plugins after the initial webhook call.
+ // modified by other admission plugins after the initial mutation call.
IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded"
)
diff --git a/operator/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..f747248e
--- /dev/null
+++ b/operator/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,192 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ApplyConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ApplyConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AuditAnnotation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.AuditAnnotation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExpressionWarning) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ExpressionWarning"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JSONPatch) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.JSONPatch"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchCondition) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MatchCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MatchResources) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MatchResources"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBinding) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyBindingSpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBindingSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingAdmissionPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhook) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingWebhook"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhookConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MutatingWebhookConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Mutation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.Mutation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamedRuleWithOperations) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamKind) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ParamKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParamRef) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ParamRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceReference) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ServiceReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeChecking) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.TypeChecking"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBinding) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyBindingSpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingAdmissionPolicyStatus) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhook) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingWebhook"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhookConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ValidatingWebhookConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Validation) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.Validation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Variable) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.Variable"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WebhookClientConfig) OpenAPIModelName() string {
+ return "io.k8s.api.admissionregistration.v1beta1.WebhookClientConfig"
+}
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2/doc.go b/operator/vendor/k8s.io/api/apidiscovery/v2/doc.go
index f46d33e9..9d30b342 100644
--- a/operator/vendor/k8s.io/api/apidiscovery/v2/doc.go
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apidiscovery.v2
+
// +groupName=apidiscovery.k8s.io
package v2
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go b/operator/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go
index 5c37feaa..5056c8a4 100644
--- a/operator/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2/generated.pb.go
@@ -24,227 +24,22 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} }
-func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} }
-func (*APIGroupDiscovery) ProtoMessage() {}
-func (*APIGroupDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_e0b7287280068d8f, []int{0}
-}
-func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupDiscovery.Merge(m, src)
-}
-func (m *APIGroupDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo
-
-func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} }
-func (*APIGroupDiscoveryList) ProtoMessage() {}
-func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) {
- return fileDescriptor_e0b7287280068d8f, []int{1}
-}
-func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src)
-}
-func (m *APIGroupDiscoveryList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo
-
-func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} }
-func (*APIResourceDiscovery) ProtoMessage() {}
-func (*APIResourceDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_e0b7287280068d8f, []int{2}
-}
-func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResourceDiscovery.Merge(m, src)
-}
-func (m *APIResourceDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResourceDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m)
-}
+func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} }
-var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo
+func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} }
-func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} }
-func (*APISubresourceDiscovery) ProtoMessage() {}
-func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_e0b7287280068d8f, []int{3}
-}
-func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APISubresourceDiscovery.Merge(m, src)
-}
-func (m *APISubresourceDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APISubresourceDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo
-
-func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} }
-func (*APIVersionDiscovery) ProtoMessage() {}
-func (*APIVersionDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_e0b7287280068d8f, []int{4}
-}
-func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIVersionDiscovery.Merge(m, src)
-}
-func (m *APIVersionDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIVersionDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscovery")
- proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2.APIGroupDiscoveryList")
- proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIResourceDiscovery")
- proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APISubresourceDiscovery")
- proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2.APIVersionDiscovery")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apidiscovery/v2/generated.proto", fileDescriptor_e0b7287280068d8f)
-}
-
-var fileDescriptor_e0b7287280068d8f = []byte{
- // 736 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c,
- 0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xd7, 0x30, 0x80, 0x6a, 0x65, 0xe1, 0xa0, 0x6c, 0x4a,
- 0xab, 0x32, 0x86, 0x94, 0xa2, 0x2e, 0x9b, 0x94, 0xb6, 0x8a, 0xfa, 0x87, 0x26, 0x15, 0x8b, 0xaa,
- 0x95, 0xea, 0x38, 0x83, 0xe3, 0x82, 0x7f, 0x34, 0xe3, 0x44, 0x62, 0xd7, 0x47, 0xe8, 0x13, 0xf4,
- 0x79, 0xe8, 0x8e, 0x05, 0x0b, 0x56, 0x51, 0x49, 0x77, 0x7d, 0x04, 0x56, 0xd5, 0x8c, 0xc7, 0x3f,
- 0x21, 0x44, 0x41, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x9c, 0x73, 0xee, 0x3d, 0xd7, 0xd7, 0x13, 0xf0,
- 0xe0, 0xe8, 0x09, 0x43, 0xb6, 0xa7, 0x1b, 0xbe, 0xcd, 0xff, 0x7a, 0x36, 0x33, 0xbd, 0x21, 0xa1,
- 0x27, 0xfa, 0xb0, 0xa1, 0x5b, 0xc4, 0x25, 0xd4, 0x08, 0x48, 0x0f, 0xf9, 0xd4, 0x0b, 0x3c, 0x58,
- 0x0d, 0xb9, 0xc8, 0xf0, 0x6d, 0x94, 0xe6, 0xa2, 0x61, 0xa3, 0xba, 0x69, 0xd9, 0x41, 0x7f, 0xd0,
- 0x45, 0xa6, 0xe7, 0xe8, 0x96, 0x67, 0x79, 0xba, 0x90, 0x74, 0x07, 0x87, 0xe2, 0x49, 0x3c, 0x88,
- 0xff, 0x42, 0xab, 0xea, 0x4e, 0x52, 0xd6, 0x31, 0xcc, 0xbe, 0xed, 0xf2, 0x92, 0xfe, 0x91, 0xc5,
- 0x01, 0xa6, 0x3b, 0x24, 0x30, 0xf4, 0xe1, 0xf6, 0xf5, 0x06, 0xaa, 0xfa, 0x2c, 0x15, 0x1d, 0xb8,
- 0x81, 0xed, 0x90, 0x29, 0xc1, 0xee, 0x3c, 0x01, 0x33, 0xfb, 0xc4, 0x31, 0xae, 0xeb, 0xea, 0xe7,
- 0x0a, 0x58, 0x6e, 0xee, 0xb7, 0x5f, 0x52, 0x6f, 0xe0, 0xef, 0x45, 0x31, 0xe1, 0x67, 0x90, 0xe7,
- 0x9d, 0xf5, 0x8c, 0xc0, 0x50, 0x95, 0x75, 0x65, 0xa3, 0xd8, 0xd8, 0x42, 0xc9, 0x48, 0xe2, 0x02,
- 0xc8, 0x3f, 0xb2, 0x38, 0xc0, 0x10, 0x67, 0xa3, 0xe1, 0x36, 0x7a, 0xd7, 0xfd, 0x42, 0xcc, 0xe0,
- 0x0d, 0x09, 0x8c, 0x16, 0x3c, 0x1d, 0xd5, 0x32, 0xe3, 0x51, 0x0d, 0x24, 0x18, 0x8e, 0x5d, 0xe1,
- 0x27, 0x90, 0x1f, 0x12, 0xca, 0x6c, 0xcf, 0x65, 0xea, 0xc2, 0x7a, 0x76, 0xa3, 0xd8, 0xd0, 0xd1,
- 0xec, 0xa1, 0xa3, 0xe6, 0x7e, 0xfb, 0x20, 0xa4, 0xc7, 0x4d, 0xb6, 0x2a, 0xb2, 0x40, 0x5e, 0x9e,
- 0x30, 0x1c, 0x5b, 0xd6, 0x7f, 0x28, 0x60, 0x6d, 0x2a, 0xd6, 0x6b, 0x9b, 0x05, 0xf0, 0xe3, 0x54,
- 0x34, 0x74, 0xbb, 0x68, 0x5c, 0x2d, 0x82, 0xc5, 0x75, 0x23, 0x24, 0x15, 0x0b, 0x83, 0x9c, 0x1d,
- 0x10, 0x27, 0xca, 0xb4, 0x39, 0x27, 0xd3, 0x64, 0x7f, 0xad, 0xb2, 0x74, 0xce, 0xb5, 0xb9, 0x07,
- 0x0e, 0xad, 0xea, 0xdf, 0x17, 0xc1, 0x6a, 0x73, 0xbf, 0x8d, 0x09, 0xf3, 0x06, 0xd4, 0x24, 0xc9,
- 0x5b, 0x7a, 0x08, 0xf2, 0x54, 0x82, 0x22, 0x4a, 0x21, 0x69, 0x2d, 0x22, 0xe3, 0x98, 0x01, 0x8f,
- 0x41, 0x89, 0x12, 0xe6, 0x7b, 0x2e, 0x23, 0xaf, 0x6c, 0xb7, 0xa7, 0x2e, 0x88, 0xf0, 0xbb, 0xb7,
- 0x0b, 0x2f, 0x1a, 0x95, 0x73, 0xe6, 0xea, 0x56, 0x65, 0x3c, 0xaa, 0x95, 0x70, 0xca, 0x0f, 0x4f,
- 0xb8, 0xc3, 0x1d, 0x90, 0x63, 0xa6, 0xe7, 0x13, 0x35, 0x2b, 0x1a, 0xd3, 0xa2, 0x64, 0x1d, 0x0e,
- 0x5e, 0x8d, 0x6a, 0xe5, 0xa8, 0x43, 0x01, 0xe0, 0x90, 0x0c, 0xf7, 0x40, 0x85, 0xd9, 0xae, 0x35,
- 0x38, 0x36, 0x68, 0x74, 0xae, 0x2e, 0x0a, 0x03, 0x55, 0x1a, 0x54, 0x3a, 0xd7, 0xce, 0xf1, 0x94,
- 0x02, 0xd6, 0x40, 0x6e, 0x48, 0x68, 0x97, 0xa9, 0xb9, 0xf5, 0xec, 0x46, 0xa1, 0x55, 0xe0, 0x75,
- 0x0f, 0x38, 0x80, 0x43, 0x1c, 0x22, 0x00, 0x58, 0xdf, 0xa3, 0xc1, 0x5b, 0xc3, 0x21, 0x4c, 0xfd,
- 0x4f, 0xb0, 0xfe, 0xe7, 0xab, 0xda, 0x89, 0x51, 0x9c, 0x62, 0x70, 0xbe, 0x69, 0x04, 0xc4, 0xf2,
- 0xa8, 0x4d, 0x98, 0xba, 0x94, 0xf0, 0x9f, 0xc5, 0x28, 0x4e, 0x31, 0xa0, 0x03, 0x4a, 0x6c, 0xd0,
- 0x8d, 0x26, 0xcf, 0xd4, 0xbc, 0x58, 0x86, 0x47, 0x73, 0x96, 0xa1, 0x93, 0x48, 0x92, 0x95, 0x58,
- 0x95, 0xb9, 0x4b, 0xa9, 0x53, 0x86, 0x27, 0xec, 0xeb, 0xe7, 0x0b, 0xe0, 0xee, 0x0c, 0x3d, 0x7c,
- 0x0c, 0x8a, 0x29, 0xae, 0x5c, 0x93, 0x15, 0x69, 0x5a, 0x4c, 0x49, 0x70, 0x9a, 0xf7, 0x8f, 0x97,
- 0x85, 0x81, 0xb2, 0x61, 0x9a, 0xc4, 0x0f, 0x48, 0xef, 0xfd, 0x89, 0x4f, 0x98, 0x9a, 0x15, 0x03,
- 0xfb, 0xdb, 0x72, 0x6b, 0x32, 0x5e, 0xb9, 0x99, 0x36, 0xc5, 0x93, 0x35, 0x92, 0x2d, 0x59, 0xbc,
- 0x79, 0x4b, 0xea, 0xbf, 0x15, 0xb0, 0x72, 0xc3, 0xbd, 0x03, 0xef, 0x83, 0x25, 0x79, 0xcf, 0xc8,
- 0x71, 0xde, 0x91, 0xf5, 0x96, 0x24, 0x15, 0x47, 0xe7, 0xd0, 0x00, 0x85, 0x64, 0x0b, 0xc2, 0x2b,
- 0x61, 0x6b, 0xce, 0x16, 0x4c, 0x7d, 0xe6, 0xad, 0x65, 0x69, 0x5f, 0xc0, 0xf1, 0xfb, 0x4f, 0x5c,
- 0xe1, 0x73, 0x50, 0x38, 0xa4, 0x84, 0xf5, 0x5d, 0xc2, 0x98, 0xfc, 0xd8, 0xee, 0x45, 0x82, 0x17,
- 0xd1, 0xc1, 0xd5, 0xa8, 0x06, 0x63, 0xc3, 0x18, 0xc5, 0x89, 0xb2, 0xf5, 0xf4, 0xf4, 0x52, 0xcb,
- 0x9c, 0x5d, 0x6a, 0x99, 0x8b, 0x4b, 0x2d, 0xf3, 0x75, 0xac, 0x29, 0xa7, 0x63, 0x4d, 0x39, 0x1b,
- 0x6b, 0xca, 0xc5, 0x58, 0x53, 0x7e, 0x8e, 0x35, 0xe5, 0xdb, 0x2f, 0x2d, 0xf3, 0xa1, 0x3a, 0xfb,
- 0x37, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0x35, 0x6a, 0x0f, 0x60, 0x07, 0x00, 0x00,
-}
+func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} }
func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apidiscovery/v2/generated.protomessage.pb.go
new file mode 100644
index 00000000..35fe0d2a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v2
+
+func (*APIGroupDiscovery) ProtoMessage() {}
+
+func (*APIGroupDiscoveryList) ProtoMessage() {}
+
+func (*APIResourceDiscovery) ProtoMessage() {}
+
+func (*APISubresourceDiscovery) ProtoMessage() {}
+
+func (*APIVersionDiscovery) ProtoMessage() {}
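Per this diff, the ProtoMessage() markers are removed from generated.pb.go and now live only in this new generated.protomessage.pb.go, guarded by the kubernetes_protomessage_one_more_release build tag, so they are compiled in only when that tag is passed explicitly (for example, go build -tags kubernetes_protomessage_one_more_release ./...). A minimal sketch of what that means on the consumer side, assuming the vendored k8s.io/api version introduced here; the example file carries the same build tag so it only builds when the marker methods are present:

```go
//go:build kubernetes_protomessage_one_more_release

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
)

func main() {
	// With the tag set, generated.protomessage.pb.go is part of the build,
	// so *APIGroupDiscovery satisfies an interface requiring ProtoMessage().
	var msg interface{ ProtoMessage() } = &apidiscoveryv2.APIGroupDiscovery{}
	fmt.Printf("%T carries the ProtoMessage marker\n", msg)
}
```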
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apidiscovery/v2/zz_generated.model_name.go
new file mode 100644
index 00000000..40724b0f
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2.APIGroupDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupDiscoveryList) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2.APIGroupDiscoveryList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResourceDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2.APIResourceDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APISubresourceDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2.APISubresourceDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIVersionDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2.APIVersionDiscovery"
+}
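These vendored packages also gain a zz_generated.model_name.go declaring an OpenAPIModelName() accessor on each API type, returning the canonical OpenAPI model name whose prefix matches the +k8s:openapi-model-package marker added to the package's doc.go. A small usage sketch, assuming the vendored k8s.io/api version from this diff:

```go
package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
)

func main() {
	// OpenAPIModelName is declared on the value receiver, so a zero value works.
	var group apidiscoveryv2.APIGroupDiscovery
	fmt.Println(group.OpenAPIModelName()) // io.k8s.api.apidiscovery.v2.APIGroupDiscovery
}
```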
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
index d4fceab6..244986bb 100644
--- a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apidiscovery.v2beta1
// +groupName=apidiscovery.k8s.io
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go
index 398c5f94..8994f570 100644
--- a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go
@@ -24,228 +24,22 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} }
-func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} }
-func (*APIGroupDiscovery) ProtoMessage() {}
-func (*APIGroupDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_48661e6ba3d554f3, []int{0}
-}
-func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupDiscovery.Merge(m, src)
-}
-func (m *APIGroupDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo
-
-func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} }
-func (*APIGroupDiscoveryList) ProtoMessage() {}
-func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) {
- return fileDescriptor_48661e6ba3d554f3, []int{1}
-}
-func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupDiscoveryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupDiscoveryList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupDiscoveryList.Merge(m, src)
-}
-func (m *APIGroupDiscoveryList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupDiscoveryList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupDiscoveryList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo
-
-func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} }
-func (*APIResourceDiscovery) ProtoMessage() {}
-func (*APIResourceDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_48661e6ba3d554f3, []int{2}
-}
-func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResourceDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResourceDiscovery.Merge(m, src)
-}
-func (m *APIResourceDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResourceDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResourceDiscovery.DiscardUnknown(m)
-}
+func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} }
-var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo
+func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} }
-func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} }
-func (*APISubresourceDiscovery) ProtoMessage() {}
-func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_48661e6ba3d554f3, []int{3}
-}
-func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APISubresourceDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APISubresourceDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APISubresourceDiscovery.Merge(m, src)
-}
-func (m *APISubresourceDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APISubresourceDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APISubresourceDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo
-
-func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} }
-func (*APIVersionDiscovery) ProtoMessage() {}
-func (*APIVersionDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_48661e6ba3d554f3, []int{4}
-}
-func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIVersionDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIVersionDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIVersionDiscovery.Merge(m, src)
-}
-func (m *APIVersionDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *APIVersionDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_APIVersionDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIVersionDiscovery proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*APIGroupDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscovery")
- proto.RegisterType((*APIGroupDiscoveryList)(nil), "k8s.io.api.apidiscovery.v2beta1.APIGroupDiscoveryList")
- proto.RegisterType((*APIResourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIResourceDiscovery")
- proto.RegisterType((*APISubresourceDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APISubresourceDiscovery")
- proto.RegisterType((*APIVersionDiscovery)(nil), "k8s.io.api.apidiscovery.v2beta1.APIVersionDiscovery")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_48661e6ba3d554f3)
-}
-
-var fileDescriptor_48661e6ba3d554f3 = []byte{
- // 740 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4a,
- 0x18, 0x8d, 0x09, 0xb9, 0x24, 0x93, 0xe4, 0xde, 0x30, 0x80, 0xae, 0xc5, 0xc2, 0x46, 0xd9, 0x5c,
- 0xae, 0xd4, 0x8e, 0x4b, 0x04, 0x88, 0x6d, 0x52, 0x68, 0x15, 0xf5, 0x0f, 0x4d, 0x2a, 0x2a, 0x55,
- 0x5d, 0xd4, 0x71, 0x06, 0xc7, 0x85, 0xd8, 0xd6, 0xcc, 0x24, 0x12, 0xbb, 0x3e, 0x42, 0xdf, 0xa1,
- 0x2f, 0xc3, 0xaa, 0x62, 0xd1, 0x05, 0xdd, 0x44, 0x25, 0x7d, 0x80, 0xee, 0x59, 0x55, 0x33, 0x1e,
- 0xff, 0x84, 0x80, 0x88, 0xba, 0xe8, 0x22, 0x52, 0x7c, 0xe6, 0x9c, 0xf3, 0x7d, 0xe7, 0xcb, 0xe7,
- 0x09, 0xb0, 0x4e, 0xf6, 0x18, 0xf2, 0x02, 0xcb, 0x0e, 0x3d, 0xf1, 0xe9, 0x79, 0xcc, 0x09, 0x46,
- 0x84, 0x9e, 0x59, 0xa3, 0x46, 0x97, 0x70, 0x7b, 0xcb, 0x72, 0x89, 0x4f, 0xa8, 0xcd, 0x49, 0x0f,
- 0x85, 0x34, 0xe0, 0x01, 0x34, 0x23, 0x01, 0xb2, 0x43, 0x0f, 0x65, 0x05, 0x48, 0x09, 0xd6, 0x1f,
- 0xba, 0x1e, 0xef, 0x0f, 0xbb, 0xc8, 0x09, 0x06, 0x96, 0x1b, 0xb8, 0x81, 0x25, 0x75, 0xdd, 0xe1,
- 0xb1, 0x7c, 0x92, 0x0f, 0xf2, 0x5b, 0xe4, 0xb7, 0xbe, 0x9d, 0x36, 0x30, 0xb0, 0x9d, 0xbe, 0xe7,
- 0x8b, 0xe2, 0xe1, 0x89, 0x2b, 0x00, 0x66, 0x0d, 0x08, 0xb7, 0xad, 0xd1, 0x4c, 0x17, 0xeb, 0xd6,
- 0x5d, 0x2a, 0x3a, 0xf4, 0xb9, 0x37, 0x20, 0x33, 0x82, 0xdd, 0xfb, 0x04, 0xcc, 0xe9, 0x93, 0x81,
- 0x7d, 0x53, 0x57, 0xff, 0xa6, 0x81, 0xe5, 0xe6, 0x61, 0xfb, 0x29, 0x0d, 0x86, 0xe1, 0x7e, 0x9c,
- 0x15, 0xbe, 0x07, 0x45, 0xd1, 0x59, 0xcf, 0xe6, 0xb6, 0xae, 0x6d, 0x68, 0x9b, 0xe5, 0xc6, 0x23,
- 0x94, 0xce, 0x25, 0x29, 0x80, 0xc2, 0x13, 0x57, 0x00, 0x0c, 0x09, 0x36, 0x1a, 0x6d, 0xa1, 0x57,
- 0xdd, 0x0f, 0xc4, 0xe1, 0x2f, 0x08, 0xb7, 0x5b, 0xf0, 0x7c, 0x6c, 0xe6, 0x26, 0x63, 0x13, 0xa4,
- 0x18, 0x4e, 0x5c, 0x61, 0x17, 0x14, 0x47, 0x84, 0x32, 0x2f, 0xf0, 0x99, 0xbe, 0xb0, 0x91, 0xdf,
- 0x2c, 0x37, 0xb6, 0xd1, 0x3d, 0x93, 0x47, 0xcd, 0xc3, 0xf6, 0x51, 0xa4, 0x49, 0x3a, 0x6d, 0xd5,
- 0x54, 0x95, 0xa2, 0x3a, 0x61, 0x38, 0xf1, 0xad, 0x7f, 0xd1, 0xc0, 0xda, 0x4c, 0xb6, 0xe7, 0x1e,
- 0xe3, 0xf0, 0xdd, 0x4c, 0x3e, 0x34, 0x5f, 0x3e, 0xa1, 0x96, 0xe9, 0x92, 0xba, 0x31, 0x92, 0xc9,
- 0xf6, 0x06, 0x14, 0x3c, 0x4e, 0x06, 0x71, 0xb0, 0xc6, 0x3c, 0xc1, 0xa6, 0x9b, 0x6c, 0x55, 0x95,
- 0x7d, 0xa1, 0x2d, 0x8c, 0x70, 0xe4, 0x57, 0xff, 0xbc, 0x08, 0x56, 0x9b, 0x87, 0x6d, 0x4c, 0x58,
- 0x30, 0xa4, 0x0e, 0x49, 0x7f, 0xaf, 0x07, 0xa0, 0x48, 0x15, 0x28, 0xf3, 0x94, 0xd2, 0xfe, 0x62,
- 0x32, 0x4e, 0x18, 0xf0, 0x14, 0x54, 0x28, 0x61, 0x61, 0xe0, 0x33, 0xf2, 0xcc, 0xf3, 0x7b, 0xfa,
- 0x82, 0x9c, 0xc0, 0xee, 0x7c, 0x13, 0x90, 0x8d, 0xaa, 0x61, 0x0b, 0x75, 0xab, 0x36, 0x19, 0x9b,
- 0x15, 0x9c, 0xf1, 0xc3, 0x53, 0xee, 0x70, 0x1b, 0x14, 0x98, 0x13, 0x84, 0x44, 0xcf, 0xcb, 0xc6,
- 0x8c, 0x38, 0x59, 0x47, 0x80, 0xd7, 0x63, 0xb3, 0x1a, 0x77, 0x28, 0x01, 0x1c, 0x91, 0xe1, 0x3e,
- 0xa8, 0x31, 0xcf, 0x77, 0x87, 0xa7, 0x36, 0x8d, 0xcf, 0xf5, 0x45, 0x69, 0xa0, 0x2b, 0x83, 0x5a,
- 0xe7, 0xc6, 0x39, 0x9e, 0x51, 0x40, 0x13, 0x14, 0x46, 0x84, 0x76, 0x99, 0x5e, 0xd8, 0xc8, 0x6f,
- 0x96, 0x5a, 0x25, 0x51, 0xf7, 0x48, 0x00, 0x38, 0xc2, 0x21, 0x02, 0x80, 0xf5, 0x03, 0xca, 0x5f,
- 0xda, 0x03, 0xc2, 0xf4, 0xbf, 0x24, 0xeb, 0x6f, 0xb1, 0xb4, 0x9d, 0x04, 0xc5, 0x19, 0x86, 0xe0,
- 0x3b, 0x36, 0x27, 0x6e, 0x40, 0x3d, 0xc2, 0xf4, 0xa5, 0x94, 0xff, 0x38, 0x41, 0x71, 0x86, 0x01,
- 0x29, 0xa8, 0xb0, 0x61, 0x37, 0x9e, 0x3c, 0xd3, 0x8b, 0x72, 0x23, 0xf6, 0xe6, 0xd9, 0x88, 0x4e,
- 0xaa, 0x4b, 0xf7, 0x62, 0x55, 0x85, 0xaf, 0x64, 0x4e, 0x19, 0x9e, 0xaa, 0x51, 0xff, 0xba, 0x00,
- 0xfe, 0xbd, 0x43, 0x0f, 0x77, 0x40, 0x39, 0xc3, 0x55, 0xbb, 0xb2, 0xa2, 0x4c, 0xcb, 0x19, 0x09,
- 0xce, 0xf2, 0xfe, 0xf0, 0xc6, 0x30, 0x50, 0xb5, 0x1d, 0x87, 0x84, 0x9c, 0xf4, 0x5e, 0x9f, 0x85,
- 0x84, 0xe9, 0x79, 0x39, 0xb5, 0xdf, 0x2d, 0xb7, 0xa6, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x3c, 0x5d,
- 0x23, 0x5d, 0x95, 0xc5, 0xdb, 0x57, 0xa5, 0xfe, 0x53, 0x03, 0x2b, 0xb7, 0xdc, 0x40, 0xf0, 0x7f,
- 0xb0, 0xa4, 0x6e, 0x1c, 0x35, 0xce, 0x7f, 0x54, 0xbd, 0x25, 0x45, 0xc5, 0xf1, 0x39, 0x3c, 0x06,
- 0xa5, 0x74, 0x15, 0xa2, 0xcb, 0x61, 0x67, 0x9e, 0x55, 0x98, 0x79, 0xe1, 0x5b, 0xcb, 0xaa, 0x46,
- 0x09, 0x27, 0x4b, 0x90, 0x5a, 0xc3, 0x03, 0x50, 0x3a, 0xa6, 0x84, 0xf5, 0x7d, 0xc2, 0x98, 0x7a,
- 0xed, 0xfe, 0x8b, 0x05, 0x4f, 0xe2, 0x83, 0xeb, 0xb1, 0x09, 0x13, 0xc3, 0x04, 0xc5, 0xa9, 0xb2,
- 0x75, 0x70, 0x7e, 0x65, 0xe4, 0x2e, 0xae, 0x8c, 0xdc, 0xe5, 0x95, 0x91, 0xfb, 0x38, 0x31, 0xb4,
- 0xf3, 0x89, 0xa1, 0x5d, 0x4c, 0x0c, 0xed, 0x72, 0x62, 0x68, 0xdf, 0x27, 0x86, 0xf6, 0xe9, 0x87,
- 0x91, 0x7b, 0x6b, 0xde, 0xf3, 0x0f, 0xfb, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x85, 0x3b, 0x06,
- 0x83, 0x07, 0x00, 0x00,
-}
+func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} }
func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..0998c461
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v2beta1
+
+func (*APIGroupDiscovery) ProtoMessage() {}
+
+func (*APIGroupDiscoveryList) ProtoMessage() {}
+
+func (*APIResourceDiscovery) ProtoMessage() {}
+
+func (*APISubresourceDiscovery) ProtoMessage() {}
+
+func (*APIVersionDiscovery) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..8054db78
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apidiscovery/v2beta1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v2beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2beta1.APIGroupDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupDiscoveryList) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2beta1.APIGroupDiscoveryList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResourceDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2beta1.APIResourceDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APISubresourceDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2beta1.APISubresourceDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIVersionDiscovery) OpenAPIModelName() string {
+ return "io.k8s.api.apidiscovery.v2beta1.APIVersionDiscovery"
+}
diff --git a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
index 867d7416..7a2253c8 100644
--- a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apiserverinternal.v1alpha1
// +groupName=internal.apiserver.k8s.io
diff --git a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
index b0343ffc..0c73dc29 100644
--- a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
@@ -24,258 +24,22 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *StorageVersion) Reset() { *m = StorageVersion{} }
-func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} }
-func (*ServerStorageVersion) ProtoMessage() {}
-func (*ServerStorageVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{0}
-}
-func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServerStorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServerStorageVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerStorageVersion.Merge(m, src)
-}
-func (m *ServerStorageVersion) XXX_Size() int {
- return m.Size()
-}
-func (m *ServerStorageVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerStorageVersion.DiscardUnknown(m)
-}
+func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} }
-var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo
+func (m *StorageVersionList) Reset() { *m = StorageVersionList{} }
-func (m *StorageVersion) Reset() { *m = StorageVersion{} }
-func (*StorageVersion) ProtoMessage() {}
-func (*StorageVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{1}
-}
-func (m *StorageVersion) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersion.Merge(m, src)
-}
-func (m *StorageVersion) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersion proto.InternalMessageInfo
+func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} }
-func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} }
-func (*StorageVersionCondition) ProtoMessage() {}
-func (*StorageVersionCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{2}
-}
-func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionCondition.Merge(m, src)
-}
-func (m *StorageVersionCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo
-
-func (m *StorageVersionList) Reset() { *m = StorageVersionList{} }
-func (*StorageVersionList) ProtoMessage() {}
-func (*StorageVersionList) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{3}
-}
-func (m *StorageVersionList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionList.Merge(m, src)
-}
-func (m *StorageVersionList) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionList) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo
-
-func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} }
-func (*StorageVersionSpec) ProtoMessage() {}
-func (*StorageVersionSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{4}
-}
-func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionSpec.Merge(m, src)
-}
-func (m *StorageVersionSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo
-
-func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} }
-func (*StorageVersionStatus) ProtoMessage() {}
-func (*StorageVersionStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_126bcbf538b54729, []int{5}
-}
-func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionStatus.Merge(m, src)
-}
-func (m *StorageVersionStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ServerStorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.ServerStorageVersion")
- proto.RegisterType((*StorageVersion)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersion")
- proto.RegisterType((*StorageVersionCondition)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionCondition")
- proto.RegisterType((*StorageVersionList)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionList")
- proto.RegisterType((*StorageVersionSpec)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionSpec")
- proto.RegisterType((*StorageVersionStatus)(nil), "k8s.io.api.apiserverinternal.v1alpha1.StorageVersionStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_126bcbf538b54729)
-}
-
-var fileDescriptor_126bcbf538b54729 = []byte{
- // 770 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0x13, 0x41,
- 0x14, 0xee, 0xd2, 0x52, 0x60, 0xaa, 0xad, 0x8c, 0x10, 0x6a, 0x4d, 0xb6, 0xd8, 0x04, 0x83, 0x1a,
- 0x77, 0xa5, 0x11, 0x23, 0x9a, 0x68, 0x58, 0x20, 0x06, 0x85, 0x60, 0xa6, 0xc4, 0x03, 0x7a, 0x70,
- 0xba, 0x1d, 0xb7, 0x2b, 0xdd, 0x9d, 0xcd, 0xce, 0xb4, 0x09, 0x17, 0xe3, 0x4f, 0xd0, 0xff, 0xe1,
- 0xd1, 0x1f, 0xc1, 0xc9, 0x70, 0x24, 0x31, 0x69, 0x64, 0xfd, 0x17, 0x9c, 0xcc, 0xcc, 0x6e, 0xb7,
- 0x6c, 0x5b, 0x62, 0xc3, 0xa1, 0x49, 0xe7, 0xbd, 0xf7, 0x7d, 0xef, 0xcd, 0x37, 0xdf, 0xcc, 0x82,
- 0xd5, 0xc3, 0xa7, 0x4c, 0xb3, 0xa9, 0x8e, 0x3d, 0x5b, 0xfc, 0x18, 0xf1, 0x3b, 0xc4, 0xb7, 0x5d,
- 0x4e, 0x7c, 0x17, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0x79, 0x4d, 0xbc, 0xa2, 0x5b, 0xc4, 0x25, 0x3e,
- 0xe6, 0xa4, 0xa1, 0x79, 0x3e, 0xe5, 0x14, 0x2e, 0x85, 0x30, 0x0d, 0x7b, 0xb6, 0x36, 0x04, 0xd3,
- 0x7a, 0xb0, 0xd2, 0x43, 0xcb, 0xe6, 0xcd, 0x76, 0x5d, 0x33, 0xa9, 0xa3, 0x5b, 0xd4, 0xa2, 0xba,
- 0x44, 0xd7, 0xdb, 0x9f, 0xe4, 0x4a, 0x2e, 0xe4, 0xbf, 0x90, 0xb5, 0xf4, 0xb8, 0x3f, 0x8c, 0x83,
- 0xcd, 0xa6, 0xed, 0x12, 0xff, 0x48, 0xf7, 0x0e, 0x2d, 0x39, 0x99, 0xee, 0x10, 0x8e, 0xf5, 0xce,
- 0xd0, 0x2c, 0x25, 0xfd, 0x32, 0x94, 0xdf, 0x76, 0xb9, 0xed, 0x90, 0x21, 0xc0, 0x93, 0xff, 0x01,
- 0x98, 0xd9, 0x24, 0x0e, 0x1e, 0xc4, 0x55, 0xbe, 0x4f, 0x80, 0xb9, 0x9a, 0xdc, 0x69, 0x8d, 0x53,
- 0x1f, 0x5b, 0xe4, 0x1d, 0xf1, 0x99, 0x4d, 0x5d, 0xb8, 0x0a, 0x72, 0xd8, 0xb3, 0xc3, 0xd4, 0xf6,
- 0x66, 0x51, 0x59, 0x54, 0x96, 0x67, 0x8c, 0x9b, 0xc7, 0xdd, 0x72, 0x2a, 0xe8, 0x96, 0x73, 0xeb,
- 0x6f, 0xb7, 0x7b, 0x29, 0x74, 0xb1, 0x0e, 0xae, 0x83, 0x02, 0x71, 0x4d, 0xda, 0xb0, 0x5d, 0x2b,
- 0x62, 0x2a, 0x4e, 0x48, 0xe8, 0x42, 0x04, 0x2d, 0x6c, 0x25, 0xd3, 0x68, 0xb0, 0x1e, 0x6e, 0x80,
- 0xd9, 0x06, 0x31, 0x69, 0x03, 0xd7, 0x5b, 0xbd, 0x69, 0x58, 0x31, 0xbd, 0x98, 0x5e, 0x9e, 0x31,
- 0xe6, 0x83, 0x6e, 0x79, 0x76, 0x73, 0x30, 0x89, 0x86, 0xeb, 0xe1, 0x33, 0x90, 0x97, 0x07, 0xd8,
- 0x88, 0x19, 0x32, 0x92, 0x01, 0x06, 0xdd, 0x72, 0xbe, 0x96, 0xc8, 0xa0, 0x81, 0xca, 0xca, 0xcf,
- 0x09, 0x90, 0x1f, 0x50, 0xe3, 0x23, 0x98, 0x16, 0x47, 0xd5, 0xc0, 0x1c, 0x4b, 0x29, 0x72, 0xd5,
- 0x47, 0x5a, 0xdf, 0x2e, 0xb1, 0xe2, 0x9a, 0x77, 0x68, 0x49, 0xef, 0x68, 0xa2, 0x5a, 0xeb, 0xac,
- 0x68, 0x7b, 0xf5, 0xcf, 0xc4, 0xe4, 0xbb, 0x84, 0x63, 0x03, 0x46, 0x0a, 0x80, 0x7e, 0x0c, 0xc5,
- 0xac, 0xf0, 0x3d, 0xc8, 0x30, 0x8f, 0x98, 0x52, 0xad, 0x5c, 0x75, 0x4d, 0x1b, 0xcb, 0x8c, 0x5a,
- 0x72, 0xcc, 0x9a, 0x47, 0x4c, 0xe3, 0x5a, 0xd4, 0x26, 0x23, 0x56, 0x48, 0x92, 0x42, 0x13, 0x64,
- 0x19, 0xc7, 0xbc, 0x2d, 0x74, 0x14, 0xf4, 0xcf, 0xaf, 0x46, 0x2f, 0x29, 0x8c, 0x7c, 0xd4, 0x20,
- 0x1b, 0xae, 0x51, 0x44, 0x5d, 0xf9, 0x91, 0x06, 0x0b, 0x49, 0xc0, 0x06, 0x75, 0x1b, 0x36, 0x17,
- 0xfa, 0xbd, 0x04, 0x19, 0x7e, 0xe4, 0x91, 0xc8, 0x46, 0x0f, 0x7a, 0x23, 0xee, 0x1f, 0x79, 0xe4,
- 0xbc, 0x5b, 0xbe, 0x7d, 0x09, 0x4c, 0xa4, 0x91, 0x04, 0xc2, 0xb5, 0x78, 0x07, 0xa1, 0x9d, 0xee,
- 0x24, 0x87, 0x38, 0xef, 0x96, 0x0b, 0x31, 0x2c, 0x39, 0x17, 0x7c, 0x0d, 0x20, 0xad, 0x87, 0x47,
- 0xfc, 0x2a, 0x74, 0xbf, 0x70, 0xa5, 0x10, 0x22, 0x6d, 0x94, 0x22, 0x1a, 0xb8, 0x37, 0x54, 0x81,
- 0x46, 0xa0, 0x60, 0x07, 0xc0, 0x16, 0x66, 0x7c, 0xdf, 0xc7, 0x2e, 0x0b, 0x47, 0xb4, 0x1d, 0x52,
- 0xcc, 0x48, 0x51, 0xef, 0x8f, 0xe7, 0x08, 0x81, 0xe8, 0xf7, 0xdd, 0x19, 0x62, 0x43, 0x23, 0x3a,
- 0xc0, 0xbb, 0x20, 0xeb, 0x13, 0xcc, 0xa8, 0x5b, 0x9c, 0x94, 0xdb, 0x8f, 0xcf, 0x00, 0xc9, 0x28,
- 0x8a, 0xb2, 0xf0, 0x1e, 0x98, 0x72, 0x08, 0x63, 0xd8, 0x22, 0xc5, 0xac, 0x2c, 0x2c, 0x44, 0x85,
- 0x53, 0xbb, 0x61, 0x18, 0xf5, 0xf2, 0x95, 0x5f, 0x0a, 0x80, 0x49, 0xdd, 0x77, 0x6c, 0xc6, 0xe1,
- 0x87, 0x21, 0xa7, 0x6b, 0xe3, 0xed, 0x4b, 0xa0, 0xa5, 0xcf, 0x6f, 0x44, 0x2d, 0xa7, 0x7b, 0x91,
- 0x0b, 0x2e, 0x3f, 0x00, 0x93, 0x36, 0x27, 0x8e, 0x38, 0xc5, 0xf4, 0x72, 0xae, 0xba, 0x7a, 0x25,
- 0x1f, 0x1a, 0xd7, 0xa3, 0x0e, 0x93, 0xdb, 0x82, 0x0b, 0x85, 0x94, 0x95, 0xb9, 0xc1, 0xfd, 0x88,
- 0x0b, 0x50, 0xf9, 0x2d, 0x1e, 0xb8, 0x11, 0x36, 0x86, 0x5f, 0x40, 0x81, 0x25, 0xe2, 0xac, 0xa8,
- 0xc8, 0xa1, 0xc6, 0xbe, 0x1c, 0x23, 0x9e, 0xcd, 0xfe, 0x33, 0x97, 0x8c, 0x33, 0x34, 0xd8, 0x0c,
- 0xee, 0x81, 0x79, 0x93, 0x3a, 0x0e, 0x75, 0xb7, 0x46, 0xbe, 0x97, 0xb7, 0x82, 0x6e, 0x79, 0x7e,
- 0x63, 0x54, 0x01, 0x1a, 0x8d, 0x83, 0x3e, 0x00, 0x66, 0xef, 0x0a, 0x84, 0x0f, 0x66, 0xae, 0xfa,
- 0xe2, 0x4a, 0x02, 0xc7, 0x37, 0xa9, 0xff, 0x66, 0xc5, 0x21, 0x86, 0x2e, 0x74, 0x31, 0xde, 0x1c,
- 0x9f, 0xa9, 0xa9, 0x93, 0x33, 0x35, 0x75, 0x7a, 0xa6, 0xa6, 0xbe, 0x06, 0xaa, 0x72, 0x1c, 0xa8,
- 0xca, 0x49, 0xa0, 0x2a, 0xa7, 0x81, 0xaa, 0xfc, 0x09, 0x54, 0xe5, 0xdb, 0x5f, 0x35, 0x75, 0xb0,
- 0x34, 0xd6, 0x07, 0xf9, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x04, 0x7d, 0x78, 0xb8, 0x07,
- 0x00, 0x00,
-}
+func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} }
func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..b0839952
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,34 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*ServerStorageVersion) ProtoMessage() {}
+
+func (*StorageVersion) ProtoMessage() {}
+
+func (*StorageVersionCondition) ProtoMessage() {}
+
+func (*StorageVersionList) ProtoMessage() {}
+
+func (*StorageVersionSpec) ProtoMessage() {}
+
+func (*StorageVersionStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..5f48f5ff
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apiserverinternal/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,52 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServerStorageVersion) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersion) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionList) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus"
+}
diff --git a/operator/vendor/k8s.io/api/apps/v1/doc.go b/operator/vendor/k8s.io/api/apps/v1/doc.go
index 51fe12c5..122ae8ee 100644
--- a/operator/vendor/k8s.io/api/apps/v1/doc.go
+++ b/operator/vendor/k8s.io/api/apps/v1/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apps.v1
package v1
diff --git a/operator/vendor/k8s.io/api/apps/v1/generated.pb.go b/operator/vendor/k8s.io/api/apps/v1/generated.pb.go
index eacc2593..b46ed0dc 100644
--- a/operator/vendor/k8s.io/api/apps/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -24,12 +24,10 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -37,1039 +35,67 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-func (*ControllerRevision) ProtoMessage() {}
-func (*ControllerRevision) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{0}
-}
-func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevision) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevision.Merge(m, src)
-}
-func (m *ControllerRevision) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevision) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevision.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
-
-func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (*ControllerRevisionList) ProtoMessage() {}
-func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{1}
-}
-func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevisionList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevisionList.Merge(m, src)
-}
-func (m *ControllerRevisionList) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevisionList) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
-
-func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-func (*DaemonSet) ProtoMessage() {}
-func (*DaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{2}
-}
-func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSet.Merge(m, src)
-}
-func (m *DaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
-
-func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-func (*DaemonSetCondition) ProtoMessage() {}
-func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{3}
-}
-func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetCondition.Merge(m, src)
-}
-func (m *DaemonSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
-
-func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-func (*DaemonSetList) ProtoMessage() {}
-func (*DaemonSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{4}
-}
-func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetList.Merge(m, src)
-}
-func (m *DaemonSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
-
-func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-func (*DaemonSetSpec) ProtoMessage() {}
-func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{5}
-}
-func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetSpec.Merge(m, src)
-}
-func (m *DaemonSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
-
-func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-func (*DaemonSetStatus) ProtoMessage() {}
-func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{6}
-}
-func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetStatus.Merge(m, src)
-}
-func (m *DaemonSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
-
-func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-func (*DaemonSetUpdateStrategy) ProtoMessage() {}
-func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{7}
-}
-func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
-
-func (m *Deployment) Reset() { *m = Deployment{} }
-func (*Deployment) ProtoMessage() {}
-func (*Deployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{8}
-}
-func (m *Deployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Deployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Deployment.Merge(m, src)
-}
-func (m *Deployment) XXX_Size() int {
- return m.Size()
-}
-func (m *Deployment) XXX_DiscardUnknown() {
- xxx_messageInfo_Deployment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Deployment proto.InternalMessageInfo
-
-func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (*DeploymentCondition) ProtoMessage() {}
-func (*DeploymentCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{9}
-}
-func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentCondition.Merge(m, src)
-}
-func (m *DeploymentCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
-
-func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-func (*DeploymentList) ProtoMessage() {}
-func (*DeploymentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{10}
-}
-func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentList.Merge(m, src)
-}
-func (m *DeploymentList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
-
-func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-func (*DeploymentSpec) ProtoMessage() {}
-func (*DeploymentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{11}
-}
-func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentSpec.Merge(m, src)
-}
-func (m *DeploymentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
-
-func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (*DeploymentStatus) ProtoMessage() {}
-func (*DeploymentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{12}
-}
-func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStatus.Merge(m, src)
-}
-func (m *DeploymentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
-
-func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (*DeploymentStrategy) ProtoMessage() {}
-func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{13}
-}
-func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStrategy.Merge(m, src)
-}
-func (m *DeploymentStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
-}
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
+func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-func (*ReplicaSet) ProtoMessage() {}
-func (*ReplicaSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{14}
-}
-func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSet.Merge(m, src)
-}
-func (m *ReplicaSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSet.DiscardUnknown(m)
-}
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-func (*ReplicaSetCondition) ProtoMessage() {}
-func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{15}
-}
-func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetCondition.Merge(m, src)
-}
-func (m *ReplicaSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m)
-}
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-func (*ReplicaSetList) ProtoMessage() {}
-func (*ReplicaSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{16}
-}
-func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetList.Merge(m, src)
-}
-func (m *ReplicaSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetList.DiscardUnknown(m)
-}
+func (m *Deployment) Reset() { *m = Deployment{} }
-var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-func (*ReplicaSetSpec) ProtoMessage() {}
-func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{17}
-}
-func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetSpec.Merge(m, src)
-}
-func (m *ReplicaSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m)
-}
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-func (*ReplicaSetStatus) ProtoMessage() {}
-func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{18}
-}
-func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetStatus.Merge(m, src)
-}
-func (m *ReplicaSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m)
-}
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-func (*RollingUpdateDaemonSet) ProtoMessage() {}
-func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{19}
-}
-func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src)
-}
-func (m *RollingUpdateDaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m)
-}
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (*RollingUpdateDeployment) ProtoMessage() {}
-func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{20}
-}
-func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
-}
-func (m *RollingUpdateDeployment) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
-}
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
-func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{21}
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m)
-}
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-func (*StatefulSet) ProtoMessage() {}
-func (*StatefulSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{22}
-}
-func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSet.Merge(m, src)
-}
-func (m *StatefulSet) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSet) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSet.DiscardUnknown(m)
-}
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (*StatefulSetCondition) ProtoMessage() {}
-func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{23}
-}
-func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetCondition.Merge(m, src)
-}
-func (m *StatefulSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m)
-}
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
+func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-func (*StatefulSetList) ProtoMessage() {}
-func (*StatefulSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{24}
-}
-func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetList.Merge(m, src)
-}
-func (m *StatefulSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
-
-func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
-func (*StatefulSetOrdinals) ProtoMessage() {}
-func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{25}
-}
-func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetOrdinals.Merge(m, src)
-}
-func (m *StatefulSetOrdinals) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetOrdinals) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m)
-}
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo
+func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() {
*m = StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{26}
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m)
-}
-var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-func (*StatefulSetSpec) ProtoMessage() {}
-func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{27}
-}
-func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetSpec.Merge(m, src)
-}
-func (m *StatefulSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m)
-}
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
-
-func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-func (*StatefulSetStatus) ProtoMessage() {}
-func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{28}
-}
-func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetStatus.Merge(m, src)
-}
-func (m *StatefulSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
-
-func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
-func (*StatefulSetUpdateStrategy) ProtoMessage() {}
-func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_5b781835628d5338, []int{29}
-}
-func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision")
- proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList")
- proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet")
- proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition")
- proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList")
- proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec")
- proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus")
- proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy")
- proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment")
- proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition")
- proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList")
- proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec")
- proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus")
- proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy")
- proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet")
- proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition")
- proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList")
- proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec")
- proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus")
- proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet")
- proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment")
- proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy")
- proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet")
- proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition")
- proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList")
- proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1.StatefulSetOrdinals")
- proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy")
- proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec")
- proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus")
- proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apps/v1/generated.proto", fileDescriptor_5b781835628d5338)
-}
-
-var fileDescriptor_5b781835628d5338 = []byte{
- // 2225 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x0d, 0x2d, 0xc9, 0x1e, 0xa9, 0x12, 0x63, 0x37, 0xa4, 0xbb, 0x71,
- 0x6d, 0x25, 0x8e, 0xc9, 0xda, 0x71, 0x82, 0xc0, 0x29, 0x12, 0x88, 0x54, 0x9a, 0xba, 0xd1, 0x57,
- 0x87, 0x92, 0x03, 0xb8, 0x69, 0xd1, 0xd1, 0x72, 0x4c, 0x6d, 0xbc, 0x5f, 0xd8, 0x1d, 0x2a, 0x16,
- 0x7a, 0x29, 0x0a, 0x14, 0xe8, 0x21, 0x87, 0xfe, 0x0d, 0xfd, 0x07, 0x8a, 0xa2, 0x68, 0x6e, 0x45,
- 0x50, 0xf4, 0xe2, 0x4b, 0x81, 0xa0, 0x97, 0xe6, 0x44, 0xd4, 0xcc, 0xa9, 0x28, 0x7a, 0x6b, 0x2f,
- 0xbe, 0xb4, 0x98, 0xd9, 0xd9, 0xef, 0x59, 0x91, 0x92, 0x63, 0xa5, 0x09, 0x7c, 0xe3, 0xce, 0x7b,
- 0xef, 0x37, 0x6f, 0x66, 0xde, 0x9b, 0xf7, 0x9b, 0x19, 0x02, 0xf5, 0xfe, 0xeb, 0x5e, 0x43, 0xb7,
- 0x9b, 0xd8, 0xd1, 0x9b, 0xd8, 0x71, 0xbc, 0xe6, 0xc1, 0xf5, 0x66, 0x8f, 0x58, 0xc4, 0xc5, 0x94,
- 0x74, 0x1b, 0x8e, 0x6b, 0x53, 0x1b, 0x42, 0x5f, 0xa7, 0x81, 0x1d, 0xbd, 0xc1, 0x74, 0x1a, 0x07,
- 0xd7, 0xcf, 0x5f, 0xeb, 0xe9, 0x74, 0xbf, 0xbf, 0xd7, 0xd0, 0x6c, 0xb3, 0xd9, 0xb3, 0x7b, 0x76,
- 0x93, 0xab, 0xee, 0xf5, 0xef, 0xf1, 0x2f, 0xfe, 0xc1, 0x7f, 0xf9, 0x10, 0xe7, 0xe3, 0xdd, 0x68,
- 0xb6, 0x4b, 0x24, 0xdd, 0x9c, 0xbf, 0x19, 0xe9, 0x98, 0x58, 0xdb, 0xd7, 0x2d, 0xe2, 0x1e, 0x36,
- 0x9d, 0xfb, 0x3d, 0xd6, 0xe0, 0x35, 0x4d, 0x42, 0xb1, 0xcc, 0xaa, 0x99, 0x67, 0xe5, 0xf6, 0x2d,
- 0xaa, 0x9b, 0x24, 0x63, 0xf0, 0xda, 0x28, 0x03, 0x4f, 0xdb, 0x27, 0x26, 0xce, 0xd8, 0xbd, 0x92,
- 0x67, 0xd7, 0xa7, 0xba, 0xd1, 0xd4, 0x2d, 0xea, 0x51, 0x37, 0x6d, 0xa4, 0xfe, 0x47, 0x01, 0xb0,
- 0x6d, 0x5b, 0xd4, 0xb5, 0x0d, 0x83, 0xb8, 0x88, 0x1c, 0xe8, 0x9e, 0x6e, 0x5b, 0xf0, 0xa7, 0xa0,
- 0xcc, 0xc6, 0xd3, 0xc5, 0x14, 0x57, 0x95, 0x8b, 0xca, 0x4a, 0xe5, 0xc6, 0x77, 0x1a, 0xd1, 0x24,
- 0x87, 0xf0, 0x0d, 0xe7, 0x7e, 0x8f, 0x35, 0x78, 0x0d, 0xa6, 0xdd, 0x38, 0xb8, 0xde, 0xd8, 0xda,
- 0xfb, 0x80, 0x68, 0x74, 0x83, 0x50, 0xdc, 0x82, 0x0f, 0x07, 0xf5, 0x89, 0xe1, 0xa0, 0x0e, 0xa2,
- 0x36, 0x14, 0xa2, 0xc2, 0x2d, 0x50, 0xe4, 0xe8, 0x05, 0x8e, 0x7e, 0x2d, 0x17, 0x5d, 0x0c, 0xba,
- 0x81, 0xf0, 0x87, 0x6f, 0x3f, 0xa0, 0xc4, 0x62, 0xee, 0xb5, 0xce, 0x08, 0xe8, 0xe2, 0x1a, 0xa6,
- 0x18, 0x71, 0x20, 0xf8, 0x32, 0x28, 0xbb, 0xc2, 0xfd, 0xea, 0xe4, 0x45, 0x65, 0x65, 0xb2, 0x75,
- 0x56, 0x68, 0x95, 0x83, 0x61, 0xa1, 0x50, 0x43, 0xfd, 0xb3, 0x02, 0x96, 0xb2, 0xe3, 0x5e, 0xd7,
- 0x3d, 0x0a, 0xdf, 0xcf, 0x8c, 0xbd, 0x31, 0xde, 0xd8, 0x99, 0x35, 0x1f, 0x79, 0xd8, 0x71, 0xd0,
- 0x12, 0x1b, 0xf7, 0xbb, 0xa0, 0xa4, 0x53, 0x62, 0x7a, 0xd5, 0xc2, 0xc5, 0xc9, 0x95, 0xca, 0x8d,
- 0xcb, 0x8d, 0x6c, 0xec, 0x36, 0xb2, 0x8e, 0xb5, 0x66, 0x05, 0x64, 0xe9, 0x36, 0x33, 0x46, 0x3e,
- 0x86, 0xfa, 0x5f, 0x05, 0xcc, 0xac, 0x61, 0x62, 0xda, 0x56, 0x87, 0xd0, 0x53, 0x58, 0xb4, 0x36,
- 0x28, 0x7a, 0x0e, 0xd1, 0xc4, 0xa2, 0x7d, 0x4b, 0xe6, 0x7b, 0xe8, 0x4e, 0xc7, 0x21, 0x5a, 0xb4,
- 0x50, 0xec, 0x0b, 0x71, 0x63, 0xf8, 0x2e, 0x98, 0xf2, 0x28, 0xa6, 0x7d, 0x8f, 0x2f, 0x53, 0xe5,
- 0xc6, 0x0b, 0x47, 0xc3, 0x70, 0xd5, 0xd6, 0x9c, 0x00, 0x9a, 0xf2, 0xbf, 0x91, 0x80, 0x50, 0xff,
- 0x51, 0x00, 0x30, 0xd4, 0x6d, 0xdb, 0x56, 0x57, 0xa7, 0x2c, 0x7e, 0x6f, 0x81, 0x22, 0x3d, 0x74,
- 0x08, 0x9f, 0x86, 0x99, 0xd6, 0xe5, 0xc0, 0x8b, 0x9d, 0x43, 0x87, 0x3c, 0x1e, 0xd4, 0x97, 0xb2,
- 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0x5c, 0x0f, 0xfd, 0x2b, 0x70, 0xeb, 0x9b, 0xc9, 0xae, 0x1f, 0x0f,
- 0xea, 0x92, 0xcd, 0xa2, 0x11, 0x22, 0x25, 0x1d, 0x84, 0x07, 0x00, 0x1a, 0xd8, 0xa3, 0x3b, 0x2e,
- 0xb6, 0x3c, 0xbf, 0x27, 0xdd, 0x24, 0x62, 0xe4, 0x2f, 0x8d, 0xb7, 0x3c, 0xcc, 0xa2, 0x75, 0x5e,
- 0x78, 0x01, 0xd7, 0x33, 0x68, 0x48, 0xd2, 0x03, 0xbc, 0x0c, 0xa6, 0x5c, 0x82, 0x3d, 0xdb, 0xaa,
- 0x16, 0xf9, 0x28, 0xc2, 0x09, 0x44, 0xbc, 0x15, 0x09, 0x29, 0x7c, 0x11, 0x4c, 0x9b, 0xc4, 0xf3,
- 0x70, 0x8f, 0x54, 0x4b, 0x5c, 0x71, 0x5e, 0x28, 0x4e, 0x6f, 0xf8, 0xcd, 0x28, 0x90, 0xab, 0xbf,
- 0x53, 0xc0, 0x6c, 0x38, 0x73, 0xa7, 0x90, 0x2a, 0xad, 0x64, 0xaa, 0x3c, 0x7f, 0x64, 0x9c, 0xe4,
- 0x64, 0xc8, 0x27, 0x93, 0x31, 0x9f, 0x59, 0x10, 0xc2, 0x1f, 0x83, 0xb2, 0x47, 0x0c, 0xa2, 0x51,
- 0xdb, 0x15, 0x3e, 0xbf, 0x32, 0xa6, 0xcf, 0x78, 0x8f, 0x18, 0x1d, 0x61, 0xda, 0x3a, 0xc3, 0x9c,
- 0x0e, 0xbe, 0x50, 0x08, 0x09, 0x7f, 0x08, 0xca, 0x94, 0x98, 0x8e, 0x81, 0x29, 0x11, 0x69, 0x92,
- 0x88, 0x6f, 0x16, 0x2e, 0x0c, 0x6c, 0xdb, 0xee, 0xee, 0x08, 0x35, 0x9e, 0x28, 0xe1, 0x3c, 0x04,
- 0xad, 0x28, 0x84, 0x81, 0xf7, 0xc1, 0x5c, 0xdf, 0xe9, 0x32, 0x4d, 0xca, 0xb6, 0xee, 0xde, 0xa1,
- 0x08, 0x9f, 0xab, 0x47, 0x4e, 0xc8, 0x6e, 0xc2, 0xa4, 0xb5, 0x24, 0x3a, 0x98, 0x4b, 0xb6, 0xa3,
- 0x14, 0x34, 0x5c, 0x05, 0xf3, 0xa6, 0x6e, 0x21, 0x82, 0xbb, 0x87, 0x1d, 0xa2, 0xd9, 0x56, 0xd7,
- 0xe3, 0x01, 0x54, 0x6a, 0x2d, 0x0b, 0x80, 0xf9, 0x8d, 0xa4, 0x18, 0xa5, 0xf5, 0xe1, 0x3a, 0x58,
- 0x0c, 0xf6, 0xd9, 0xef, 0xeb, 0x1e, 0xb5, 0xdd, 0xc3, 0x75, 0xdd, 0xd4, 0x69, 0x75, 0x8a, 0xe3,
- 0x54, 0x87, 0x83, 0xfa, 0x22, 0x92, 0xc8, 0x91, 0xd4, 0x4a, 0xfd, 0x68, 0x0a, 0xcc, 0xa7, 0x76,
- 0x03, 0x78, 0x07, 0x2c, 0x69, 0x7d, 0xd7, 0x25, 0x16, 0xdd, 0xec, 0x9b, 0x7b, 0xc4, 0xed, 0x68,
- 0xfb, 0xa4, 0xdb, 0x37, 0x48, 0x97, 0xaf, 0x68, 0xa9, 0x55, 0x13, 0xbe, 0x2e, 0xb5, 0xa5, 0x5a,
- 0x28, 0xc7, 0x1a, 0xfe, 0x00, 0x40, 0x8b, 0x37, 0x6d, 0xe8, 0x9e, 0x17, 0x62, 0x16, 0x38, 0x66,
- 0x98, 0x80, 0x9b, 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x3e, 0x76, 0x89, 0xa7, 0xbb, 0xa4, 0x9b, 0xf6,
- 0x71, 0x32, 0xe9, 0xe3, 0x9a, 0x54, 0x0b, 0xe5, 0x58, 0xc3, 0x57, 0x41, 0xc5, 0xef, 0x8d, 0xcf,
- 0xb9, 0x58, 0x9c, 0x05, 0x01, 0x56, 0xd9, 0x8c, 0x44, 0x28, 0xae, 0xc7, 0x86, 0x66, 0xef, 0x79,
- 0xc4, 0x3d, 0x20, 0xdd, 0x77, 0x7c, 0x0e, 0xc0, 0x0a, 0x65, 0x89, 0x17, 0xca, 0x70, 0x68, 0x5b,
- 0x19, 0x0d, 0x24, 0xb1, 0x62, 0x43, 0xf3, 0xa3, 0x26, 0x33, 0xb4, 0xa9, 0xe4, 0xd0, 0x76, 0xa5,
- 0x5a, 0x28, 0xc7, 0x9a, 0xc5, 0x9e, 0xef, 0xf2, 0xea, 0x01, 0xd6, 0x0d, 0xbc, 0x67, 0x90, 0xea,
- 0x74, 0x32, 0xf6, 0x36, 0x93, 0x62, 0x94, 0xd6, 0x87, 0xef, 0x80, 0x73, 0x7e, 0xd3, 0xae, 0x85,
- 0x43, 0x90, 0x32, 0x07, 0x79, 0x4e, 0x80, 0x9c, 0xdb, 0x4c, 0x2b, 0xa0, 0xac, 0x0d, 0xbc, 0x05,
- 0xe6, 0x34, 0xdb, 0x30, 0x78, 0x3c, 0xb6, 0xed, 0xbe, 0x45, 0xab, 0x33, 0x1c, 0x05, 0xb2, 0x1c,
- 0x6a, 0x27, 0x24, 0x28, 0xa5, 0x09, 0xef, 0x02, 0xa0, 0x05, 0xe5, 0xc0, 0xab, 0x82, 0xfc, 0x42,
- 0x9f, 0xad, 0x43, 0x51, 0x01, 0x0e, 0x9b, 0x3c, 0x14, 0x43, 0x53, 0x3f, 0x51, 0xc0, 0x72, 0x4e,
- 0x8e, 0xc3, 0xb7, 0x12, 0x55, 0xef, 0x6a, 0xaa, 0xea, 0x5d, 0xc8, 0x31, 0x8b, 0x95, 0x3e, 0x0d,
- 0xcc, 0x32, 0xde, 0xa1, 0x5b, 0x3d, 0x5f, 0x45, 0xec, 0x60, 0x2f, 0xc9, 0x7c, 0x47, 0x71, 0xc5,
- 0x68, 0x1b, 0x3e, 0x37, 0x1c, 0xd4, 0x67, 0x13, 0x32, 0x94, 0xc4, 0x54, 0x7f, 0x51, 0x00, 0x60,
- 0x8d, 0x38, 0x86, 0x7d, 0x68, 0x12, 0xeb, 0x34, 0x58, 0xcb, 0x5a, 0x82, 0xb5, 0xa8, 0xd2, 0x85,
- 0x08, 0xfd, 0xc9, 0xa5, 0x2d, 0xeb, 0x29, 0xda, 0x72, 0x69, 0x04, 0xce, 0xd1, 0xbc, 0xe5, 0x6f,
- 0x93, 0x60, 0x21, 0x52, 0x8e, 0x88, 0xcb, 0x1b, 0x89, 0x25, 0xbc, 0x92, 0x5a, 0xc2, 0x65, 0x89,
- 0xc9, 0x53, 0x63, 0x2e, 0x1f, 0x80, 0x39, 0xc6, 0x2b, 0xfc, 0x55, 0xe3, 0xac, 0x65, 0xea, 0xd8,
- 0xac, 0x25, 0xac, 0x3a, 0xeb, 0x09, 0x24, 0x94, 0x42, 0xce, 0x61, 0x49, 0xd3, 0x5f, 0x45, 0x96,
- 0xf4, 0x7b, 0x05, 0xcc, 0x45, 0xcb, 0x74, 0x0a, 0x34, 0xa9, 0x9d, 0xa4, 0x49, 0xb5, 0xa3, 0xe3,
- 0x32, 0x87, 0x27, 0xfd, 0xb5, 0x18, 0xf7, 0x9a, 0x13, 0xa5, 0x15, 0x76, 0xa0, 0x72, 0x0c, 0x5d,
- 0xc3, 0x9e, 0x28, 0xab, 0x67, 0xfc, 0xc3, 0x94, 0xdf, 0x86, 0x42, 0x69, 0x82, 0x52, 0x15, 0x9e,
- 0x2e, 0xa5, 0x9a, 0xfc, 0x62, 0x28, 0xd5, 0x0e, 0x28, 0x7b, 0x01, 0x99, 0x2a, 0x72, 0xc8, 0xcb,
- 0xa3, 0xd2, 0x59, 0xf0, 0xa8, 0x10, 0x35, 0x64, 0x50, 0x21, 0x92, 0x8c, 0x3b, 0x95, 0xbe, 0x4c,
- 0xee, 0xc4, 0xc2, 0xdb, 0xc1, 0x7d, 0x8f, 0x74, 0x79, 0x2a, 0x95, 0xa3, 0xf0, 0xde, 0xe6, 0xad,
- 0x48, 0x48, 0xe1, 0x2e, 0x58, 0x76, 0x5c, 0xbb, 0xe7, 0x12, 0xcf, 0x5b, 0x23, 0xb8, 0x6b, 0xe8,
- 0x16, 0x09, 0x06, 0xe0, 0x57, 0xbd, 0x0b, 0xc3, 0x41, 0x7d, 0x79, 0x5b, 0xae, 0x82, 0xf2, 0x6c,
- 0xd5, 0x5f, 0x95, 0xc0, 0xd9, 0xf4, 0x8e, 0x98, 0x43, 0x44, 0x94, 0x13, 0x11, 0x91, 0x97, 0x63,
- 0x21, 0xea, 0xb3, 0xb4, 0xd8, 0x99, 0x3f, 0x13, 0xa6, 0xab, 0x60, 0x5e, 0x10, 0x8f, 0x40, 0x28,
- 0xa8, 0x58, 0xb8, 0x3c, 0xbb, 0x49, 0x31, 0x4a, 0xeb, 0xc3, 0x37, 0xc0, 0xac, 0xcb, 0xb9, 0x55,
- 0x00, 0xe0, 0xf3, 0x93, 0x6f, 0x08, 0x80, 0x59, 0x14, 0x17, 0xa2, 0xa4, 0x2e, 0xe3, 0x26, 0x11,
- 0xe5, 0x08, 0x00, 0x8a, 0x49, 0x6e, 0xb2, 0x9a, 0x56, 0x40, 0x59, 0x1b, 0xb8, 0x01, 0x16, 0xfa,
- 0x56, 0x16, 0xca, 0x8f, 0xb5, 0x0b, 0x02, 0x6a, 0x61, 0x37, 0xab, 0x82, 0x64, 0x76, 0xf0, 0x36,
- 0x58, 0xa0, 0xc4, 0x35, 0x75, 0x0b, 0x53, 0xdd, 0xea, 0x85, 0x70, 0xfe, 0xca, 0x2f, 0x33, 0xa8,
- 0x9d, 0xac, 0x18, 0xc9, 0x6c, 0xe0, 0x8f, 0x12, 0xcc, 0x67, 0x8a, 0x6f, 0x48, 0x57, 0x8e, 0xce,
- 0xac, 0xb1, 0xa9, 0x8f, 0x84, 0x92, 0x95, 0xc7, 0xa5, 0x64, 0xea, 0xc7, 0x0a, 0x80, 0xd9, 0x6c,
- 0x1e, 0x79, 0x4f, 0x90, 0xb1, 0x88, 0x55, 0xdb, 0xae, 0x9c, 0x2c, 0x5d, 0x1d, 0x4d, 0x96, 0xa2,
- 0xcd, 0x78, 0x3c, 0xb6, 0x24, 0xa6, 0xf7, 0x74, 0xee, 0x78, 0xc6, 0x60, 0x4b, 0x91, 0x3f, 0x4f,
- 0xc6, 0x96, 0x62, 0x38, 0x47, 0xb3, 0xa5, 0x7f, 0x16, 0xc0, 0x42, 0xa4, 0x3c, 0x36, 0x5b, 0x92,
- 0x98, 0x3c, 0xbb, 0xe7, 0x19, 0x8f, 0xc1, 0x44, 0x53, 0xf7, 0x7f, 0xc2, 0x60, 0x22, 0x87, 0x72,
- 0x18, 0xcc, 0x6f, 0x0b, 0x71, 0xaf, 0x8f, 0xc9, 0x60, 0xbe, 0x80, 0x5b, 0x8f, 0xaf, 0x1c, 0x09,
- 0x52, 0x3f, 0x2a, 0x82, 0xb3, 0xe9, 0x14, 0x4c, 0x94, 0x54, 0x65, 0x64, 0x49, 0xdd, 0x06, 0x8b,
- 0xf7, 0xfa, 0x86, 0x71, 0xc8, 0xc7, 0x10, 0xab, 0xab, 0x7e, 0x31, 0xfe, 0xa6, 0xb0, 0x5c, 0xfc,
- 0x9e, 0x44, 0x07, 0x49, 0x2d, 0xb3, 0x15, 0xb6, 0xf8, 0xa4, 0x15, 0xb6, 0x74, 0x82, 0x0a, 0x9b,
- 0x53, 0x12, 0xa7, 0x4f, 0x50, 0x12, 0xe5, 0x7c, 0x67, 0xf2, 0x44, 0x7c, 0x67, 0xec, 0xf2, 0x2a,
- 0xd9, 0xf9, 0x46, 0xde, 0x2c, 0x0c, 0x15, 0xb0, 0x24, 0x3f, 0xd4, 0x43, 0x03, 0xcc, 0x99, 0xf8,
- 0x41, 0xfc, 0x4a, 0x65, 0x54, 0xed, 0xe9, 0x53, 0xdd, 0x68, 0xf8, 0x6f, 0x4e, 0x8d, 0xdb, 0x16,
- 0xdd, 0x72, 0x3b, 0xd4, 0xd5, 0xad, 0x9e, 0x5f, 0xab, 0x37, 0x12, 0x58, 0x28, 0x85, 0x0d, 0xef,
- 0x82, 0xb2, 0x89, 0x1f, 0x74, 0xfa, 0x6e, 0x2f, 0xa8, 0xa9, 0xc7, 0xef, 0x87, 0xa7, 0xd1, 0x86,
- 0x40, 0x41, 0x21, 0x9e, 0xfa, 0xb9, 0x02, 0x96, 0x73, 0x8a, 0xf1, 0xd7, 0x68, 0x94, 0x7f, 0x54,
- 0xc0, 0xc5, 0xc4, 0x28, 0x59, 0x72, 0x93, 0x7b, 0x7d, 0x83, 0xe7, 0xb9, 0xe0, 0x3e, 0x57, 0xc1,
- 0x8c, 0x83, 0x5d, 0xaa, 0x87, 0xfc, 0xbb, 0xd4, 0x9a, 0x1d, 0x0e, 0xea, 0x33, 0xdb, 0x41, 0x23,
- 0x8a, 0xe4, 0x92, 0xb9, 0x29, 0x3c, 0xbd, 0xb9, 0x51, 0x7f, 0x59, 0x00, 0x95, 0x98, 0xcb, 0xa7,
- 0xc0, 0x7a, 0xde, 0x4e, 0xb0, 0x1e, 0xe9, 0x93, 0x54, 0x7c, 0x0e, 0xf3, 0x68, 0xcf, 0x46, 0x8a,
- 0xf6, 0x7c, 0x7b, 0x14, 0xd0, 0xd1, 0xbc, 0xe7, 0x5f, 0x05, 0xb0, 0x18, 0xd3, 0x8e, 0x88, 0xcf,
- 0x77, 0x13, 0xc4, 0x67, 0x25, 0x45, 0x7c, 0xaa, 0x32, 0x9b, 0x67, 0xcc, 0x67, 0x34, 0xf3, 0xf9,
- 0x83, 0x02, 0xe6, 0x63, 0x73, 0x77, 0x0a, 0xd4, 0x67, 0x2d, 0x49, 0x7d, 0xea, 0x23, 0xe2, 0x25,
- 0x87, 0xfb, 0xdc, 0x02, 0x0b, 0x31, 0xa5, 0x2d, 0xb7, 0xab, 0x5b, 0xd8, 0xf0, 0xe0, 0x0b, 0xa0,
- 0xe4, 0x51, 0xec, 0xd2, 0x20, 0xbb, 0x03, 0xdb, 0x0e, 0x6b, 0x44, 0xbe, 0x4c, 0xfd, 0xb7, 0x02,
- 0x9a, 0x31, 0xe3, 0x6d, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0xef, 0xd8, 0x46, 0xdf, 0x24, 0x6d,
- 0x03, 0xeb, 0x26, 0x22, 0xac, 0x41, 0xb7, 0xad, 0x6d, 0xdb, 0xd0, 0xb5, 0x43, 0x88, 0x41, 0xe5,
- 0xc3, 0x7d, 0x62, 0xad, 0x11, 0x83, 0x50, 0xf1, 0xe8, 0x32, 0xd3, 0x7a, 0x2b, 0x78, 0x83, 0x78,
- 0x2f, 0x12, 0x3d, 0x1e, 0xd4, 0x57, 0xc6, 0x41, 0xe4, 0xc1, 0x19, 0xc7, 0x84, 0x3f, 0x01, 0x80,
- 0x7d, 0x76, 0x34, 0x1c, 0x3c, 0xc1, 0xcc, 0xb4, 0xde, 0x0c, 0x52, 0xf8, 0xbd, 0x50, 0x72, 0xac,
- 0x0e, 0x62, 0x88, 0xea, 0x6f, 0xca, 0x89, 0xa5, 0xfe, 0xda, 0xdf, 0x78, 0xfd, 0x0c, 0x2c, 0x1e,
- 0x44, 0xb3, 0x13, 0x28, 0x30, 0x7a, 0xc5, 0xe2, 0xee, 0x45, 0x29, 0xbc, 0x6c, 0x5e, 0x23, 0x52,
- 0x77, 0x47, 0x02, 0x87, 0xa4, 0x9d, 0xc0, 0x57, 0x41, 0x85, 0x71, 0x19, 0x5d, 0x23, 0x9b, 0xd8,
- 0x0c, 0xd2, 0x30, 0x7c, 0xb3, 0xea, 0x44, 0x22, 0x14, 0xd7, 0x83, 0xfb, 0x60, 0xc1, 0xb1, 0xbb,
- 0x1b, 0xd8, 0xc2, 0x3d, 0xc2, 0x2a, 0xb4, 0xbf, 0x94, 0xfc, 0x2e, 0x6c, 0xa6, 0xf5, 0x5a, 0x70,
- 0xcf, 0xb1, 0x9d, 0x55, 0x61, 0x87, 0x3f, 0x49, 0x33, 0x0f, 0x02, 0x19, 0x24, 0x34, 0x33, 0x4f,
- 0xac, 0xd3, 0x99, 0xff, 0xa5, 0xc8, 0xf2, 0xf1, 0x84, 0x8f, 0xac, 0x79, 0xb7, 0x7c, 0xe5, 0x13,
- 0xdd, 0xf2, 0x49, 0x0e, 0x2f, 0x33, 0xc7, 0x3c, 0xbc, 0xfc, 0x49, 0x01, 0x97, 0x9c, 0x31, 0xd2,
- 0xa8, 0x0a, 0xf8, 0xb4, 0xb4, 0x47, 0x4c, 0xcb, 0x38, 0x19, 0xd9, 0x5a, 0x19, 0x0e, 0xea, 0x97,
- 0xc6, 0xd1, 0x44, 0x63, 0xb9, 0xc6, 0x92, 0xc6, 0x16, 0x3b, 0x5f, 0xb5, 0xc2, 0xdd, 0xbc, 0x32,
- 0xc2, 0xcd, 0x60, 0xa3, 0xf4, 0xf3, 0x30, 0xf8, 0x42, 0x21, 0x8c, 0xfa, 0x71, 0x09, 0x9c, 0xcb,
- 0x54, 0xeb, 0x2f, 0xf1, 0x06, 0x33, 0x73, 0x38, 0x9a, 0x3c, 0xc6, 0xe1, 0x68, 0x15, 0xcc, 0x8b,
- 0x67, 0xef, 0xd4, 0xd9, 0x2a, 0x0c, 0x93, 0x76, 0x52, 0x8c, 0xd2, 0xfa, 0xb2, 0x1b, 0xd4, 0xd2,
- 0x31, 0x6f, 0x50, 0xe3, 0x5e, 0x88, 0x7f, 0x6b, 0xf9, 0xf9, 0x9c, 0xf5, 0x42, 0xfc, 0x69, 0x2b,
- 0xad, 0x0f, 0xdf, 0x0c, 0x92, 0x35, 0x44, 0x98, 0xe6, 0x08, 0xa9, 0xec, 0x0b, 0x01, 0x52, 0xda,
- 0x4f, 0xf4, 0xb4, 0xfb, 0xbe, 0xe4, 0x69, 0x77, 0x65, 0x44, 0x98, 0x8d, 0x7f, 0xc3, 0x29, 0x3d,
- 0xbf, 0x56, 0x8e, 0x7f, 0x7e, 0x55, 0xff, 0xa2, 0x80, 0xe7, 0x72, 0xb7, 0x29, 0xb8, 0x9a, 0x60,
- 0x8f, 0xd7, 0x52, 0xec, 0xf1, 0xf9, 0x5c, 0xc3, 0x18, 0x85, 0x34, 0xe5, 0x97, 0x9f, 0x37, 0x47,
- 0x5e, 0x7e, 0x4a, 0x4e, 0x22, 0xa3, 0x6f, 0x41, 0x5b, 0xaf, 0x3f, 0x7c, 0x54, 0x9b, 0xf8, 0xf4,
- 0x51, 0x6d, 0xe2, 0xb3, 0x47, 0xb5, 0x89, 0x9f, 0x0f, 0x6b, 0xca, 0xc3, 0x61, 0x4d, 0xf9, 0x74,
- 0x58, 0x53, 0x3e, 0x1b, 0xd6, 0x94, 0xbf, 0x0f, 0x6b, 0xca, 0xaf, 0x3f, 0xaf, 0x4d, 0xdc, 0x85,
- 0xd9, 0xff, 0x8a, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0a, 0xea, 0xf9, 0x40, 0x2a, 0x00,
- 0x00,
-}
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/apps/v1/generated.proto b/operator/vendor/k8s.io/api/apps/v1/generated.proto
index 5885a622..42d5415c 100644
--- a/operator/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/apps/v1/generated.proto
@@ -343,7 +343,7 @@ message DeploymentStatus {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 9;
@@ -481,7 +481,7 @@ message ReplicaSetStatus {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 7;
@@ -581,10 +581,12 @@ message RollingUpdateStatefulSetStrategy {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
}
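The maxUnavailable comment above describes runtime behavior, so a minimal Go sketch of setting the field may help. This is a sketch, not taken from the vendored sources; the 25% value is an arbitrary choice, and the field is honored by servers with the MaxUnavailableStatefulSet gate (beta, on by default per the comment above).

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Allow up to 25% of pods in the range 0..Replicas-1 to be unavailable
	// during a rolling update; absolute counts are rounded up from percentages.
	maxUnavailable := intstr.FromString("25%")
	strategy := appsv1.StatefulSetUpdateStrategy{
		Type: appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{
			MaxUnavailable: &maxUnavailable,
		},
	}
	fmt.Println(strategy.RollingUpdate.MaxUnavailable.String())
}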
diff --git a/operator/vendor/k8s.io/api/apps/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apps/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..f9faa4e1
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1/generated.protomessage.pb.go
@@ -0,0 +1,82 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*ControllerRevision) ProtoMessage() {}
+
+func (*ControllerRevisionList) ProtoMessage() {}
+
+func (*DaemonSet) ProtoMessage() {}
+
+func (*DaemonSetCondition) ProtoMessage() {}
+
+func (*DaemonSetList) ProtoMessage() {}
+
+func (*DaemonSetSpec) ProtoMessage() {}
+
+func (*DaemonSetStatus) ProtoMessage() {}
+
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+
+func (*Deployment) ProtoMessage() {}
+
+func (*DeploymentCondition) ProtoMessage() {}
+
+func (*DeploymentList) ProtoMessage() {}
+
+func (*DeploymentSpec) ProtoMessage() {}
+
+func (*DeploymentStatus) ProtoMessage() {}
+
+func (*DeploymentStrategy) ProtoMessage() {}
+
+func (*ReplicaSet) ProtoMessage() {}
+
+func (*ReplicaSetCondition) ProtoMessage() {}
+
+func (*ReplicaSetList) ProtoMessage() {}
+
+func (*ReplicaSetSpec) ProtoMessage() {}
+
+func (*ReplicaSetStatus) ProtoMessage() {}
+
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+
+func (*RollingUpdateDeployment) ProtoMessage() {}
+
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+
+func (*StatefulSet) ProtoMessage() {}
+
+func (*StatefulSetCondition) ProtoMessage() {}
+
+func (*StatefulSetList) ProtoMessage() {}
+
+func (*StatefulSetOrdinals) ProtoMessage() {}
+
+func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
+
+func (*StatefulSetSpec) ProtoMessage() {}
+
+func (*StatefulSetStatus) ProtoMessage() {}
+
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
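The marker methods above exist only under the kubernetes_protomessage_one_more_release build tag. A minimal sketch of a compile-time check that relies on them; the file and the interface literal are assumptions for illustration, not part of the generated code.

//go:build kubernetes_protomessage_one_more_release

package main

import appsv1 "k8s.io/api/apps/v1"

// With the build tag set, the gated ProtoMessage marker methods are present,
// so these assertions compile; without the tag they would fail to build.
var (
	_ interface{ ProtoMessage() } = (*appsv1.Deployment)(nil)
	_ interface{ ProtoMessage() } = (*appsv1.StatefulSet)(nil)
)

func main() {}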
diff --git a/operator/vendor/k8s.io/api/apps/v1/types.go b/operator/vendor/k8s.io/api/apps/v1/types.go
index 4cf54cc9..b8989c14 100644
--- a/operator/vendor/k8s.io/api/apps/v1/types.go
+++ b/operator/vendor/k8s.io/api/apps/v1/types.go
@@ -19,7 +19,7 @@ package v1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
@@ -123,10 +123,12 @@ type RollingUpdateStatefulSetStrategy struct {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"`
}
@@ -512,7 +514,7 @@ type DeploymentStatus struct {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
@@ -900,7 +902,7 @@ type ReplicaSetStatus struct {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
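Because terminatingReplicas is optional and surfaced as a *int32, callers should nil-check it before use; a server that does not populate the field simply leaves it unset. A minimal sketch, with a hypothetical Deployment value standing in for one fetched via client-go:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// reportTerminating prints the terminating-pod count from a Deployment's status.
// The field is beta (DeploymentReplicaSetTerminatingReplicas, enabled by default),
// but it may be nil when the server does not report it.
func reportTerminating(d *appsv1.Deployment) {
	if d.Status.TerminatingReplicas != nil {
		fmt.Printf("%s: %d terminating pods\n", d.Name, *d.Status.TerminatingReplicas)
		return
	}
	fmt.Printf("%s: terminatingReplicas not reported\n", d.Name)
}

func main() {
	d := &appsv1.Deployment{} // hypothetical object; normally obtained from the API server
	d.Name = "example"
	reportTerminating(d)
}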
diff --git a/operator/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index ac54033f..718914c6 100644
--- a/operator/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -182,7 +182,7 @@ var map_DeploymentStatus = map[string]string{
"readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
"availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
- "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -253,7 +253,7 @@ var map_ReplicaSetStatus = map[string]string{
"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
"readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
"availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
- "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
@@ -285,7 +285,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
var map_RollingUpdateStatefulSetStrategy = map[string]string{
"": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.",
- "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable. This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.",
}
func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/apps/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apps/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..ae9c3ace
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1/zz_generated.model_name.go
@@ -0,0 +1,172 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevision) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ControllerRevision"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevisionList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ControllerRevisionList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DaemonSetUpdateStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Deployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.Deployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DeploymentCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DeploymentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DeploymentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DeploymentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.DeploymentStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ReplicaSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ReplicaSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ReplicaSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ReplicaSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.ReplicaSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.RollingUpdateDaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDeployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.RollingUpdateDeployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateStatefulSetStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetOrdinals) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetOrdinals"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetPersistentVolumeClaimRetentionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1.StatefulSetUpdateStrategy"
+}
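The new OpenAPIModelName methods use value receivers, so the model name can be read directly off a zero value; a short usage sketch:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	fmt.Println(appsv1.Deployment{}.OpenAPIModelName())  // io.k8s.api.apps.v1.Deployment
	fmt.Println(appsv1.StatefulSet{}.OpenAPIModelName()) // io.k8s.api.apps.v1.StatefulSet
}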
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/doc.go b/operator/vendor/k8s.io/api/apps/v1beta1/doc.go
index 7770fab5..85f6df3f 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apps.v1beta1
package v1beta1
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
index ae84aaf4..3058b6d2 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
@@ -23,14 +23,12 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -38,826 +36,53 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-func (*ControllerRevision) ProtoMessage() {}
-func (*ControllerRevision) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{0}
-}
-func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevision) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevision.Merge(m, src)
-}
-func (m *ControllerRevision) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevision) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevision.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
-
-func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (*ControllerRevisionList) ProtoMessage() {}
-func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{1}
-}
-func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevisionList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevisionList.Merge(m, src)
-}
-func (m *ControllerRevisionList) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevisionList) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
-
-func (m *Deployment) Reset() { *m = Deployment{} }
-func (*Deployment) ProtoMessage() {}
-func (*Deployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{2}
-}
-func (m *Deployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Deployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Deployment.Merge(m, src)
-}
-func (m *Deployment) XXX_Size() int {
- return m.Size()
-}
-func (m *Deployment) XXX_DiscardUnknown() {
- xxx_messageInfo_Deployment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Deployment proto.InternalMessageInfo
-
-func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (*DeploymentCondition) ProtoMessage() {}
-func (*DeploymentCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{3}
-}
-func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentCondition.Merge(m, src)
-}
-func (m *DeploymentCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
-
-func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-func (*DeploymentList) ProtoMessage() {}
-func (*DeploymentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{4}
-}
-func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentList.Merge(m, src)
-}
-func (m *DeploymentList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
-
-func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} }
-func (*DeploymentRollback) ProtoMessage() {}
-func (*DeploymentRollback) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{5}
-}
-func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentRollback) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentRollback.Merge(m, src)
-}
-func (m *DeploymentRollback) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentRollback) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentRollback.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo
-
-func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-func (*DeploymentSpec) ProtoMessage() {}
-func (*DeploymentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{6}
-}
-func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentSpec.Merge(m, src)
-}
-func (m *DeploymentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
-
-func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (*DeploymentStatus) ProtoMessage() {}
-func (*DeploymentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{7}
-}
-func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStatus.Merge(m, src)
-}
-func (m *DeploymentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
-
-func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (*DeploymentStrategy) ProtoMessage() {}
-func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{8}
-}
-func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStrategy.Merge(m, src)
-}
-func (m *DeploymentStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
-
-func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
-func (*RollbackConfig) ProtoMessage() {}
-func (*RollbackConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{9}
-}
-func (m *RollbackConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollbackConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollbackConfig.Merge(m, src)
-}
-func (m *RollbackConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *RollbackConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_RollbackConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo
-
-func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (*RollingUpdateDeployment) ProtoMessage() {}
-func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{10}
-}
-func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
-}
-func (m *RollingUpdateDeployment) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
-}
+func (m *Deployment) Reset() { *m = Deployment{} }
-var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
-func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{11}
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m)
-}
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
+func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} }
-func (m *Scale) Reset() { *m = Scale{} }
-func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{12}
-}
-func (m *Scale) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scale) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scale.Merge(m, src)
-}
-func (m *Scale) XXX_Size() int {
- return m.Size()
-}
-func (m *Scale) XXX_DiscardUnknown() {
- xxx_messageInfo_Scale.DiscardUnknown(m)
-}
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-var xxx_messageInfo_Scale proto.InternalMessageInfo
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{13}
-}
-func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleSpec.Merge(m, src)
-}
-func (m *ScaleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{14}
-}
-func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleStatus.Merge(m, src)
-}
-func (m *ScaleStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleStatus.DiscardUnknown(m)
-}
+func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
-var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-func (*StatefulSet) ProtoMessage() {}
-func (*StatefulSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{15}
-}
-func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSet.Merge(m, src)
-}
-func (m *StatefulSet) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSet) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSet.DiscardUnknown(m)
-}
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
+func (m *Scale) Reset() { *m = Scale{} }
-func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (*StatefulSetCondition) ProtoMessage() {}
-func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{16}
-}
-func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetCondition.Merge(m, src)
-}
-func (m *StatefulSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m)
-}
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-func (*StatefulSetList) ProtoMessage() {}
-func (*StatefulSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{17}
-}
-func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetList.Merge(m, src)
-}
-func (m *StatefulSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetList.DiscardUnknown(m)
-}
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
+func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
-func (*StatefulSetOrdinals) ProtoMessage() {}
-func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{18}
-}
-func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetOrdinals.Merge(m, src)
-}
-func (m *StatefulSetOrdinals) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetOrdinals) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m)
-}
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo
+func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() {
*m = StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{19}
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo
-
-func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-func (*StatefulSetSpec) ProtoMessage() {}
-func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{20}
-}
-func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetSpec.Merge(m, src)
-}
-func (m *StatefulSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
-func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-func (*StatefulSetStatus) ProtoMessage() {}
-func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{21}
-}
-func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetStatus.Merge(m, src)
-}
-func (m *StatefulSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m)
-}
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
-func (*StatefulSetUpdateStrategy) ProtoMessage() {}
-func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2747f709ac7c95e7, []int{22}
-}
-func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision")
- proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList")
- proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta1.Deployment")
- proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition")
- proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList")
- proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry")
- proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec")
- proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus")
- proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy")
- proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.apps.v1beta1.RollbackConfig")
- proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateDeployment")
- proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateStatefulSetStrategy")
- proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale")
- proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec")
- proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry")
- proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet")
- proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition")
- proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList")
- proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta1.StatefulSetOrdinals")
- proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy")
- proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec")
- proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus")
- proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2747f709ac7c95e7)
-}
-
-var fileDescriptor_2747f709ac7c95e7 = []byte{
- // 2041 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xdd, 0x6f, 0x1b, 0xc7,
- 0x11, 0xd7, 0x51, 0xa2, 0x44, 0x8d, 0x22, 0xca, 0x5e, 0xa9, 0x16, 0xa3, 0xb4, 0x92, 0x70, 0x31,
- 0x62, 0x25, 0xb1, 0x8f, 0xb1, 0x92, 0x06, 0x89, 0xdd, 0xba, 0x10, 0x25, 0x37, 0x56, 0x20, 0x45,
- 0xca, 0x4a, 0xb2, 0xd1, 0xf4, 0x03, 0x59, 0x91, 0x6b, 0xea, 0xa2, 0xfb, 0xc2, 0xdd, 0x52, 0x31,
- 0xd1, 0x97, 0xfe, 0x01, 0x2d, 0xd2, 0xe7, 0xfe, 0x15, 0xed, 0x53, 0x8b, 0x16, 0x7d, 0x2d, 0xfc,
- 0x18, 0xf4, 0xa5, 0x79, 0x22, 0x6a, 0xe6, 0xb5, 0x7d, 0x6b, 0x5f, 0x0c, 0x14, 0x28, 0x76, 0x6f,
- 0xef, 0xfb, 0x4e, 0x3a, 0x16, 0xb0, 0x80, 0xe6, 0x8d, 0xb7, 0x33, 0xf3, 0x9b, 0xd9, 0xd9, 0x99,
- 0xd9, 0x99, 0x25, 0xdc, 0x38, 0x7d, 0xcf, 0xd3, 0x74, 0xbb, 0x49, 0x1c, 0xbd, 0x49, 0x1c, 0xc7,
- 0x6b, 0x9e, 0xdd, 0x3e, 0xa6, 0x8c, 0xdc, 0x6e, 0x76, 0xa9, 0x45, 0x5d, 0xc2, 0x68, 0x47, 0x73,
- 0x5c, 0x9b, 0xd9, 0x68, 0xd1, 0x67, 0xd4, 0x88, 0xa3, 0x6b, 0x9c, 0x51, 0x93, 0x8c, 0x4b, 0xb7,
- 0xba, 0x3a, 0x3b, 0xe9, 0x1d, 0x6b, 0x6d, 0xdb, 0x6c, 0x76, 0xed, 0xae, 0xdd, 0x14, 0xfc, 0xc7,
- 0xbd, 0xc7, 0xe2, 0x4b, 0x7c, 0x88, 0x5f, 0x3e, 0xce, 0x92, 0x1a, 0x53, 0xd8, 0xb6, 0x5d, 0xda,
- 0x3c, 0xcb, 0xe8, 0x5a, 0x7a, 0x27, 0xe2, 0x31, 0x49, 0xfb, 0x44, 0xb7, 0xa8, 0xdb, 0x6f, 0x3a,
- 0xa7, 0x5d, 0xbe, 0xe0, 0x35, 0x4d, 0xca, 0x48, 0x9e, 0x54, 0xb3, 0x48, 0xca, 0xed, 0x59, 0x4c,
- 0x37, 0x69, 0x46, 0xe0, 0xdd, 0x8b, 0x04, 0xbc, 0xf6, 0x09, 0x35, 0x49, 0x46, 0xee, 0xed, 0x22,
- 0xb9, 0x1e, 0xd3, 0x8d, 0xa6, 0x6e, 0x31, 0x8f, 0xb9, 0x69, 0x21, 0xf5, 0xdf, 0x0a, 0xa0, 0x4d,
- 0xdb, 0x62, 0xae, 0x6d, 0x18, 0xd4, 0xc5, 0xf4, 0x4c, 0xf7, 0x74, 0xdb, 0x42, 0x9f, 0x42, 0x8d,
- 0xef, 0xa7, 0x43, 0x18, 0x69, 0x28, 0xab, 0xca, 0xda, 0xcc, 0xfa, 0x5b, 0x5a, 0xe4, 0xe9, 0x10,
- 0x5e, 0x73, 0x4e, 0xbb, 0x7c, 0xc1, 0xd3, 0x38, 0xb7, 0x76, 0x76, 0x5b, 0xdb, 0x3b, 0xfe, 0x8c,
- 0xb6, 0xd9, 0x2e, 0x65, 0xa4, 0x85, 0x9e, 0x0e, 0x56, 0xc6, 0x86, 0x83, 0x15, 0x88, 0xd6, 0x70,
- 0x88, 0x8a, 0xf6, 0x60, 0x42, 0xa0, 0x57, 0x04, 0xfa, 0xad, 0x42, 0x74, 0xb9, 0x69, 0x0d, 0x93,
- 0xcf, 0xef, 0x3f, 0x61, 0xd4, 0xe2, 0xe6, 0xb5, 0x5e, 0x92, 0xd0, 0x13, 0x5b, 0x84, 0x11, 0x2c,
- 0x80, 0xd0, 0x4d, 0xa8, 0xb9, 0xd2, 0xfc, 0xc6, 0xf8, 0xaa, 0xb2, 0x36, 0xde, 0xba, 0x22, 0xb9,
- 0x6a, 0xc1, 0xb6, 0x70, 0xc8, 0xa1, 0x3e, 0x55, 0xe0, 0x5a, 0x76, 0xdf, 0x3b, 0xba, 0xc7, 0xd0,
- 0x4f, 0x32, 0x7b, 0xd7, 0xca, 0xed, 0x9d, 0x4b, 0x8b, 0x9d, 0x87, 0x8a, 0x83, 0x95, 0xd8, 0xbe,
- 0xf7, 0xa1, 0xaa, 0x33, 0x6a, 0x7a, 0x8d, 0xca, 0xea, 0xf8, 0xda, 0xcc, 0xfa, 0x9b, 0x5a, 0x41,
- 0x00, 0x6b, 0x59, 0xeb, 0x5a, 0xb3, 0x12, 0xb7, 0xba, 0xcd, 0x11, 0xb0, 0x0f, 0xa4, 0xfe, 0xb2,
- 0x02, 0xb0, 0x45, 0x1d, 0xc3, 0xee, 0x9b, 0xd4, 0x62, 0x97, 0x70, 0x74, 0xdb, 0x30, 0xe1, 0x39,
- 0xb4, 0x2d, 0x8f, 0xee, 0x46, 0xe1, 0x0e, 0x22, 0xa3, 0x0e, 0x1c, 0xda, 0x8e, 0x0e, 0x8d, 0x7f,
- 0x61, 0x01, 0x81, 0x3e, 0x86, 0x49, 0x8f, 0x11, 0xd6, 0xf3, 0xc4, 0x91, 0xcd, 0xac, 0xbf, 0x5e,
- 0x06, 0x4c, 0x08, 0xb4, 0xea, 0x12, 0x6e, 0xd2, 0xff, 0xc6, 0x12, 0x48, 0xfd, 0xdb, 0x38, 0xcc,
- 0x47, 0xcc, 0x9b, 0xb6, 0xd5, 0xd1, 0x19, 0x0f, 0xe9, 0xbb, 0x30, 0xc1, 0xfa, 0x0e, 0x15, 0x3e,
- 0x99, 0x6e, 0xdd, 0x08, 0x8c, 0x39, 0xec, 0x3b, 0xf4, 0xf9, 0x60, 0x65, 0x31, 0x47, 0x84, 0x93,
- 0xb0, 0x10, 0x42, 0x3b, 0xa1, 0x9d, 0x15, 0x21, 0xfe, 0x4e, 0x52, 0xf9, 0xf3, 0xc1, 0x4a, 0x4e,
- 0x01, 0xd1, 0x42, 0xa4, 0xa4, 0x89, 0xe8, 0x33, 0xa8, 0x1b, 0xc4, 0x63, 0x47, 0x4e, 0x87, 0x30,
- 0x7a, 0xa8, 0x9b, 0xb4, 0x31, 0x29, 0x76, 0xff, 0x46, 0xb9, 0x83, 0xe2, 0x12, 0xad, 0x6b, 0xd2,
- 0x82, 0xfa, 0x4e, 0x02, 0x09, 0xa7, 0x90, 0xd1, 0x19, 0x20, 0xbe, 0x72, 0xe8, 0x12, 0xcb, 0xf3,
- 0x77, 0xc5, 0xf5, 0x4d, 0x8d, 0xac, 0x6f, 0x49, 0xea, 0x43, 0x3b, 0x19, 0x34, 0x9c, 0xa3, 0x01,
- 0xbd, 0x06, 0x93, 0x2e, 0x25, 0x9e, 0x6d, 0x35, 0x26, 0x84, 0xc7, 0xc2, 0xe3, 0xc2, 0x62, 0x15,
- 0x4b, 0x2a, 0x7a, 0x1d, 0xa6, 0x4c, 0xea, 0x79, 0xa4, 0x4b, 0x1b, 0x55, 0xc1, 0x38, 0x27, 0x19,
- 0xa7, 0x76, 0xfd, 0x65, 0x1c, 0xd0, 0xd5, 0x3f, 0x28, 0x50, 0x8f, 0x8e, 0xe9, 0x12, 0x72, 0xf5,
- 0x41, 0x32, 0x57, 0x5f, 0x2d, 0x11, 0x9c, 0x05, 0x39, 0xfa, 0x8f, 0x0a, 0xa0, 0x88, 0x09, 0xdb,
- 0x86, 0x71, 0x4c, 0xda, 0xa7, 0x68, 0x15, 0x26, 0x2c, 0x62, 0x06, 0x31, 0x19, 0x26, 0xc8, 0x47,
- 0xc4, 0xa4, 0x58, 0x50, 0xd0, 0x17, 0x0a, 0xa0, 0x9e, 0x38, 0xcd, 0xce, 0x86, 0x65, 0xd9, 0x8c,
- 0x70, 0x07, 0x07, 0x06, 0x6d, 0x96, 0x30, 0x28, 0xd0, 0xa5, 0x1d, 0x65, 0x50, 0xee, 0x5b, 0xcc,
- 0xed, 0x47, 0x07, 0x9b, 0x65, 0xc0, 0x39, 0xaa, 0xd1, 0x8f, 0x01, 0x5c, 0x89, 0x79, 0x68, 0xcb,
- 0xb4, 0x2d, 0xae, 0x01, 0x81, 0xfa, 0x4d, 0xdb, 0x7a, 0xac, 0x77, 0xa3, 0xc2, 0x82, 0x43, 0x08,
- 0x1c, 0x83, 0x5b, 0xba, 0x0f, 0x8b, 0x05, 0x76, 0xa2, 0x2b, 0x30, 0x7e, 0x4a, 0xfb, 0xbe, 0xab,
- 0x30, 0xff, 0x89, 0x16, 0xa0, 0x7a, 0x46, 0x8c, 0x1e, 0xf5, 0x73, 0x12, 0xfb, 0x1f, 0x77, 0x2a,
- 0xef, 0x29, 0xea, 0x6f, 0xab, 0xf1, 0x48, 0xe1, 0xf5, 0x06, 0xad, 0xf1, 0xeb, 0xc1, 0x31, 0xf4,
- 0x36, 0xf1, 0x04, 0x46, 0xb5, 0xf5, 0x92, 0x7f, 0x35, 0xf8, 0x6b, 0x38, 0xa4, 0xa2, 0x9f, 0x42,
- 0xcd, 0xa3, 0x06, 0x6d, 0x33, 0xdb, 0x95, 0x25, 0xee, 0xed, 0x92, 0x31, 0x45, 0x8e, 0xa9, 0x71,
- 0x20, 0x45, 0x7d, 0xf8, 0xe0, 0x0b, 0x87, 0x90, 0xe8, 0x63, 0xa8, 0x31, 0x6a, 0x3a, 0x06, 0x61,
- 0x54, 0x7a, 0x2f, 0x11, 0x57, 0xbc, 0x76, 0x70, 0xb0, 0x7d, 0xbb, 0x73, 0x28, 0xd9, 0x44, 0xf5,
- 0x0c, 0xe3, 0x34, 0x58, 0xc5, 0x21, 0x0c, 0xfa, 0x11, 0xd4, 0x3c, 0xc6, 0x6f, 0xf5, 0x6e, 0x5f,
- 0x64, 0xdb, 0x79, 0xd7, 0x4a, 0xbc, 0x8e, 0xfa, 0x22, 0x11, 0x74, 0xb0, 0x82, 0x43, 0x38, 0xb4,
- 0x01, 0x73, 0xa6, 0x6e, 0x61, 0x4a, 0x3a, 0xfd, 0x03, 0xda, 0xb6, 0xad, 0x8e, 0x27, 0xd2, 0xb4,
- 0xda, 0x5a, 0x94, 0x42, 0x73, 0xbb, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0x1d, 0x58, 0x08, 0xae, 0xdd,
- 0x07, 0xba, 0xc7, 0x6c, 0xb7, 0xbf, 0xa3, 0x9b, 0x3a, 0x13, 0x35, 0xaf, 0xda, 0x6a, 0x0c, 0x07,
- 0x2b, 0x0b, 0x38, 0x87, 0x8e, 0x73, 0xa5, 0x78, 0x5d, 0x71, 0x48, 0xcf, 0xa3, 0x1d, 0x51, 0xc3,
- 0x6a, 0x51, 0x5d, 0xd9, 0x17, 0xab, 0x58, 0x52, 0xd1, 0xa3, 0x44, 0x98, 0xd6, 0x46, 0x0b, 0xd3,
- 0x7a, 0x71, 0x88, 0xa2, 0x23, 0x58, 0x74, 0x5c, 0xbb, 0xeb, 0x52, 0xcf, 0xdb, 0xa2, 0xa4, 0x63,
- 0xe8, 0x16, 0x0d, 0x3c, 0x33, 0x2d, 0x76, 0xf4, 0xca, 0x70, 0xb0, 0xb2, 0xb8, 0x9f, 0xcf, 0x82,
- 0x8b, 0x64, 0xd5, 0x5f, 0x55, 0xe1, 0x4a, 0xfa, 0x8e, 0x43, 0x1f, 0x02, 0xb2, 0x8f, 0x3d, 0xea,
- 0x9e, 0xd1, 0xce, 0x07, 0x7e, 0xe3, 0xc6, 0xbb, 0x1b, 0x45, 0x74, 0x37, 0x61, 0xde, 0xee, 0x65,
- 0x38, 0x70, 0x8e, 0x94, 0xdf, 0x1f, 0xc9, 0x04, 0xa8, 0x08, 0x43, 0x63, 0xfd, 0x51, 0x26, 0x09,
- 0x36, 0x60, 0x4e, 0xe6, 0x7e, 0x40, 0x14, 0xc1, 0x1a, 0x3b, 0xf7, 0xa3, 0x24, 0x19, 0xa7, 0xf9,
- 0xd1, 0x5d, 0x98, 0x75, 0x79, 0x1c, 0x84, 0x00, 0x53, 0x02, 0xe0, 0x5b, 0x12, 0x60, 0x16, 0xc7,
- 0x89, 0x38, 0xc9, 0x8b, 0x3e, 0x80, 0xab, 0xe4, 0x8c, 0xe8, 0x06, 0x39, 0x36, 0x68, 0x08, 0x30,
- 0x21, 0x00, 0x5e, 0x96, 0x00, 0x57, 0x37, 0xd2, 0x0c, 0x38, 0x2b, 0x83, 0x76, 0x61, 0xbe, 0x67,
- 0x65, 0xa1, 0xfc, 0x20, 0x7e, 0x45, 0x42, 0xcd, 0x1f, 0x65, 0x59, 0x70, 0x9e, 0x1c, 0xda, 0x86,
- 0x79, 0x46, 0x5d, 0x53, 0xb7, 0x08, 0xd3, 0xad, 0x6e, 0x08, 0xe7, 0x9f, 0xfc, 0x22, 0x87, 0x3a,
- 0xcc, 0x92, 0x71, 0x9e, 0x0c, 0xfa, 0x14, 0xa0, 0x1d, 0x34, 0x08, 0x5e, 0x63, 0x52, 0x54, 0xf4,
- 0x9b, 0x25, 0xf2, 0x36, 0xec, 0x2a, 0xa2, 0x6a, 0x1a, 0x2e, 0x79, 0x38, 0x86, 0x89, 0xee, 0x40,
- 0xbd, 0x6d, 0x1b, 0x86, 0x48, 0xa2, 0x4d, 0xbb, 0x67, 0x31, 0x91, 0x07, 0xd5, 0x16, 0xe2, 0x7d,
- 0xc3, 0x66, 0x82, 0x82, 0x53, 0x9c, 0xea, 0x9f, 0x94, 0xf8, 0x8d, 0x15, 0x54, 0x06, 0x74, 0x27,
- 0xd1, 0x45, 0xbd, 0x96, 0xea, 0xa2, 0xae, 0x65, 0x25, 0x62, 0x4d, 0x94, 0x0e, 0xb3, 0x3c, 0x8f,
- 0x74, 0xab, 0xeb, 0xc7, 0x8e, 0xac, 0xae, 0x6f, 0x9d, 0x9b, 0x95, 0x21, 0x77, 0xec, 0x8e, 0xbd,
- 0x2a, 0xc2, 0x27, 0x4e, 0xc4, 0x49, 0x64, 0xf5, 0x1e, 0xd4, 0x93, 0x29, 0x9d, 0x18, 0x0f, 0x94,
- 0x0b, 0xc7, 0x83, 0xaf, 0x15, 0x58, 0x2c, 0xd0, 0x8e, 0x0c, 0xa8, 0x9b, 0xe4, 0x49, 0x2c, 0x62,
- 0x2e, 0x6c, 0xb3, 0xf9, 0x00, 0xa6, 0xf9, 0x03, 0x98, 0xb6, 0x6d, 0xb1, 0x3d, 0xf7, 0x80, 0xb9,
- 0xba, 0xd5, 0xf5, 0xcf, 0x61, 0x37, 0x81, 0x85, 0x53, 0xd8, 0xe8, 0x13, 0xa8, 0x99, 0xe4, 0xc9,
- 0x41, 0xcf, 0xed, 0xe6, 0xf9, 0xab, 0x9c, 0x1e, 0x71, 0x15, 0xed, 0x4a, 0x14, 0x1c, 0xe2, 0xa9,
- 0x7f, 0x56, 0x60, 0x35, 0xb1, 0x4b, 0x5e, 0x76, 0xe8, 0xe3, 0x9e, 0x71, 0x40, 0xa3, 0x13, 0x7f,
- 0x13, 0xa6, 0x1d, 0xe2, 0x32, 0x3d, 0x2c, 0x3d, 0xd5, 0xd6, 0xec, 0x70, 0xb0, 0x32, 0xbd, 0x1f,
- 0x2c, 0xe2, 0x88, 0x9e, 0xe3, 0x9b, 0xca, 0x8b, 0xf3, 0x8d, 0xfa, 0x1f, 0x05, 0xaa, 0x07, 0x6d,
- 0x62, 0xd0, 0x4b, 0x18, 0x7a, 0xb6, 0x12, 0x43, 0x8f, 0x5a, 0x18, 0xb3, 0xc2, 0x9e, 0xc2, 0x79,
- 0x67, 0x27, 0x35, 0xef, 0x5c, 0xbf, 0x00, 0xe7, 0xfc, 0x51, 0xe7, 0x7d, 0x98, 0x0e, 0xd5, 0x25,
- 0xea, 0xbb, 0x72, 0x51, 0x7d, 0x57, 0x7f, 0x53, 0x81, 0x99, 0x98, 0x8a, 0xd1, 0xa4, 0xb9, 0xbb,
- 0x63, 0x2d, 0x12, 0x2f, 0x5c, 0xeb, 0x65, 0x36, 0xa2, 0x05, 0xed, 0x90, 0xdf, 0x79, 0x46, 0x7d,
- 0x47, 0xb6, 0x4b, 0xba, 0x07, 0x75, 0x46, 0xdc, 0x2e, 0x65, 0x01, 0x4d, 0x38, 0x6c, 0x3a, 0x1a,
- 0x7b, 0x0e, 0x13, 0x54, 0x9c, 0xe2, 0x5e, 0xba, 0x0b, 0xb3, 0x09, 0x65, 0x23, 0xb5, 0x8f, 0x5f,
- 0x70, 0xe7, 0x44, 0xa9, 0x70, 0x09, 0xd1, 0xf5, 0x61, 0x22, 0xba, 0xd6, 0x8a, 0x9d, 0x19, 0x4b,
- 0xd0, 0xa2, 0x18, 0xc3, 0xa9, 0x18, 0x7b, 0xa3, 0x14, 0xda, 0xf9, 0x91, 0xf6, 0xcf, 0x0a, 0x2c,
- 0xc4, 0xb8, 0xa3, 0xa9, 0xfa, 0x7b, 0x89, 0xfb, 0x60, 0x2d, 0x75, 0x1f, 0x34, 0xf2, 0x64, 0x5e,
- 0xd8, 0x58, 0x9d, 0x3f, 0xea, 0x8e, 0xff, 0x3f, 0x8e, 0xba, 0x7f, 0x54, 0x60, 0x2e, 0xe6, 0xbb,
- 0x4b, 0x98, 0x75, 0xb7, 0x93, 0xb3, 0xee, 0xf5, 0x32, 0x41, 0x53, 0x30, 0xec, 0xde, 0x81, 0xf9,
- 0x18, 0xd3, 0x9e, 0xdb, 0xd1, 0x2d, 0x62, 0x78, 0xe8, 0x55, 0xa8, 0x7a, 0x8c, 0xb8, 0x2c, 0xb8,
- 0x44, 0x02, 0xd9, 0x03, 0xbe, 0x88, 0x7d, 0x9a, 0xfa, 0x2f, 0x05, 0x9a, 0x31, 0xe1, 0x7d, 0xea,
- 0x7a, 0xba, 0xc7, 0xa8, 0xc5, 0x1e, 0xda, 0x46, 0xcf, 0xa4, 0x9b, 0x06, 0xd1, 0x4d, 0x4c, 0xf9,
- 0x82, 0x6e, 0x5b, 0xfb, 0xb6, 0xa1, 0xb7, 0xfb, 0x88, 0xc0, 0xcc, 0xe7, 0x27, 0xd4, 0xda, 0xa2,
- 0x06, 0x65, 0xb4, 0x23, 0x43, 0xf1, 0x07, 0x12, 0x7e, 0xe6, 0x51, 0x44, 0x7a, 0x3e, 0x58, 0x59,
- 0x2b, 0x83, 0x28, 0x22, 0x34, 0x8e, 0x89, 0x7e, 0x06, 0xc0, 0x3f, 0x45, 0x2d, 0xeb, 0xc8, 0x60,
- 0xbd, 0x17, 0x64, 0xf4, 0xa3, 0x90, 0x32, 0x92, 0x82, 0x18, 0xa2, 0xfa, 0xbb, 0x5a, 0xe2, 0xbc,
- 0xbf, 0xf1, 0x13, 0xeb, 0xcf, 0x61, 0xe1, 0x2c, 0xf2, 0x4e, 0xc0, 0xc0, 0x3b, 0xfc, 0xf1, 0xf4,
- 0x2b, 0x60, 0x08, 0x9f, 0xe7, 0xd7, 0xd6, 0xb7, 0xa5, 0x92, 0x85, 0x87, 0x39, 0x70, 0x38, 0x57,
- 0x09, 0xfa, 0x2e, 0xcc, 0xf0, 0xe9, 0x48, 0x6f, 0xd3, 0x8f, 0x88, 0x19, 0xe4, 0xe2, 0x7c, 0x10,
- 0x2f, 0x07, 0x11, 0x09, 0xc7, 0xf9, 0xd0, 0x09, 0xcc, 0x3b, 0x76, 0x67, 0x97, 0x58, 0xa4, 0x4b,
- 0x79, 0x23, 0xe8, 0x1f, 0xa5, 0x18, 0x63, 0xa7, 0x5b, 0xef, 0x06, 0x93, 0xc4, 0x7e, 0x96, 0xe5,
- 0x39, 0x9f, 0x07, 0xb3, 0xcb, 0x22, 0x08, 0xf2, 0x20, 0x91, 0x0b, 0xf5, 0x9e, 0xec, 0xc7, 0xe4,
- 0x54, 0xef, 0xbf, 0xd7, 0xad, 0x97, 0x49, 0xca, 0xa3, 0x84, 0x64, 0x74, 0x61, 0x26, 0xd7, 0x71,
- 0x4a, 0x43, 0xe1, 0x94, 0x5e, 0xfb, 0x9f, 0xa6, 0xf4, 0x9c, 0x67, 0x83, 0xe9, 0x11, 0x9f, 0x0d,
- 0xfe, 0xa2, 0xc0, 0x75, 0xa7, 0x44, 0x2e, 0x35, 0x40, 0xf8, 0xe6, 0x41, 0x19, 0xdf, 0x94, 0xc9,
- 0xcd, 0xd6, 0xda, 0x70, 0xb0, 0x72, 0xbd, 0x0c, 0x27, 0x2e, 0x65, 0x1f, 0x7a, 0x08, 0x35, 0x5b,
- 0xd6, 0xc0, 0xc6, 0x8c, 0xb0, 0xf5, 0x66, 0x19, 0x5b, 0x83, 0xba, 0xe9, 0xa7, 0x65, 0xf0, 0x85,
- 0x43, 0x2c, 0xf5, 0xf7, 0x55, 0xb8, 0x9a, 0xb9, 0xc1, 0xd1, 0x0f, 0xcf, 0x79, 0x32, 0xb8, 0xf6,
- 0xc2, 0x9e, 0x0b, 0x32, 0xb3, 0xfe, 0xf8, 0x08, 0xb3, 0xfe, 0x06, 0xcc, 0xb5, 0x7b, 0xae, 0x4b,
- 0x2d, 0x96, 0x9a, 0xf4, 0xc3, 0x60, 0xd9, 0x4c, 0x92, 0x71, 0x9a, 0x3f, 0xef, 0xb9, 0xa2, 0x3a,
- 0xe2, 0x73, 0x45, 0xdc, 0x0a, 0x39, 0x27, 0xfa, 0xa9, 0x9d, 0xb5, 0x42, 0x8e, 0x8b, 0x69, 0x7e,
- 0xde, 0xb4, 0xfa, 0xa8, 0x21, 0xc2, 0x54, 0xb2, 0x69, 0x3d, 0x4a, 0x50, 0x71, 0x8a, 0x3b, 0x67,
- 0x5e, 0x9f, 0x2e, 0x3b, 0xaf, 0x23, 0x92, 0x78, 0x4d, 0x00, 0x51, 0x47, 0x6f, 0x95, 0x89, 0xb3,
- 0xf2, 0xcf, 0x09, 0xb9, 0x6f, 0x32, 0x33, 0xa3, 0xbf, 0xc9, 0xa8, 0x7f, 0x55, 0xe0, 0xe5, 0xc2,
- 0x8a, 0x85, 0x36, 0x12, 0x2d, 0xe5, 0xad, 0x54, 0x4b, 0xf9, 0x9d, 0x42, 0xc1, 0x58, 0x5f, 0xe9,
- 0xe6, 0xbf, 0x34, 0xbc, 0x5f, 0xee, 0xa5, 0x21, 0x67, 0x0a, 0xbe, 0xf8, 0xc9, 0xa1, 0xf5, 0xfd,
- 0xa7, 0xcf, 0x96, 0xc7, 0xbe, 0x7c, 0xb6, 0x3c, 0xf6, 0xd5, 0xb3, 0xe5, 0xb1, 0x5f, 0x0c, 0x97,
- 0x95, 0xa7, 0xc3, 0x65, 0xe5, 0xcb, 0xe1, 0xb2, 0xf2, 0xd5, 0x70, 0x59, 0xf9, 0xfb, 0x70, 0x59,
- 0xf9, 0xf5, 0xd7, 0xcb, 0x63, 0x9f, 0x2c, 0x16, 0xfc, 0xb1, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0x40, 0xa4, 0x4b, 0xb9, 0xf2, 0x1e, 0x00, 0x00,
-}
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1150,7 +375,7 @@ func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.UpdatedAnnotations {
keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ sort.Strings(keysForUpdatedAnnotations)
for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])]
baseI := i
@@ -1597,7 +822,7 @@ func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Selector {
keysForSelector = append(keysForSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.Selector[string(keysForSelector[iNdEx])]
baseI := i
@@ -2586,7 +1811,7 @@ func (this *DeploymentRollback) String() string {
for k := range this.UpdatedAnnotations {
keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ sort.Strings(keysForUpdatedAnnotations)
mapStringForUpdatedAnnotations := "map[string]string{"
for _, k := range keysForUpdatedAnnotations {
mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k])
@@ -2714,7 +1939,7 @@ func (this *ScaleStatus) String() string {
for k := range this.Selector {
keysForSelector = append(keysForSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
mapStringForSelector := "map[string]string{"
for _, k := range keysForSelector {
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
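One visible change in the marshaling code above is that map keys are now sorted with the standard library rather than github.com/gogo/protobuf/sortkeys; both sort a []string in place, so the generated output ordering stays deterministic. A minimal sketch of the pattern (illustrative only, not part of the vendored patch; the map contents are made up):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Same pattern the generated MarshalToSizedBuffer uses for map fields:
	// collect the keys, sort them, then emit entries in that deterministic order.
	selector := map[string]string{"tier": "frontend", "app": "web"}
	keys := make([]string, 0, len(selector))
	for k := range selector {
		keys = append(keys, k)
	}
	sort.Strings(keys) // previously github_com_gogo_protobuf_sortkeys.Strings(keys)
	fmt.Println(keys)  // [app tier]
}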
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/generated.proto b/operator/vendor/k8s.io/api/apps/v1beta1/generated.proto
index b61dc490..b47d61e2 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -208,7 +208,7 @@ message DeploymentStatus {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 9;
@@ -289,10 +289,12 @@ message RollingUpdateStatefulSetStrategy {
// maxUnavailable is the maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
}
@@ -496,8 +498,12 @@ message StatefulSetSpec {
// +optional
optional int32 minReadySeconds = 9;
- // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates.
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
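The two comment updates above track upstream feature promotions: maxUnavailable is now documented as beta (gated by MaxUnavailableStatefulSet, enabled by default), and persistentVolumeClaimRetentionPolicy gets a fuller description of its lifecycle semantics. A minimal Go sketch of setting both fields on a v1beta1 StatefulSetSpec (illustrative only; it assumes the usual WhenDeleted/WhenScaled fields and Retain/Delete policy constants from the upstream API, which are not shown in this hunk):

package example

import (
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func exampleSpec() appsv1beta1.StatefulSetSpec {
	// Honored when the MaxUnavailableStatefulSet feature gate is on (beta, default on);
	// may have little effect under the OrderedReady podManagementPolicy.
	maxUnavailable := intstr.FromString("10%")
	return appsv1beta1.StatefulSetSpec{
		UpdateStrategy: appsv1beta1.StatefulSetUpdateStrategy{
			Type: appsv1beta1.RollingUpdateStatefulSetStrategyType,
			RollingUpdate: &appsv1beta1.RollingUpdateStatefulSetStrategy{
				MaxUnavailable: &maxUnavailable,
			},
		},
		// Delete PVCs when the StatefulSet is deleted, keep them on scale-down.
		PersistentVolumeClaimRetentionPolicy: &appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1beta1.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
	}
}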
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apps/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..3127ea3c
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,68 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*ControllerRevision) ProtoMessage() {}
+
+func (*ControllerRevisionList) ProtoMessage() {}
+
+func (*Deployment) ProtoMessage() {}
+
+func (*DeploymentCondition) ProtoMessage() {}
+
+func (*DeploymentList) ProtoMessage() {}
+
+func (*DeploymentRollback) ProtoMessage() {}
+
+func (*DeploymentSpec) ProtoMessage() {}
+
+func (*DeploymentStatus) ProtoMessage() {}
+
+func (*DeploymentStrategy) ProtoMessage() {}
+
+func (*RollbackConfig) ProtoMessage() {}
+
+func (*RollingUpdateDeployment) ProtoMessage() {}
+
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+
+func (*Scale) ProtoMessage() {}
+
+func (*ScaleSpec) ProtoMessage() {}
+
+func (*ScaleStatus) ProtoMessage() {}
+
+func (*StatefulSet) ProtoMessage() {}
+
+func (*StatefulSetCondition) ProtoMessage() {}
+
+func (*StatefulSetList) ProtoMessage() {}
+
+func (*StatefulSetOrdinals) ProtoMessage() {}
+
+func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
+
+func (*StatefulSetSpec) ProtoMessage() {}
+
+func (*StatefulSetStatus) ProtoMessage() {}
+
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/types.go b/operator/vendor/k8s.io/api/apps/v1beta1/types.go
index cd140be1..b1e6b336 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -166,10 +166,12 @@ type RollingUpdateStatefulSetStrategy struct {
// maxUnavailable is the maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"`
}
@@ -294,8 +296,12 @@ type StatefulSetSpec struct {
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
- // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates.
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
@@ -582,7 +588,7 @@ type DeploymentStatus struct {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
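The same beta note lands on DeploymentStatus.TerminatingReplicas above. Because the field is optional and typed *int32, callers should nil-check it before reading; a tiny sketch (illustrative, not part of the patch):

package example

import appsv1beta1 "k8s.io/api/apps/v1beta1"

// terminatingOrZero returns the terminating-pod count, treating an unset
// (nil) field as zero, e.g. when the DeploymentReplicaSetTerminatingReplicas
// feature is disabled on the server.
func terminatingOrZero(status appsv1beta1.DeploymentStatus) int32 {
	if status.TerminatingReplicas == nil {
		return 0
	}
	return *status.TerminatingReplicas
}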
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
index 02ea5f7f..a6f7a119 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -119,7 +119,7 @@ var map_DeploymentStatus = map[string]string{
"readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
"availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
- "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -160,7 +160,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
var map_RollingUpdateStatefulSetStrategy = map[string]string{
"": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.",
- "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.",
+ "maxUnavailable": "maxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable. This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.",
}
func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
@@ -259,7 +259,7 @@ var map_StatefulSetSpec = map[string]string{
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
"minReadySeconds": "minReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.",
+ "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down.",
"ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
}
diff --git a/operator/vendor/k8s.io/api/apps/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apps/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..5da1ed3f
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevision) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.ControllerRevision"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevisionList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.ControllerRevisionList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Deployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.Deployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentRollback) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentRollback"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.DeploymentStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollbackConfig) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.RollbackConfig"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDeployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.RollingUpdateDeployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateStatefulSetStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.RollingUpdateStatefulSetStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scale) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.Scale"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.ScaleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.ScaleStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetOrdinals) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetOrdinals"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetPersistentVolumeClaimRetentionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta1.StatefulSetUpdateStrategy"
+}
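The generated helpers in this new file only return canonical OpenAPI model identifiers for their Go types. A one-line usage sketch (illustrative):

package main

import (
	"fmt"

	appsv1beta1 "k8s.io/api/apps/v1beta1"
)

func main() {
	// Value receiver, so it can be called on a zero value.
	fmt.Println(appsv1beta1.StatefulSet{}.OpenAPIModelName()) // io.k8s.api.apps.v1beta1.StatefulSet
}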
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/doc.go b/operator/vendor/k8s.io/api/apps/v1beta2/doc.go
index 7d28fe42..3259a47b 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta2/doc.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.apps.v1beta2
package v1beta2
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/operator/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
index 9fcba6fe..777a66f1 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
@@ -23,14 +23,12 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -38,1135 +36,73 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
-func (*ControllerRevision) ProtoMessage() {}
-func (*ControllerRevision) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{0}
-}
-func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevision) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevision.Merge(m, src)
-}
-func (m *ControllerRevision) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevision) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevision.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
-
-func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
-func (*ControllerRevisionList) ProtoMessage() {}
-func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{1}
-}
-func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ControllerRevisionList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ControllerRevisionList.Merge(m, src)
-}
-func (m *ControllerRevisionList) XXX_Size() int {
- return m.Size()
-}
-func (m *ControllerRevisionList) XXX_DiscardUnknown() {
- xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
-
-func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-func (*DaemonSet) ProtoMessage() {}
-func (*DaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{2}
-}
-func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSet.Merge(m, src)
-}
-func (m *DaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
-
-func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-func (*DaemonSetCondition) ProtoMessage() {}
-func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{3}
-}
-func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetCondition.Merge(m, src)
-}
-func (m *DaemonSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
-
-func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-func (*DaemonSetList) ProtoMessage() {}
-func (*DaemonSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{4}
-}
-func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetList.Merge(m, src)
-}
-func (m *DaemonSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
-
-func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-func (*DaemonSetSpec) ProtoMessage() {}
-func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{5}
-}
-func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetSpec.Merge(m, src)
-}
-func (m *DaemonSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
-
-func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-func (*DaemonSetStatus) ProtoMessage() {}
-func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{6}
-}
-func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetStatus.Merge(m, src)
-}
-func (m *DaemonSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
-
-func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-func (*DaemonSetUpdateStrategy) ProtoMessage() {}
-func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{7}
-}
-func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
-
-func (m *Deployment) Reset() { *m = Deployment{} }
-func (*Deployment) ProtoMessage() {}
-func (*Deployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{8}
-}
-func (m *Deployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Deployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Deployment.Merge(m, src)
-}
-func (m *Deployment) XXX_Size() int {
- return m.Size()
-}
-func (m *Deployment) XXX_DiscardUnknown() {
- xxx_messageInfo_Deployment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Deployment proto.InternalMessageInfo
-
-func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (*DeploymentCondition) ProtoMessage() {}
-func (*DeploymentCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{9}
-}
-func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentCondition.Merge(m, src)
-}
-func (m *DeploymentCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
-
-func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-func (*DeploymentList) ProtoMessage() {}
-func (*DeploymentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{10}
-}
-func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentList.Merge(m, src)
-}
-func (m *DeploymentList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
-
-func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-func (*DeploymentSpec) ProtoMessage() {}
-func (*DeploymentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{11}
-}
-func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentSpec.Merge(m, src)
-}
-func (m *DeploymentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
-
-func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (*DeploymentStatus) ProtoMessage() {}
-func (*DeploymentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{12}
-}
-func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStatus.Merge(m, src)
-}
-func (m *DeploymentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
-
-func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (*DeploymentStrategy) ProtoMessage() {}
-func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{13}
-}
-func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStrategy.Merge(m, src)
-}
-func (m *DeploymentStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
-
-func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-func (*ReplicaSet) ProtoMessage() {}
-func (*ReplicaSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{14}
-}
-func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSet.Merge(m, src)
-}
-func (m *ReplicaSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-func (*ReplicaSetCondition) ProtoMessage() {}
-func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{15}
-}
-func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetCondition.Merge(m, src)
-}
-func (m *ReplicaSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m)
-}
+func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-func (*ReplicaSetList) ProtoMessage() {}
-func (*ReplicaSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{16}
-}
-func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetList.Merge(m, src)
-}
-func (m *ReplicaSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetList.DiscardUnknown(m)
-}
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-func (*ReplicaSetSpec) ProtoMessage() {}
-func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{17}
-}
-func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetSpec.Merge(m, src)
-}
-func (m *ReplicaSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
-
-func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-func (*ReplicaSetStatus) ProtoMessage() {}
-func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{18}
-}
-func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetStatus.Merge(m, src)
-}
-func (m *ReplicaSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m)
-}
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
+func (m *Deployment) Reset() { *m = Deployment{} }
-func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-func (*RollingUpdateDaemonSet) ProtoMessage() {}
-func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{19}
-}
-func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src)
-}
-func (m *RollingUpdateDaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m)
-}
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (*RollingUpdateDeployment) ProtoMessage() {}
-func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{20}
-}
-func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
-}
-func (m *RollingUpdateDeployment) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
-}
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
-func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{21}
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src)
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m)
-}
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-func (m *Scale) Reset() { *m = Scale{} }
-func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{22}
-}
-func (m *Scale) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scale) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scale.Merge(m, src)
-}
-func (m *Scale) XXX_Size() int {
- return m.Size()
-}
-func (m *Scale) XXX_DiscardUnknown() {
- xxx_messageInfo_Scale.DiscardUnknown(m)
-}
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-var xxx_messageInfo_Scale proto.InternalMessageInfo
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{23}
-}
-func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleSpec.Merge(m, src)
-}
-func (m *ScaleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleSpec.DiscardUnknown(m)
-}
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{24}
-}
-func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleStatus.Merge(m, src)
-}
-func (m *ScaleStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleStatus.DiscardUnknown(m)
-}
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-func (*StatefulSet) ProtoMessage() {}
-func (*StatefulSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{25}
-}
-func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSet.Merge(m, src)
-}
-func (m *StatefulSet) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSet) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSet.DiscardUnknown(m)
-}
+func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
-var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
+func (m *Scale) Reset() { *m = Scale{} }
-func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (*StatefulSetCondition) ProtoMessage() {}
-func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{26}
-}
-func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetCondition.Merge(m, src)
-}
-func (m *StatefulSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m)
-}
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-func (*StatefulSetList) ProtoMessage() {}
-func (*StatefulSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{27}
-}
-func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetList.Merge(m, src)
-}
-func (m *StatefulSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetList.DiscardUnknown(m)
-}
+func (m *StatefulSet) Reset() { *m = StatefulSet{} }
-var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
+func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
-func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
-func (*StatefulSetOrdinals) ProtoMessage() {}
-func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{28}
-}
-func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetOrdinals) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetOrdinals) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetOrdinals.Merge(m, src)
-}
-func (m *StatefulSetOrdinals) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetOrdinals) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetOrdinals.DiscardUnknown(m)
-}
+func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
-var xxx_messageInfo_StatefulSetOrdinals proto.InternalMessageInfo
+func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() {
*m = StatefulSetPersistentVolumeClaimRetentionPolicy{}
}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
-func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{29}
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.Merge(m, src)
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy.DiscardUnknown(m)
-}
-var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.InternalMessageInfo
+func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
-func (*StatefulSetSpec) ProtoMessage() {}
-func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{30}
-}
-func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetSpec.Merge(m, src)
-}
-func (m *StatefulSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
-
-func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-func (*StatefulSetStatus) ProtoMessage() {}
-func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{31}
-}
-func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetStatus.Merge(m, src)
-}
-func (m *StatefulSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m)
-}
+func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
-var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
-
-func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
-func (*StatefulSetUpdateStrategy) ProtoMessage() {}
-func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_c423c016abf485d4, []int{32}
-}
-func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src)
-}
-func (m *StatefulSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision")
- proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList")
- proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet")
- proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition")
- proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList")
- proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec")
- proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus")
- proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.DaemonSetUpdateStrategy")
- proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta2.Deployment")
- proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta2.DeploymentCondition")
- proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta2.DeploymentList")
- proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta2.DeploymentSpec")
- proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta2.DeploymentStatus")
- proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta2.DeploymentStrategy")
- proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1beta2.ReplicaSet")
- proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetCondition")
- proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetList")
- proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetSpec")
- proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetStatus")
- proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDaemonSet")
- proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDeployment")
- proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateStatefulSetStrategy")
- proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale")
- proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec")
- proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry")
- proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet")
- proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition")
- proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList")
- proto.RegisterType((*StatefulSetOrdinals)(nil), "k8s.io.api.apps.v1beta2.StatefulSetOrdinals")
- proto.RegisterType((*StatefulSetPersistentVolumeClaimRetentionPolicy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy")
- proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec")
- proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus")
- proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_c423c016abf485d4)
-}
-
-var fileDescriptor_c423c016abf485d4 = []byte{
- // 2359 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xf7, 0x92, 0xa2, 0x44, 0x0e, 0x2d, 0xc9, 0x1e, 0xa9, 0x22, 0x63, 0xb7, 0xa4, 0xb1, 0x36,
- 0x6c, 0x25, 0xb6, 0x49, 0x5b, 0xf9, 0x40, 0x62, 0xb7, 0x09, 0x44, 0x29, 0xb5, 0x1d, 0x48, 0x32,
- 0x33, 0xb4, 0x1c, 0x34, 0xe8, 0x87, 0x47, 0xe4, 0x98, 0xda, 0x78, 0xbf, 0xb0, 0x3b, 0x54, 0x4c,
- 0xf4, 0xd2, 0x6b, 0x81, 0x16, 0x6d, 0xae, 0xfd, 0x27, 0x8a, 0x5e, 0x8a, 0xa2, 0x41, 0x6f, 0x41,
- 0xe1, 0x63, 0xd0, 0x4b, 0x72, 0x22, 0x6a, 0xe6, 0x54, 0x14, 0xbd, 0xb5, 0x17, 0x03, 0x05, 0x8a,
- 0x99, 0x9d, 0xfd, 0xde, 0x35, 0x97, 0x8a, 0xad, 0x34, 0x41, 0x6e, 0xdc, 0x79, 0xef, 0xfd, 0xe6,
- 0xcd, 0xcc, 0x7b, 0xf3, 0x7e, 0xfb, 0xb8, 0xe0, 0xc2, 0x83, 0xd7, 0xed, 0x86, 0x62, 0x34, 0xb1,
- 0xa9, 0x34, 0xb1, 0x69, 0xda, 0xcd, 0x83, 0xab, 0x7b, 0x84, 0xe2, 0xb5, 0x66, 0x9f, 0xe8, 0xc4,
- 0xc2, 0x94, 0xf4, 0x1a, 0xa6, 0x65, 0x50, 0x03, 0x56, 0x1c, 0xc5, 0x06, 0x36, 0x95, 0x06, 0x53,
- 0x6c, 0x08, 0xc5, 0x53, 0x97, 0xfb, 0x0a, 0xdd, 0x1f, 0xec, 0x35, 0xba, 0x86, 0xd6, 0xec, 0x1b,
- 0x7d, 0xa3, 0xc9, 0xf5, 0xf7, 0x06, 0xf7, 0xf9, 0x13, 0x7f, 0xe0, 0xbf, 0x1c, 0x9c, 0x53, 0x72,
- 0x60, 0xc2, 0xae, 0x61, 0x91, 0xe6, 0xc1, 0xd5, 0xe8, 0x5c, 0xa7, 0x5e, 0xf1, 0x75, 0x34, 0xdc,
- 0xdd, 0x57, 0x74, 0x62, 0x0d, 0x9b, 0xe6, 0x83, 0x3e, 0x1b, 0xb0, 0x9b, 0x1a, 0xa1, 0x38, 0xc9,
- 0xaa, 0x99, 0x66, 0x65, 0x0d, 0x74, 0xaa, 0x68, 0x24, 0x66, 0xf0, 0xda, 0x24, 0x03, 0xbb, 0xbb,
- 0x4f, 0x34, 0x1c, 0xb3, 0x7b, 0x39, 0xcd, 0x6e, 0x40, 0x15, 0xb5, 0xa9, 0xe8, 0xd4, 0xa6, 0x56,
- 0xd4, 0x48, 0xfe, 0x8f, 0x04, 0xe0, 0x86, 0xa1, 0x53, 0xcb, 0x50, 0x55, 0x62, 0x21, 0x72, 0xa0,
- 0xd8, 0x8a, 0xa1, 0xc3, 0x7b, 0xa0, 0xc8, 0xd6, 0xd3, 0xc3, 0x14, 0x57, 0xa5, 0x33, 0xd2, 0x6a,
- 0x79, 0xed, 0x4a, 0xc3, 0xdf, 0x69, 0x0f, 0xbe, 0x61, 0x3e, 0xe8, 0xb3, 0x01, 0xbb, 0xc1, 0xb4,
- 0x1b, 0x07, 0x57, 0x1b, 0xb7, 0xf7, 0x3e, 0x20, 0x5d, 0xba, 0x4d, 0x28, 0x6e, 0xc1, 0x47, 0xa3,
- 0xfa, 0xb1, 0xf1, 0xa8, 0x0e, 0xfc, 0x31, 0xe4, 0xa1, 0xc2, 0xdb, 0x60, 0x86, 0xa3, 0xe7, 0x38,
- 0xfa, 0xe5, 0x54, 0x74, 0xb1, 0xe8, 0x06, 0xc2, 0x1f, 0xbe, 0xfd, 0x90, 0x12, 0x9d, 0xb9, 0xd7,
- 0x3a, 0x2e, 0xa0, 0x67, 0x36, 0x31, 0xc5, 0x88, 0x03, 0xc1, 0x4b, 0xa0, 0x68, 0x09, 0xf7, 0xab,
- 0xf9, 0x33, 0xd2, 0x6a, 0xbe, 0x75, 0x42, 0x68, 0x15, 0xdd, 0x65, 0x21, 0x4f, 0x43, 0x7e, 0x24,
- 0x81, 0x95, 0xf8, 0xba, 0xb7, 0x14, 0x9b, 0xc2, 0x1f, 0xc7, 0xd6, 0xde, 0xc8, 0xb6, 0x76, 0x66,
- 0xcd, 0x57, 0xee, 0x4d, 0xec, 0x8e, 0x04, 0xd6, 0xdd, 0x06, 0x05, 0x85, 0x12, 0xcd, 0xae, 0xe6,
- 0xce, 0xe4, 0x57, 0xcb, 0x6b, 0x17, 0x1b, 0x29, 0x01, 0xdc, 0x88, 0x7b, 0xd7, 0x9a, 0x17, 0xb8,
- 0x85, 0x5b, 0x0c, 0x01, 0x39, 0x40, 0xf2, 0x2f, 0x73, 0xa0, 0xb4, 0x89, 0x89, 0x66, 0xe8, 0x1d,
- 0x42, 0x8f, 0xe0, 0xe4, 0x6e, 0x82, 0x19, 0xdb, 0x24, 0x5d, 0x71, 0x72, 0xe7, 0x53, 0x17, 0xe0,
- 0xf9, 0xd4, 0x31, 0x49, 0xd7, 0x3f, 0x32, 0xf6, 0x84, 0x38, 0x02, 0x6c, 0x83, 0x59, 0x9b, 0x62,
- 0x3a, 0xb0, 0xf9, 0x81, 0x95, 0xd7, 0x56, 0x33, 0x60, 0x71, 0xfd, 0xd6, 0x82, 0x40, 0x9b, 0x75,
- 0x9e, 0x91, 0xc0, 0x91, 0xff, 0x91, 0x03, 0xd0, 0xd3, 0xdd, 0x30, 0xf4, 0x9e, 0x42, 0x59, 0x38,
- 0x5f, 0x03, 0x33, 0x74, 0x68, 0x12, 0xbe, 0x21, 0xa5, 0xd6, 0x79, 0xd7, 0x95, 0x3b, 0x43, 0x93,
- 0x3c, 0x19, 0xd5, 0x57, 0xe2, 0x16, 0x4c, 0x82, 0xb8, 0x0d, 0xdc, 0xf2, 0x9c, 0xcc, 0x71, 0xeb,
- 0x57, 0xc2, 0x53, 0x3f, 0x19, 0xd5, 0x13, 0xee, 0x8e, 0x86, 0x87, 0x14, 0x76, 0x10, 0x1e, 0x00,
- 0xa8, 0x62, 0x9b, 0xde, 0xb1, 0xb0, 0x6e, 0x3b, 0x33, 0x29, 0x1a, 0x11, 0xcb, 0x7f, 0x29, 0xdb,
- 0x41, 0x31, 0x8b, 0xd6, 0x29, 0xe1, 0x05, 0xdc, 0x8a, 0xa1, 0xa1, 0x84, 0x19, 0xe0, 0x79, 0x30,
- 0x6b, 0x11, 0x6c, 0x1b, 0x7a, 0x75, 0x86, 0xaf, 0xc2, 0xdb, 0x40, 0xc4, 0x47, 0x91, 0x90, 0xc2,
- 0x17, 0xc1, 0x9c, 0x46, 0x6c, 0x1b, 0xf7, 0x49, 0xb5, 0xc0, 0x15, 0x17, 0x85, 0xe2, 0xdc, 0xb6,
- 0x33, 0x8c, 0x5c, 0xb9, 0xfc, 0x47, 0x09, 0xcc, 0x7b, 0x3b, 0x77, 0x04, 0x99, 0x73, 0x23, 0x9c,
- 0x39, 0xf2, 0xe4, 0x60, 0x49, 0x49, 0x98, 0x4f, 0xf2, 0x01, 0xc7, 0x59, 0x38, 0xc2, 0x9f, 0x80,
- 0xa2, 0x4d, 0x54, 0xd2, 0xa5, 0x86, 0x25, 0x1c, 0x7f, 0x39, 0xa3, 0xe3, 0x78, 0x8f, 0xa8, 0x1d,
- 0x61, 0xda, 0x3a, 0xce, 0x3c, 0x77, 0x9f, 0x90, 0x07, 0x09, 0xdf, 0x05, 0x45, 0x4a, 0x34, 0x53,
- 0xc5, 0x94, 0x88, 0xac, 0x39, 0x1b, 0x74, 0x9e, 0xc5, 0x0c, 0x03, 0x6b, 0x1b, 0xbd, 0x3b, 0x42,
- 0x8d, 0xa7, 0x8c, 0xb7, 0x19, 0xee, 0x28, 0xf2, 0x60, 0xa0, 0x09, 0x16, 0x06, 0x66, 0x8f, 0x69,
- 0x52, 0x76, 0x9d, 0xf7, 0x87, 0x22, 0x86, 0xae, 0x4c, 0xde, 0x95, 0xdd, 0x90, 0x5d, 0x6b, 0x45,
- 0xcc, 0xb2, 0x10, 0x1e, 0x47, 0x11, 0x7c, 0xb8, 0x0e, 0x16, 0x35, 0x45, 0x47, 0x04, 0xf7, 0x86,
- 0x1d, 0xd2, 0x35, 0xf4, 0x9e, 0xcd, 0x43, 0xa9, 0xd0, 0xaa, 0x08, 0x80, 0xc5, 0xed, 0xb0, 0x18,
- 0x45, 0xf5, 0xe1, 0x16, 0x58, 0x76, 0x2f, 0xe0, 0x9b, 0x8a, 0x4d, 0x0d, 0x6b, 0xb8, 0xa5, 0x68,
- 0x0a, 0xad, 0xce, 0x72, 0x9c, 0xea, 0x78, 0x54, 0x5f, 0x46, 0x09, 0x72, 0x94, 0x68, 0x25, 0x7f,
- 0x34, 0x0b, 0x16, 0x23, 0xf7, 0x02, 0xbc, 0x0b, 0x56, 0xba, 0x03, 0xcb, 0x22, 0x3a, 0xdd, 0x19,
- 0x68, 0x7b, 0xc4, 0xea, 0x74, 0xf7, 0x49, 0x6f, 0xa0, 0x92, 0x1e, 0x3f, 0xd6, 0x42, 0xab, 0x26,
- 0x7c, 0x5d, 0xd9, 0x48, 0xd4, 0x42, 0x29, 0xd6, 0xf0, 0x1d, 0x00, 0x75, 0x3e, 0xb4, 0xad, 0xd8,
- 0xb6, 0x87, 0x99, 0xe3, 0x98, 0x5e, 0x2a, 0xee, 0xc4, 0x34, 0x50, 0x82, 0x15, 0xf3, 0xb1, 0x47,
- 0x6c, 0xc5, 0x22, 0xbd, 0xa8, 0x8f, 0xf9, 0xb0, 0x8f, 0x9b, 0x89, 0x5a, 0x28, 0xc5, 0x1a, 0xbe,
- 0x0a, 0xca, 0xce, 0x6c, 0x7c, 0xcf, 0xc5, 0xe1, 0x2c, 0x09, 0xb0, 0xf2, 0x8e, 0x2f, 0x42, 0x41,
- 0x3d, 0xb6, 0x34, 0x63, 0xcf, 0x26, 0xd6, 0x01, 0xe9, 0xdd, 0x70, 0xc8, 0x01, 0xab, 0xa0, 0x05,
- 0x5e, 0x41, 0xbd, 0xa5, 0xdd, 0x8e, 0x69, 0xa0, 0x04, 0x2b, 0xb6, 0x34, 0x27, 0x6a, 0x62, 0x4b,
- 0x9b, 0x0d, 0x2f, 0x6d, 0x37, 0x51, 0x0b, 0xa5, 0x58, 0xb3, 0xd8, 0x73, 0x5c, 0x5e, 0x3f, 0xc0,
- 0x8a, 0x8a, 0xf7, 0x54, 0x52, 0x9d, 0x0b, 0xc7, 0xde, 0x4e, 0x58, 0x8c, 0xa2, 0xfa, 0xf0, 0x06,
- 0x38, 0xe9, 0x0c, 0xed, 0xea, 0xd8, 0x03, 0x29, 0x72, 0x90, 0x17, 0x04, 0xc8, 0xc9, 0x9d, 0xa8,
- 0x02, 0x8a, 0xdb, 0xc0, 0x6b, 0x60, 0xa1, 0x6b, 0xa8, 0x2a, 0x8f, 0xc7, 0x0d, 0x63, 0xa0, 0xd3,
- 0x6a, 0x89, 0xa3, 0x40, 0x96, 0x43, 0x1b, 0x21, 0x09, 0x8a, 0x68, 0xc2, 0x9f, 0x01, 0xd0, 0x75,
- 0x0b, 0x83, 0x5d, 0x05, 0x13, 0x18, 0x40, 0xbc, 0x2c, 0xf9, 0x95, 0xd9, 0x1b, 0xb2, 0x51, 0x00,
- 0x52, 0xfe, 0x44, 0x02, 0x95, 0x94, 0x44, 0x87, 0x6f, 0x85, 0x8a, 0xe0, 0xc5, 0x48, 0x11, 0x3c,
- 0x9d, 0x62, 0x16, 0xa8, 0x84, 0xfb, 0x60, 0x9e, 0x11, 0x12, 0x45, 0xef, 0x3b, 0x2a, 0xe2, 0x2e,
- 0x6b, 0xa6, 0x2e, 0x00, 0x05, 0xb5, 0xfd, 0x5b, 0xf9, 0xe4, 0x78, 0x54, 0x9f, 0x0f, 0xc9, 0x50,
- 0x18, 0x58, 0xfe, 0x55, 0x0e, 0x80, 0x4d, 0x62, 0xaa, 0xc6, 0x50, 0x23, 0xfa, 0x51, 0x70, 0x9a,
- 0x5b, 0x21, 0x4e, 0x73, 0x21, 0xfd, 0x48, 0x3c, 0xa7, 0x52, 0x49, 0xcd, 0xbb, 0x11, 0x52, 0xf3,
- 0x62, 0x16, 0xb0, 0xa7, 0xb3, 0x9a, 0xcf, 0xf2, 0x60, 0xc9, 0x57, 0xf6, 0x69, 0xcd, 0xf5, 0xd0,
- 0x89, 0x5e, 0x88, 0x9c, 0x68, 0x25, 0xc1, 0xe4, 0xb9, 0xf1, 0x9a, 0x0f, 0xc0, 0x02, 0x63, 0x1d,
- 0xce, 0xf9, 0x71, 0x4e, 0x33, 0x3b, 0x35, 0xa7, 0xf1, 0x2a, 0xd1, 0x56, 0x08, 0x09, 0x45, 0x90,
- 0x53, 0x38, 0xd4, 0xdc, 0xd7, 0x91, 0x43, 0xfd, 0x49, 0x02, 0x0b, 0xfe, 0x31, 0x1d, 0x01, 0x89,
- 0xba, 0x19, 0x26, 0x51, 0x67, 0x33, 0x04, 0x67, 0x0a, 0x8b, 0xfa, 0x6c, 0x26, 0xe8, 0x3a, 0xa7,
- 0x51, 0xab, 0xec, 0x15, 0xcc, 0x54, 0x95, 0x2e, 0xb6, 0x45, 0xbd, 0x3d, 0xee, 0xbc, 0x7e, 0x39,
- 0x63, 0xc8, 0x93, 0x86, 0x08, 0x57, 0xee, 0xf9, 0x12, 0xae, 0xfc, 0xb3, 0x21, 0x5c, 0x3f, 0x02,
- 0x45, 0xdb, 0xa5, 0x5a, 0x33, 0x1c, 0xf2, 0x62, 0xa6, 0xc4, 0x16, 0x2c, 0xcb, 0x83, 0xf6, 0xf8,
- 0x95, 0x07, 0x97, 0xc4, 0xac, 0x0a, 0x5f, 0x25, 0xb3, 0x62, 0x81, 0x6e, 0xe2, 0x81, 0x4d, 0x7a,
- 0x3c, 0xa9, 0x8a, 0x7e, 0xa0, 0xb7, 0xf9, 0x28, 0x12, 0x52, 0xb8, 0x0b, 0x2a, 0xa6, 0x65, 0xf4,
- 0x2d, 0x62, 0xdb, 0x9b, 0x04, 0xf7, 0x54, 0x45, 0x27, 0xee, 0x02, 0x9c, 0x9a, 0x78, 0x7a, 0x3c,
- 0xaa, 0x57, 0xda, 0xc9, 0x2a, 0x28, 0xcd, 0x56, 0xfe, 0x75, 0x01, 0x9c, 0x88, 0xde, 0x8d, 0x29,
- 0x34, 0x45, 0x3a, 0x14, 0x4d, 0xb9, 0x14, 0x88, 0x53, 0x87, 0xc3, 0x05, 0x5a, 0x05, 0xb1, 0x58,
- 0x5d, 0x07, 0x8b, 0x82, 0x96, 0xb8, 0x42, 0x41, 0xd4, 0xbc, 0xe3, 0xd9, 0x0d, 0x8b, 0x51, 0x54,
- 0x1f, 0x5e, 0x07, 0xf3, 0x16, 0x67, 0x5e, 0x2e, 0x80, 0xc3, 0x5e, 0xbe, 0x23, 0x00, 0xe6, 0x51,
- 0x50, 0x88, 0xc2, 0xba, 0x8c, 0xb9, 0xf8, 0x84, 0xc4, 0x05, 0x98, 0x09, 0x33, 0x97, 0xf5, 0xa8,
- 0x02, 0x8a, 0xdb, 0xc0, 0x6d, 0xb0, 0x34, 0xd0, 0xe3, 0x50, 0x4e, 0xac, 0x9d, 0x16, 0x50, 0x4b,
- 0xbb, 0x71, 0x15, 0x94, 0x64, 0x07, 0x6f, 0x81, 0x25, 0x4a, 0x2c, 0x4d, 0xd1, 0x31, 0x55, 0xf4,
- 0xbe, 0x07, 0xe7, 0x9c, 0x7c, 0x85, 0x41, 0xdd, 0x89, 0x8b, 0x51, 0x92, 0x0d, 0xbc, 0x17, 0xe2,
- 0x45, 0xb3, 0xfc, 0x6a, 0xba, 0x94, 0x21, 0xbd, 0x32, 0x13, 0xa3, 0x04, 0xd6, 0x56, 0xcc, 0xca,
- 0xda, 0xe4, 0x8f, 0x25, 0x00, 0xe3, 0x29, 0x3d, 0xb1, 0xa9, 0x10, 0xb3, 0x08, 0x14, 0x5f, 0x25,
- 0x99, 0x4a, 0x5d, 0xc9, 0x48, 0xa5, 0xfc, 0xbb, 0x39, 0x1b, 0x97, 0x12, 0x1b, 0x7d, 0x34, 0xfd,
- 0xa1, 0xac, 0x5c, 0xca, 0x77, 0xea, 0x19, 0x70, 0xa9, 0x00, 0xd8, 0xd3, 0xb9, 0xd4, 0x3f, 0x73,
- 0x60, 0xc9, 0x57, 0xce, 0xcc, 0xa5, 0x12, 0x4c, 0xbe, 0xed, 0x11, 0x65, 0xe3, 0x37, 0xfe, 0xd6,
- 0xfd, 0x3f, 0xf1, 0x1b, 0xdf, 0xab, 0x14, 0x7e, 0xf3, 0xfb, 0x5c, 0xd0, 0xf5, 0x29, 0xf9, 0xcd,
- 0x33, 0x68, 0x96, 0x7c, 0xed, 0x28, 0x92, 0xfc, 0xd1, 0x0c, 0x38, 0x11, 0xcd, 0xc3, 0x50, 0xad,
- 0x95, 0x26, 0xd6, 0xda, 0x36, 0x58, 0xbe, 0x3f, 0x50, 0xd5, 0x21, 0x5f, 0x43, 0xa0, 0xe0, 0x3a,
- 0x55, 0xfa, 0xbb, 0xc2, 0x72, 0xf9, 0x87, 0x09, 0x3a, 0x28, 0xd1, 0x32, 0x5e, 0x7a, 0x67, 0xbe,
- 0x6c, 0xe9, 0x2d, 0x1c, 0xa2, 0xf4, 0xa6, 0xd4, 0xca, 0xb9, 0x43, 0xd4, 0xca, 0x64, 0x22, 0x94,
- 0x3f, 0x14, 0x11, 0x9a, 0xae, 0xee, 0x26, 0xdc, 0x81, 0x13, 0x1b, 0x12, 0x63, 0x09, 0xac, 0x24,
- 0xb7, 0x01, 0xa0, 0x0a, 0x16, 0x34, 0xfc, 0x30, 0xd8, 0x8e, 0x99, 0x54, 0x8f, 0x06, 0x54, 0x51,
- 0x1b, 0xce, 0x1f, 0x59, 0x8d, 0x5b, 0x3a, 0xbd, 0x6d, 0x75, 0xa8, 0xa5, 0xe8, 0x7d, 0xa7, 0x88,
- 0x6f, 0x87, 0xb0, 0x50, 0x04, 0x1b, 0xbe, 0x0f, 0x8a, 0x1a, 0x7e, 0xd8, 0x19, 0x58, 0xfd, 0xa4,
- 0x62, 0x9b, 0x6d, 0x1e, 0x9e, 0x4b, 0xdb, 0x02, 0x05, 0x79, 0x78, 0xf2, 0x17, 0x12, 0xa8, 0xa4,
- 0x14, 0xe8, 0x6f, 0xd0, 0x2a, 0xff, 0x22, 0x81, 0x33, 0xa1, 0x55, 0xb2, 0x0c, 0x27, 0xf7, 0x07,
- 0x2a, 0x4f, 0x76, 0x41, 0x8a, 0x2e, 0x82, 0x92, 0x89, 0x2d, 0xaa, 0x78, 0xec, 0xbc, 0xd0, 0x9a,
- 0x1f, 0x8f, 0xea, 0xa5, 0xb6, 0x3b, 0x88, 0x7c, 0x79, 0xc2, 0xde, 0xe4, 0x9e, 0xdf, 0xde, 0xc8,
- 0xff, 0x95, 0x40, 0xa1, 0xd3, 0xc5, 0x2a, 0x39, 0x02, 0x0e, 0xb4, 0x19, 0xe2, 0x40, 0xe9, 0x7f,
- 0x55, 0x70, 0x7f, 0x52, 0xe9, 0xcf, 0x56, 0x84, 0xfe, 0x9c, 0x9b, 0x80, 0xf3, 0x74, 0xe6, 0xf3,
- 0x06, 0x28, 0x79, 0xd3, 0x4d, 0x77, 0x2d, 0xcb, 0xbf, 0xcb, 0x81, 0x72, 0x60, 0x8a, 0x29, 0x2f,
- 0xf5, 0x7b, 0xa1, 0x4a, 0xc6, 0xee, 0x98, 0xb5, 0x2c, 0x0b, 0x69, 0xb8, 0x55, 0xeb, 0x6d, 0x9d,
- 0x5a, 0xc1, 0x37, 0xe8, 0x78, 0x31, 0x7b, 0x13, 0x2c, 0x50, 0x6c, 0xf5, 0x09, 0x75, 0x65, 0x7c,
- 0xc3, 0x4a, 0x7e, 0x47, 0xe9, 0x4e, 0x48, 0x8a, 0x22, 0xda, 0xa7, 0xae, 0x83, 0xf9, 0xd0, 0x64,
- 0xf0, 0x04, 0xc8, 0x3f, 0x20, 0x43, 0x87, 0x0c, 0x22, 0xf6, 0x13, 0x2e, 0x83, 0xc2, 0x01, 0x56,
- 0x07, 0x4e, 0x88, 0x96, 0x90, 0xf3, 0x70, 0x2d, 0xf7, 0xba, 0x24, 0xff, 0x86, 0x6d, 0x8e, 0x9f,
- 0x0a, 0x47, 0x10, 0x5d, 0xef, 0x84, 0xa2, 0x2b, 0xfd, 0x5f, 0xd3, 0x60, 0x82, 0xa6, 0xc5, 0x18,
- 0x8a, 0xc4, 0xd8, 0x4b, 0x99, 0xd0, 0x9e, 0x1e, 0x69, 0xff, 0xca, 0x81, 0xe5, 0x80, 0xb6, 0x4f,
- 0xb2, 0xbf, 0x1f, 0x22, 0xd9, 0xab, 0x11, 0x92, 0x5d, 0x4d, 0xb2, 0xf9, 0x96, 0x65, 0x4f, 0x66,
- 0xd9, 0x7f, 0x96, 0xc0, 0x62, 0x60, 0xef, 0x8e, 0x80, 0x66, 0xdf, 0x0a, 0xd3, 0xec, 0x73, 0x59,
- 0x82, 0x26, 0x85, 0x67, 0x5f, 0x03, 0x4b, 0x01, 0xa5, 0xdb, 0x56, 0x4f, 0xd1, 0xb1, 0x6a, 0xc3,
- 0xb3, 0xa0, 0x60, 0x53, 0x6c, 0x51, 0xb7, 0x88, 0xb8, 0xb6, 0x1d, 0x36, 0x88, 0x1c, 0x99, 0xfc,
- 0x6f, 0x09, 0x34, 0x03, 0xc6, 0x6d, 0x62, 0xd9, 0x8a, 0x4d, 0x89, 0x4e, 0xef, 0x1a, 0xea, 0x40,
- 0x23, 0x1b, 0x2a, 0x56, 0x34, 0x44, 0xd8, 0x80, 0x62, 0xe8, 0x6d, 0x43, 0x55, 0xba, 0x43, 0x88,
- 0x41, 0xf9, 0xc3, 0x7d, 0xa2, 0x6f, 0x12, 0x95, 0x50, 0xf1, 0xbf, 0x60, 0xa9, 0xf5, 0x96, 0xfb,
- 0x37, 0xd9, 0x7b, 0xbe, 0xe8, 0xc9, 0xa8, 0xbe, 0x9a, 0x05, 0x91, 0x47, 0x68, 0x10, 0x13, 0xfe,
- 0x14, 0x00, 0xf6, 0xc8, 0xef, 0xb2, 0x9e, 0x08, 0xd6, 0x37, 0xdd, 0x8c, 0x7e, 0xcf, 0x93, 0x4c,
- 0x35, 0x41, 0x00, 0x51, 0xfe, 0x43, 0x31, 0x74, 0xde, 0xdf, 0xf8, 0xde, 0xeb, 0xcf, 0xc1, 0xf2,
- 0x81, 0xbf, 0x3b, 0xae, 0x02, 0xa3, 0xf2, 0xf9, 0x68, 0x53, 0xc0, 0x83, 0x4f, 0xda, 0x57, 0xff,
- 0x05, 0xe2, 0x6e, 0x02, 0x1c, 0x4a, 0x9c, 0x04, 0xbe, 0x0a, 0xca, 0x8c, 0x37, 0x2b, 0x5d, 0xb2,
- 0x83, 0x35, 0x37, 0x17, 0xbd, 0xbf, 0x55, 0x3b, 0xbe, 0x08, 0x05, 0xf5, 0xe0, 0x3e, 0x58, 0x32,
- 0x8d, 0xde, 0x36, 0xd6, 0x71, 0x9f, 0x30, 0x22, 0xe8, 0x1c, 0x25, 0x6f, 0xc8, 0x96, 0x5a, 0xaf,
- 0xb9, 0xcd, 0xb6, 0x76, 0x5c, 0xe5, 0xc9, 0xa8, 0x5e, 0x49, 0x18, 0xe6, 0x41, 0x90, 0x04, 0x09,
- 0xad, 0xd8, 0xa7, 0x00, 0xce, 0x5f, 0x21, 0x6b, 0x59, 0x92, 0xf2, 0x90, 0x1f, 0x03, 0xa4, 0xf5,
- 0x9b, 0x8b, 0x87, 0xea, 0x37, 0x27, 0xbc, 0x2d, 0x97, 0xa6, 0x7c, 0x5b, 0xfe, 0xab, 0x04, 0xce,
- 0x99, 0x19, 0x72, 0xa9, 0x0a, 0xf8, 0xde, 0xdc, 0xcc, 0xb2, 0x37, 0x59, 0x72, 0xb3, 0xb5, 0x3a,
- 0x1e, 0xd5, 0xcf, 0x65, 0xd1, 0x44, 0x99, 0xfc, 0x83, 0x77, 0x41, 0xd1, 0x10, 0x77, 0x60, 0xb5,
- 0xcc, 0x7d, 0xbd, 0x94, 0xc5, 0x57, 0xf7, 0xde, 0x74, 0xd2, 0xd2, 0x7d, 0x42, 0x1e, 0x96, 0xfc,
- 0x71, 0x01, 0x9c, 0x8c, 0x55, 0xf0, 0xaf, 0xb0, 0xab, 0x1e, 0x7b, 0x2f, 0xcf, 0x4f, 0xf1, 0x5e,
- 0xbe, 0x0e, 0x16, 0xc5, 0x87, 0x1a, 0x91, 0xd7, 0x7a, 0x2f, 0x60, 0x36, 0xc2, 0x62, 0x14, 0xd5,
- 0x4f, 0xea, 0xea, 0x17, 0xa6, 0xec, 0xea, 0x07, 0xbd, 0x10, 0x1f, 0x1e, 0x3a, 0xe9, 0x1d, 0xf7,
- 0x42, 0x7c, 0x7f, 0x18, 0xd5, 0x67, 0xc4, 0xd5, 0x41, 0xf5, 0x10, 0xe6, 0xc2, 0xc4, 0x75, 0x37,
- 0x24, 0x45, 0x11, 0xed, 0x2f, 0xf5, 0x31, 0x02, 0x4e, 0xf8, 0x18, 0xe1, 0x72, 0x96, 0x58, 0xcb,
- 0xde, 0x75, 0x4f, 0xec, 0x9f, 0x94, 0xa7, 0xef, 0x9f, 0xc8, 0x7f, 0x93, 0xc0, 0x0b, 0xa9, 0xb7,
- 0x16, 0x5c, 0x0f, 0xd1, 0xca, 0xcb, 0x11, 0x5a, 0xf9, 0xbd, 0x54, 0xc3, 0x00, 0xb7, 0xb4, 0x92,
- 0x1b, 0xf2, 0x6f, 0x64, 0x6b, 0xc8, 0x27, 0xbc, 0x09, 0x4f, 0xee, 0xcc, 0xb7, 0x7e, 0xf0, 0xe8,
- 0x71, 0xed, 0xd8, 0xa7, 0x8f, 0x6b, 0xc7, 0x3e, 0x7f, 0x5c, 0x3b, 0xf6, 0x8b, 0x71, 0x4d, 0x7a,
- 0x34, 0xae, 0x49, 0x9f, 0x8e, 0x6b, 0xd2, 0xe7, 0xe3, 0x9a, 0xf4, 0xf7, 0x71, 0x4d, 0xfa, 0xed,
- 0x17, 0xb5, 0x63, 0xef, 0x57, 0x52, 0x3e, 0x85, 0xfe, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd4,
- 0x01, 0x82, 0xf5, 0x24, 0x2d, 0x00, 0x00,
-}
+func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -2438,7 +1374,7 @@ func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Selector {
keysForSelector = append(keysForSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.Selector[string(keysForSelector[iNdEx])]
baseI := i
@@ -3884,7 +2820,7 @@ func (this *ScaleStatus) String() string {
for k := range this.Selector {
keysForSelector = append(keysForSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
mapStringForSelector := "map[string]string{"
for _, k := range keysForSelector {
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
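The hunks above swap `github.com/gogo/protobuf/sortkeys` for the standard library's `sort` package when ordering map keys during marshalling and `String()` output. A minimal sketch of the equivalence (editor-added illustration, not part of the vendored code):

```go
// sort.Strings is a drop-in replacement for sortkeys.Strings here: both sort
// a []string in place so map keys are emitted in a deterministic order.
package main

import (
	"fmt"
	"sort"
)

func main() {
	keys := []string{"b", "a", "c"}
	sort.Strings(keys) // previously: github_com_gogo_protobuf_sortkeys.Strings(keys)
	fmt.Println(keys)  // [a b c]
}
```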
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/generated.proto b/operator/vendor/k8s.io/api/apps/v1beta2/generated.proto
index 37c6d5ae..1cdd0a43 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -348,7 +348,7 @@ message DeploymentStatus {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 9;
@@ -487,7 +487,7 @@ message ReplicaSetStatus {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 7;
@@ -587,10 +587,12 @@ message RollingUpdateStatefulSetStrategy {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
}
@@ -795,8 +797,12 @@ message StatefulSetSpec {
// +optional
optional int32 minReadySeconds = 9;
- // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates.
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;
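The generated.proto comment changes above track two API documentation updates: `maxUnavailable` in RollingUpdateStatefulSetStrategy is now described as beta (MaxUnavailableStatefulSet gate, enabled by default), and `persistentVolumeClaimRetentionPolicy` gets a fuller lifecycle description. A hedged sketch of setting both fields on an apps/v1beta2 StatefulSetSpec follows; the concrete values are illustrative, while field and constant names come from the vendored types in this diff:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("20%")

	spec := appsv1beta2.StatefulSetSpec{
		// maxUnavailable (beta, MaxUnavailableStatefulSet gate) lets a rolling
		// update take down more than one pod in the 0..Replicas-1 range at once.
		UpdateStrategy: appsv1beta2.StatefulSetUpdateStrategy{
			Type: appsv1beta2.RollingUpdateStatefulSetStrategyType,
			RollingUpdate: &appsv1beta2.RollingUpdateStatefulSetStrategy{
				MaxUnavailable: &maxUnavailable,
			},
		},
		// persistentVolumeClaimRetentionPolicy controls whether PVCs created
		// from volumeClaimTemplates are retained or deleted with the StatefulSet.
		PersistentVolumeClaimRetentionPolicy: &appsv1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1beta2.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType,
		},
	}

	fmt.Printf("update strategy: %s, whenDeleted: %s\n",
		spec.UpdateStrategy.Type, spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted)
}
```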
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/apps/v1beta2/generated.protomessage.pb.go
new file mode 100644
index 00000000..18582bf2
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/generated.protomessage.pb.go
@@ -0,0 +1,88 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta2
+
+func (*ControllerRevision) ProtoMessage() {}
+
+func (*ControllerRevisionList) ProtoMessage() {}
+
+func (*DaemonSet) ProtoMessage() {}
+
+func (*DaemonSetCondition) ProtoMessage() {}
+
+func (*DaemonSetList) ProtoMessage() {}
+
+func (*DaemonSetSpec) ProtoMessage() {}
+
+func (*DaemonSetStatus) ProtoMessage() {}
+
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+
+func (*Deployment) ProtoMessage() {}
+
+func (*DeploymentCondition) ProtoMessage() {}
+
+func (*DeploymentList) ProtoMessage() {}
+
+func (*DeploymentSpec) ProtoMessage() {}
+
+func (*DeploymentStatus) ProtoMessage() {}
+
+func (*DeploymentStrategy) ProtoMessage() {}
+
+func (*ReplicaSet) ProtoMessage() {}
+
+func (*ReplicaSetCondition) ProtoMessage() {}
+
+func (*ReplicaSetList) ProtoMessage() {}
+
+func (*ReplicaSetSpec) ProtoMessage() {}
+
+func (*ReplicaSetStatus) ProtoMessage() {}
+
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+
+func (*RollingUpdateDeployment) ProtoMessage() {}
+
+func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
+
+func (*Scale) ProtoMessage() {}
+
+func (*ScaleSpec) ProtoMessage() {}
+
+func (*ScaleStatus) ProtoMessage() {}
+
+func (*StatefulSet) ProtoMessage() {}
+
+func (*StatefulSetCondition) ProtoMessage() {}
+
+func (*StatefulSetList) ProtoMessage() {}
+
+func (*StatefulSetOrdinals) ProtoMessage() {}
+
+func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
+
+func (*StatefulSetSpec) ProtoMessage() {}
+
+func (*StatefulSetStatus) ProtoMessage() {}
+
+func (*StatefulSetUpdateStrategy) ProtoMessage() {}
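The new build-tagged file keeps bare `ProtoMessage()` markers around for one more release after the gogo/protobuf plumbing is stripped from generated.pb.go. A speculative sketch of what that preserves, written as a compile-time assertion in a hypothetical consumer package (the interface and package names are assumptions, not upstream API):

```go
//go:build kubernetes_protomessage_one_more_release

// Package protocheck is a hypothetical consumer verifying that, under the
// same transitional build tag guarding generated.protomessage.pb.go, the
// v1beta2 types still satisfy the legacy proto.Message shape.
package protocheck

import appsv1beta2 "k8s.io/api/apps/v1beta2"

// legacyProtoMessage mirrors the historical proto.Message interface
// (Reset/String/ProtoMessage) that the gogo-generated code used to satisfy.
type legacyProtoMessage interface {
	Reset()
	String() string
	ProtoMessage()
}

// These assertions compile only when the build tag above is set, matching
// the guard on the new generated.protomessage.pb.go file in this diff.
var (
	_ legacyProtoMessage = &appsv1beta2.StatefulSet{}
	_ legacyProtoMessage = &appsv1beta2.Deployment{}
)
```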
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/types.go b/operator/vendor/k8s.io/api/apps/v1beta2/types.go
index e9dc85df..18c74a4a 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -19,7 +19,7 @@ package v1beta2
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- runtime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
)
@@ -176,10 +176,12 @@ type RollingUpdateStatefulSetStrategy struct {
// The maximum number of pods that can be unavailable during the update.
// Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
// Absolute number is calculated from percentage by rounding up. This can not be 0.
- // Defaults to 1. This field is alpha-level and is only honored by servers that enable the
- // MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
+ //
+ // +featureGate=MaxUnavailableStatefulSet
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"varint,2,opt,name=maxUnavailable"`
}
@@ -304,8 +306,12 @@ type StatefulSetSpec struct {
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
- // PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
- // the StatefulSet VolumeClaimTemplates.
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
// +optional
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaimRetentionPolicy"`
@@ -560,7 +566,7 @@ type DeploymentStatus struct {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
@@ -962,7 +968,7 @@ type ReplicaSetStatus struct {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
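Note on the apps/v1beta2 hunk above: `maxUnavailable` on `RollingUpdateStatefulSetStrategy` is now documented as beta and enabled by default, and is effectively ignored under the `OrderedReady` podManagementPolicy. A minimal sketch of consuming that field through the vendored typed API — the package alias, helper name, and the 25% value are illustrative assumptions, not part of this diff:

```go
package example

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// rollingUpdateWithMaxUnavailable builds an update strategy that allows up to
// 25% of ordinals to be unavailable at once during a rolling update.
func rollingUpdateWithMaxUnavailable() appsv1beta2.StatefulSetUpdateStrategy {
	maxUnavailable := intstr.FromString("25%")
	return appsv1beta2.StatefulSetUpdateStrategy{
		Type: appsv1beta2.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1beta2.RollingUpdateStatefulSetStrategy{
			// Beta as of this vendor bump; not honored under OrderedReady pod management.
			MaxUnavailable: &maxUnavailable,
		},
	}
}
```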
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 34d80af5..ebafa66a 100644
--- a/operator/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -182,7 +182,7 @@ var map_DeploymentStatus = map[string]string{
"readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
"availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
- "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -253,7 +253,7 @@ var map_ReplicaSetStatus = map[string]string{
"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
"readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
"availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
- "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
@@ -285,7 +285,7 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
var map_RollingUpdateStatefulSetStrategy = map[string]string{
"": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.",
"partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.",
- "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.",
+ "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable. This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.",
}
func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
@@ -384,7 +384,7 @@ var map_StatefulSetSpec = map[string]string{
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
- "persistentVolumeClaimRetentionPolicy": "PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.",
+ "persistentVolumeClaimRetentionPolicy": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down.",
"ordinals": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested.",
}
diff --git a/operator/vendor/k8s.io/api/apps/v1beta2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/apps/v1beta2/zz_generated.model_name.go
new file mode 100644
index 00000000..f3ee4178
--- /dev/null
+++ b/operator/vendor/k8s.io/api/apps/v1beta2/zz_generated.model_name.go
@@ -0,0 +1,187 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevision) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ControllerRevision"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ControllerRevisionList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ControllerRevisionList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DaemonSetUpdateStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Deployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.Deployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DeploymentCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DeploymentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DeploymentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DeploymentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.DeploymentStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ReplicaSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ReplicaSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ReplicaSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ReplicaSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ReplicaSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.RollingUpdateDaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDeployment) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.RollingUpdateDeployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateStatefulSetStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.RollingUpdateStatefulSetStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scale) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.Scale"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ScaleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.ScaleStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSet) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetList) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetOrdinals) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetOrdinals"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetPersistentVolumeClaimRetentionPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetPersistentVolumeClaimRetentionPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatefulSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.apps.v1beta2.StatefulSetUpdateStrategy"
+}
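The new `zz_generated.model_name.go` above gives every apps/v1beta2 type a value-receiver `OpenAPIModelName()` accessor returning its canonical OpenAPI model name. A minimal sketch of calling it, assuming only the vendored package shown in this diff:

```go
package example

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

// printModelName prints "io.k8s.api.apps.v1beta2.StatefulSet", the string
// returned by the generated accessor above.
func printModelName() {
	var sts appsv1beta2.StatefulSet
	fmt.Println(sts.OpenAPIModelName())
}
```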
diff --git a/operator/vendor/k8s.io/api/authentication/v1/doc.go b/operator/vendor/k8s.io/api/authentication/v1/doc.go
index dc3aed4e..bf0cd705 100644
--- a/operator/vendor/k8s.io/api/authentication/v1/doc.go
+++ b/operator/vendor/k8s.io/api/authentication/v1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.authentication.v1
package v1
diff --git a/operator/vendor/k8s.io/api/authentication/v1/generated.pb.go b/operator/vendor/k8s.io/api/authentication/v1/generated.pb.go
index 6d922030..2b872c45 100644
--- a/operator/vendor/k8s.io/api/authentication/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/authentication/v1/generated.pb.go
@@ -23,11 +23,8 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -35,407 +32,27 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} }
-func (*BoundObjectReference) ProtoMessage() {}
-func (*BoundObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{0}
-}
-func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *BoundObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BoundObjectReference.Merge(m, src)
-}
-func (m *BoundObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *BoundObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_BoundObjectReference.DiscardUnknown(m)
-}
+func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo
+func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{1}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
+func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
+func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} }
-func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-func (*SelfSubjectReview) ProtoMessage() {}
-func (*SelfSubjectReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{2}
-}
-func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReview.Merge(m, src)
-}
-func (m *SelfSubjectReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m)
-}
+func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} }
-var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo
+func (m *TokenReview) Reset() { *m = TokenReview{} }
-func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
-func (*SelfSubjectReviewStatus) ProtoMessage() {}
-func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{3}
-}
-func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src)
-}
-func (m *SelfSubjectReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m)
-}
+func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} }
-var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo
+func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} }
-func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-func (*TokenRequest) ProtoMessage() {}
-func (*TokenRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{4}
-}
-func (m *TokenRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenRequest.Merge(m, src)
-}
-func (m *TokenRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenRequest proto.InternalMessageInfo
-
-func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} }
-func (*TokenRequestSpec) ProtoMessage() {}
-func (*TokenRequestSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{5}
-}
-func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenRequestSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenRequestSpec.Merge(m, src)
-}
-func (m *TokenRequestSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenRequestSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo
-
-func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} }
-func (*TokenRequestStatus) ProtoMessage() {}
-func (*TokenRequestStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{6}
-}
-func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenRequestStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenRequestStatus.Merge(m, src)
-}
-func (m *TokenRequestStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenRequestStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo
-
-func (m *TokenReview) Reset() { *m = TokenReview{} }
-func (*TokenReview) ProtoMessage() {}
-func (*TokenReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{7}
-}
-func (m *TokenReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReview.Merge(m, src)
-}
-func (m *TokenReview) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReview) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenReview proto.InternalMessageInfo
-
-func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} }
-func (*TokenReviewSpec) ProtoMessage() {}
-func (*TokenReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{8}
-}
-func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReviewSpec.Merge(m, src)
-}
-func (m *TokenReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo
-
-func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} }
-func (*TokenReviewStatus) ProtoMessage() {}
-func (*TokenReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{9}
-}
-func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReviewStatus.Merge(m, src)
-}
-func (m *TokenReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo
-
-func (m *UserInfo) Reset() { *m = UserInfo{} }
-func (*UserInfo) ProtoMessage() {}
-func (*UserInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1237cbf54dccd53, []int{10}
-}
-func (m *UserInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserInfo.Merge(m, src)
-}
-func (m *UserInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *UserInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_UserInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserInfo proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference")
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue")
- proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview")
- proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus")
- proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest")
- proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec")
- proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus")
- proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview")
- proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec")
- proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus")
- proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/authentication/v1/generated.proto", fileDescriptor_d1237cbf54dccd53)
-}
-
-var fileDescriptor_d1237cbf54dccd53 = []byte{
- // 947 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0xc5,
- 0x13, 0xf7, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0xfe, 0x49, 0xef, 0x7f, 0x85, 0x15, 0x16, 0x4f, 0x98,
- 0x95, 0x50, 0x04, 0xbb, 0x33, 0x1b, 0x8b, 0xc7, 0x6a, 0x91, 0x90, 0x32, 0xc4, 0x02, 0x0b, 0xc1,
- 0xae, 0xda, 0x49, 0x40, 0x48, 0x48, 0xb4, 0xc7, 0x1d, 0xa7, 0xf1, 0xce, 0x83, 0x99, 0x1e, 0xb3,
- 0xbe, 0xed, 0x47, 0xe0, 0x08, 0x12, 0x07, 0x3e, 0x04, 0x12, 0x5f, 0x21, 0xc7, 0x15, 0xe2, 0xb0,
- 0x07, 0x64, 0x91, 0xe1, 0xca, 0x91, 0x13, 0x27, 0xd4, 0x3d, 0x1d, 0xdb, 0x63, 0x27, 0x13, 0x9f,
- 0xf6, 0xe6, 0xa9, 0xc7, 0xaf, 0xaa, 0x7e, 0x55, 0x5d, 0x65, 0xb8, 0x3b, 0x7c, 0x10, 0x99, 0xcc,
- 0xb7, 0x48, 0xc0, 0x2c, 0x12, 0xf3, 0x53, 0xea, 0x71, 0xe6, 0x10, 0xce, 0x7c, 0xcf, 0x1a, 0xed,
- 0x59, 0x03, 0xea, 0xd1, 0x90, 0x70, 0xda, 0x37, 0x83, 0xd0, 0xe7, 0x3e, 0xba, 0x9d, 0x5a, 0x9b,
- 0x24, 0x60, 0x66, 0xd6, 0xda, 0x1c, 0xed, 0x6d, 0xdf, 0x1b, 0x30, 0x7e, 0x1a, 0xf7, 0x4c, 0xc7,
- 0x77, 0xad, 0x81, 0x3f, 0xf0, 0x2d, 0xe9, 0xd4, 0x8b, 0x4f, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x52,
- 0xb0, 0xed, 0xb7, 0x67, 0xa1, 0x5d, 0xe2, 0x9c, 0x32, 0x8f, 0x86, 0x63, 0x2b, 0x18, 0x0e, 0x84,
- 0x20, 0xb2, 0x5c, 0xca, 0xc9, 0x25, 0x29, 0x6c, 0x5b, 0x57, 0x79, 0x85, 0xb1, 0xc7, 0x99, 0x4b,
- 0x97, 0x1c, 0xde, 0xbd, 0xce, 0x21, 0x72, 0x4e, 0xa9, 0x4b, 0x16, 0xfd, 0x8c, 0xdf, 0x34, 0xf8,
- 0xbf, 0xed, 0xc7, 0x5e, 0xff, 0x51, 0xef, 0x1b, 0xea, 0x70, 0x4c, 0x4f, 0x68, 0x48, 0x3d, 0x87,
- 0xa2, 0x1d, 0x28, 0x0f, 0x99, 0xd7, 0x6f, 0x68, 0x3b, 0xda, 0x6e, 0xcd, 0xbe, 0x71, 0x36, 0xd1,
- 0x0b, 0xc9, 0x44, 0x2f, 0x7f, 0xc2, 0xbc, 0x3e, 0x96, 0x1a, 0xd4, 0x02, 0x20, 0x01, 0x3b, 0xa6,
- 0x61, 0xc4, 0x7c, 0xaf, 0x51, 0x94, 0x76, 0x48, 0xd9, 0xc1, 0xfe, 0xe3, 0x8e, 0xd2, 0xe0, 0x39,
- 0x2b, 0x81, 0xea, 0x11, 0x97, 0x36, 0x4a, 0x59, 0xd4, 0xcf, 0x88, 0x4b, 0xb1, 0xd4, 0x20, 0x1b,
- 0x4a, 0x71, 0xe7, 0xa0, 0x51, 0x96, 0x06, 0xf7, 0x95, 0x41, 0xe9, 0xa8, 0x73, 0xf0, 0xef, 0x44,
- 0x7f, 0xfd, 0xaa, 0x22, 0xf9, 0x38, 0xa0, 0x91, 0x79, 0xd4, 0x39, 0xc0, 0xc2, 0xd9, 0x78, 0x0f,
- 0xa0, 0xfd, 0x94, 0x87, 0xe4, 0x98, 0x3c, 0x89, 0x29, 0xd2, 0xa1, 0xc2, 0x38, 0x75, 0xa3, 0x86,
- 0xb6, 0x53, 0xda, 0xad, 0xd9, 0xb5, 0x64, 0xa2, 0x57, 0x3a, 0x42, 0x80, 0x53, 0xf9, 0xc3, 0xea,
- 0x0f, 0x3f, 0xeb, 0x85, 0x67, 0x7f, 0xec, 0x14, 0x8c, 0xdf, 0x35, 0xd8, 0xea, 0xd2, 0x27, 0x27,
- 0xdd, 0x58, 0xb1, 0x31, 0x62, 0xf4, 0x3b, 0xf4, 0x35, 0x54, 0x45, 0x9f, 0xfa, 0x84, 0x13, 0x49,
- 0x47, 0xbd, 0x75, 0xdf, 0x9c, 0x8d, 0xc8, 0x34, 0x13, 0x33, 0x18, 0x0e, 0x84, 0x20, 0x32, 0x85,
- 0xb5, 0x39, 0xda, 0x33, 0x53, 0x4e, 0x3f, 0xa5, 0x9c, 0xcc, 0x88, 0x99, 0xc9, 0xf0, 0x14, 0x15,
- 0x7d, 0x05, 0x6b, 0x11, 0x27, 0x3c, 0x8e, 0x24, 0x8d, 0xf5, 0xd6, 0x3b, 0x66, 0xde, 0x08, 0x9a,
- 0x4b, 0x29, 0x76, 0xa5, 0xb3, 0xbd, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x58, 0x81, 0x1a, 0x3e, 0xbc,
- 0x72, 0x85, 0x0b, 0x3a, 0x84, 0x6a, 0x1c, 0xd1, 0xb0, 0xe3, 0x9d, 0xf8, 0xaa, 0xb6, 0x37, 0xf2,
- 0x63, 0x1f, 0x29, 0x6b, 0x7b, 0x53, 0x05, 0xab, 0x5e, 0x48, 0xf0, 0x14, 0xc9, 0xf8, 0xa9, 0x08,
- 0x37, 0x0e, 0xfd, 0x21, 0xf5, 0x30, 0xfd, 0x36, 0xa6, 0x11, 0x7f, 0x09, 0x14, 0x3e, 0x86, 0x72,
- 0x14, 0x50, 0x47, 0x11, 0x68, 0xe6, 0x17, 0x31, 0x9f, 0x5b, 0x37, 0xa0, 0xce, 0x6c, 0x12, 0xc5,
- 0x17, 0x96, 0x48, 0xe8, 0x8b, 0x69, 0x53, 0x4a, 0x4b, 0x19, 0x5f, 0x87, 0x99, 0xdf, 0x8f, 0x7f,
- 0x34, 0xd8, 0x5c, 0x4c, 0x01, 0xbd, 0x05, 0x35, 0x12, 0xf7, 0x99, 0x78, 0x7c, 0x17, 0xa3, 0xba,
- 0x9e, 0x4c, 0xf4, 0xda, 0xfe, 0x85, 0x10, 0xcf, 0xf4, 0xe8, 0x43, 0xd8, 0xa2, 0x4f, 0x03, 0x16,
- 0xca, 0xe8, 0x5d, 0xea, 0xf8, 0x5e, 0x3f, 0x92, 0x6f, 0xa6, 0x64, 0xdf, 0x4a, 0x26, 0xfa, 0x56,
- 0x7b, 0x51, 0x89, 0x97, 0xed, 0x91, 0x07, 0x1b, 0xbd, 0xcc, 0xd3, 0x57, 0x85, 0xb6, 0xf2, 0x0b,
- 0xbd, 0x6c, 0x5d, 0xd8, 0x28, 0x99, 0xe8, 0x1b, 0x59, 0x0d, 0x5e, 0x40, 0x37, 0x7e, 0xd1, 0x00,
- 0x2d, 0xb3, 0x84, 0xee, 0x40, 0x85, 0x0b, 0xa9, 0x5a, 0x35, 0xeb, 0x8a, 0xb4, 0x4a, 0x6a, 0x9a,
- 0xea, 0xd0, 0x18, 0x6e, 0xce, 0x0a, 0x38, 0x64, 0x2e, 0x8d, 0x38, 0x71, 0x03, 0xd5, 0xed, 0x37,
- 0x57, 0x9b, 0x25, 0xe1, 0x66, 0xbf, 0xaa, 0xe0, 0x6f, 0xb6, 0x97, 0xe1, 0xf0, 0x65, 0x31, 0x8c,
- 0x1f, 0x8b, 0x50, 0x57, 0x69, 0xbf, 0xa4, 0x75, 0xf0, 0x28, 0x33, 0xcb, 0xf7, 0x56, 0x9a, 0x3b,
- 0xf9, 0xa6, 0xaf, 0x1a, 0xe5, 0xcf, 0x17, 0x46, 0xd9, 0x5a, 0x1d, 0x32, 0x7f, 0x92, 0x1d, 0xf8,
- 0xdf, 0x42, 0xfc, 0xd5, 0xda, 0x99, 0x19, 0xf6, 0x62, 0xfe, 0xb0, 0x1b, 0x7f, 0x6b, 0xb0, 0xb5,
- 0x94, 0x12, 0x7a, 0x1f, 0xd6, 0xe7, 0x32, 0xa7, 0xe9, 0xa5, 0xaa, 0xda, 0xb7, 0x54, 0xbc, 0xf5,
- 0xfd, 0x79, 0x25, 0xce, 0xda, 0xa2, 0x8f, 0xa1, 0x2c, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37,
- 0xa5, 0x56, 0x48, 0xb0, 0x44, 0xc8, 0x56, 0x52, 0xbe, 0xe6, 0xd9, 0xde, 0x81, 0x0a, 0x0d, 0x43,
- 0x3f, 0x54, 0xf7, 0x6f, 0xca, 0x4d, 0x5b, 0x08, 0x71, 0xaa, 0x33, 0x7e, 0x2d, 0xc2, 0x74, 0xa7,
- 0xa2, 0xbb, 0xe9, 0x7e, 0x96, 0x47, 0x33, 0x25, 0x34, 0xb3, 0x77, 0x85, 0x1c, 0x4f, 0x2d, 0xd0,
- 0x6b, 0x50, 0x8a, 0x59, 0x5f, 0xdd, 0xe2, 0xfa, 0xdc, 0xf1, 0xc4, 0x42, 0x8e, 0x0c, 0x58, 0x1b,
- 0x84, 0x7e, 0x1c, 0x88, 0x31, 0x10, 0x89, 0x82, 0xe8, 0xe8, 0x47, 0x52, 0x82, 0x95, 0x06, 0x1d,
- 0x43, 0x85, 0x8a, 0xdb, 0x29, 0x6b, 0xa9, 0xb7, 0xf6, 0x56, 0xa3, 0xc6, 0x94, 0xf7, 0xb6, 0xed,
- 0xf1, 0x70, 0x3c, 0x57, 0x95, 0x90, 0xe1, 0x14, 0x6e, 0xbb, 0xa7, 0x6e, 0xb2, 0xb4, 0x41, 0x9b,
- 0x50, 0x1a, 0xd2, 0x71, 0x5a, 0x11, 0x16, 0x3f, 0xd1, 0x07, 0x50, 0x19, 0x89, 0x73, 0xad, 0x5a,
- 0xb2, 0x9b, 0x1f, 0x77, 0x76, 0xde, 0x71, 0xea, 0xf6, 0xb0, 0xf8, 0x40, 0xb3, 0xed, 0xb3, 0xf3,
- 0x66, 0xe1, 0xf9, 0x79, 0xb3, 0xf0, 0xe2, 0xbc, 0x59, 0x78, 0x96, 0x34, 0xb5, 0xb3, 0xa4, 0xa9,
- 0x3d, 0x4f, 0x9a, 0xda, 0x8b, 0xa4, 0xa9, 0xfd, 0x99, 0x34, 0xb5, 0xef, 0xff, 0x6a, 0x16, 0xbe,
- 0xbc, 0x9d, 0xf7, 0x67, 0xf0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xb7, 0xc1, 0xa0, 0x2b,
- 0x0a, 0x00, 0x00,
-}
+func (m *UserInfo) Reset() { *m = UserInfo{} }
func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -898,7 +515,7 @@ func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
@@ -1260,7 +877,7 @@ func (this *UserInfo) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
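The authentication/v1 hunks above drop the gogo `sortkeys` dependency in favor of the standard library: map keys are still collected and sorted before marshaling so output stays deterministic. A sketch of the equivalent pattern with `sort.Strings`; the function name and map type here are illustrative, not taken from the generated code:

```go
package example

import "sort"

// sortedKeys returns the map's keys in the stable order the generated
// Marshal/String code iterates them.
func sortedKeys(extra map[string][]string) []string {
	keys := make([]string, 0, len(extra))
	for k := range extra {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
```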
diff --git a/operator/vendor/k8s.io/api/authentication/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/authentication/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..7003a808
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1/generated.protomessage.pb.go
@@ -0,0 +1,44 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*BoundObjectReference) ProtoMessage() {}
+
+func (*ExtraValue) ProtoMessage() {}
+
+func (*SelfSubjectReview) ProtoMessage() {}
+
+func (*SelfSubjectReviewStatus) ProtoMessage() {}
+
+func (*TokenRequest) ProtoMessage() {}
+
+func (*TokenRequestSpec) ProtoMessage() {}
+
+func (*TokenRequestStatus) ProtoMessage() {}
+
+func (*TokenReview) ProtoMessage() {}
+
+func (*TokenReviewSpec) ProtoMessage() {}
+
+func (*TokenReviewStatus) ProtoMessage() {}
+
+func (*UserInfo) ProtoMessage() {}
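The generated stub file above defines no-op `ProtoMessage()` methods only under the `kubernetes_protomessage_one_more_release` build tag. A hedged sketch of how a file built with that same tag could still treat these types as proto messages; the `protoMessage` marker interface below is assumed for illustration and is not part of the vendored code:

```go
//go:build kubernetes_protomessage_one_more_release

package example

import authnv1 "k8s.io/api/authentication/v1"

// protoMessage mirrors the marker interface the generated stubs satisfy.
type protoMessage interface{ ProtoMessage() }

// Compiles only when the stubs above are built, i.e. with the same build tag.
var _ protoMessage = (*authnv1.TokenReview)(nil)
```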
diff --git a/operator/vendor/k8s.io/api/authentication/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/authentication/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..ca0c6f35
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1/zz_generated.model_name.go
@@ -0,0 +1,72 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in BoundObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.BoundObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReview) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.SelfSubjectReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.SelfSubjectReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenRequest) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenRequestSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenRequestSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenRequestStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenRequestStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReview) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.TokenReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserInfo) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1.UserInfo"
+}
diff --git a/operator/vendor/k8s.io/api/authentication/v1alpha1/doc.go b/operator/vendor/k8s.io/api/authentication/v1alpha1/doc.go
index c199ccd4..47f61e0b 100644
--- a/operator/vendor/k8s.io/api/authentication/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/authentication/v1alpha1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.authentication.v1alpha1
package v1alpha1
diff --git a/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go
index 98c106ec..b017e1c3 100644
--- a/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.pb.go
@@ -24,116 +24,14 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-func (*SelfSubjectReview) ProtoMessage() {}
-func (*SelfSubjectReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_f003acd72d3d5efb, []int{0}
-}
-func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReview.Merge(m, src)
-}
-func (m *SelfSubjectReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo
-
-func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
-func (*SelfSubjectReviewStatus) ProtoMessage() {}
-func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_f003acd72d3d5efb, []int{1}
-}
-func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src)
-}
-func (m *SelfSubjectReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReview")
- proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1alpha1.SelfSubjectReviewStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/authentication/v1alpha1/generated.proto", fileDescriptor_f003acd72d3d5efb)
-}
-
-var fileDescriptor_f003acd72d3d5efb = []byte{
- // 368 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x4f, 0xe2, 0x40,
- 0x14, 0xc7, 0x3b, 0x7b, 0x20, 0xa4, 0x9b, 0x6c, 0x76, 0x7b, 0x59, 0xc2, 0x61, 0x30, 0x3d, 0x18,
- 0x0f, 0x3a, 0x23, 0xc4, 0x18, 0x13, 0x6f, 0x3d, 0xe9, 0xc1, 0x98, 0x14, 0xbd, 0x78, 0xf2, 0x51,
- 0x1e, 0xed, 0x08, 0xed, 0x34, 0xed, 0x14, 0xe3, 0xcd, 0x8f, 0xe0, 0xc7, 0xe2, 0xc8, 0x91, 0x78,
- 0x20, 0x52, 0xbf, 0x88, 0xe9, 0x50, 0x20, 0x82, 0xc0, 0xad, 0xef, 0xe5, 0xfd, 0x7e, 0xef, 0xdf,
- 0x99, 0x31, 0x5b, 0xfd, 0x8b, 0x94, 0x09, 0xc9, 0x21, 0x16, 0x1c, 0x32, 0x15, 0x60, 0xa4, 0x84,
- 0x07, 0x4a, 0xc8, 0x88, 0x0f, 0x9b, 0x30, 0x88, 0x03, 0x68, 0x72, 0x1f, 0x23, 0x4c, 0x40, 0x61,
- 0x97, 0xc5, 0x89, 0x54, 0xd2, 0xb2, 0xe7, 0x0c, 0x83, 0x58, 0xb0, 0xef, 0x0c, 0x5b, 0x30, 0xf5,
- 0x13, 0x5f, 0xa8, 0x20, 0xeb, 0x30, 0x4f, 0x86, 0xdc, 0x97, 0xbe, 0xe4, 0x1a, 0xed, 0x64, 0x3d,
- 0x5d, 0xe9, 0x42, 0x7f, 0xcd, 0x95, 0xf5, 0xe3, 0x5d, 0x31, 0xd6, 0x03, 0xd4, 0xcf, 0x56, 0xd3,
- 0x21, 0x78, 0x81, 0x88, 0x30, 0x79, 0xe1, 0x71, 0xdf, 0x2f, 0x1a, 0x29, 0x0f, 0x51, 0xc1, 0x4f,
- 0x14, 0xdf, 0x46, 0x25, 0x59, 0xa4, 0x44, 0x88, 0x1b, 0xc0, 0xf9, 0x3e, 0x20, 0xf5, 0x02, 0x0c,
- 0x61, 0x9d, 0xb3, 0xdf, 0x89, 0xf9, 0xaf, 0x8d, 0x83, 0x5e, 0x3b, 0xeb, 0x3c, 0xa1, 0xa7, 0x5c,
- 0x1c, 0x0a, 0x7c, 0xb6, 0x1e, 0xcd, 0x6a, 0x91, 0xac, 0x0b, 0x0a, 0x6a, 0xe4, 0x80, 0x1c, 0xfd,
- 0x6e, 0x9d, 0xb2, 0xd5, 0x41, 0x2e, 0x17, 0xb0, 0xb8, 0xef, 0x17, 0x8d, 0x94, 0x15, 0xd3, 0x6c,
- 0xd8, 0x64, 0xb7, 0xda, 0x72, 0x83, 0x0a, 0x1c, 0x6b, 0x34, 0x6d, 0x18, 0xf9, 0xb4, 0x61, 0xae,
- 0x7a, 0xee, 0xd2, 0x6a, 0x79, 0x66, 0x25, 0x55, 0xa0, 0xb2, 0xb4, 0xf6, 0x4b, 0xfb, 0x2f, 0xd9,
- 0xfe, 0x8b, 0x62, 0x1b, 0x41, 0xdb, 0x5a, 0xe1, 0xfc, 0x29, 0x57, 0x55, 0xe6, 0xb5, 0x5b, 0xaa,
- 0x6d, 0x69, 0xfe, 0xdf, 0x82, 0x58, 0x77, 0x66, 0x35, 0x4b, 0x31, 0xb9, 0x8e, 0x7a, 0xb2, 0xfc,
- 0xc3, 0xc3, 0x9d, 0x09, 0xd8, 0x7d, 0x39, 0xed, 0xfc, 0x2d, 0x97, 0x55, 0x17, 0x1d, 0x77, 0x69,
- 0x72, 0xae, 0x46, 0x33, 0x6a, 0x8c, 0x67, 0xd4, 0x98, 0xcc, 0xa8, 0xf1, 0x9a, 0x53, 0x32, 0xca,
- 0x29, 0x19, 0xe7, 0x94, 0x4c, 0x72, 0x4a, 0x3e, 0x72, 0x4a, 0xde, 0x3e, 0xa9, 0xf1, 0x60, 0xef,
- 0x7f, 0xc7, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x04, 0xfb, 0xb6, 0xfb, 0xec, 0x02, 0x00, 0x00,
-}
+func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..d0e9c4e5
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,26 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*SelfSubjectReview) ProtoMessage() {}
+
+func (*SelfSubjectReviewStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..fbf0de8e
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,32 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReview) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1alpha1.SelfSubjectReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1alpha1.SelfSubjectReviewStatus"
+}
diff --git a/operator/vendor/k8s.io/api/authentication/v1beta1/doc.go b/operator/vendor/k8s.io/api/authentication/v1beta1/doc.go
index af63dc84..2acdbd02 100644
--- a/operator/vendor/k8s.io/api/authentication/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/authentication/v1beta1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.authentication.v1beta1
package v1beta1
diff --git a/operator/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
index 41539264..7e337e0a 100644
--- a/operator/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
@@ -23,286 +23,26 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{0}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-func (*SelfSubjectReview) ProtoMessage() {}
-func (*SelfSubjectReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{1}
-}
-func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReview.Merge(m, src)
-}
-func (m *SelfSubjectReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m)
-}
+func (m *SelfSubjectReview) Reset() { *m = SelfSubjectReview{} }
-var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo
+func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
-func (m *SelfSubjectReviewStatus) Reset() { *m = SelfSubjectReviewStatus{} }
-func (*SelfSubjectReviewStatus) ProtoMessage() {}
-func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{2}
-}
-func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src)
-}
-func (m *SelfSubjectReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m)
-}
+func (m *TokenReview) Reset() { *m = TokenReview{} }
-var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo
+func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} }
-func (m *TokenReview) Reset() { *m = TokenReview{} }
-func (*TokenReview) ProtoMessage() {}
-func (*TokenReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{3}
-}
-func (m *TokenReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReview.Merge(m, src)
-}
-func (m *TokenReview) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReview) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReview.DiscardUnknown(m)
-}
+func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} }
-var xxx_messageInfo_TokenReview proto.InternalMessageInfo
-
-func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} }
-func (*TokenReviewSpec) ProtoMessage() {}
-func (*TokenReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{4}
-}
-func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReviewSpec.Merge(m, src)
-}
-func (m *TokenReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo
-
-func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} }
-func (*TokenReviewStatus) ProtoMessage() {}
-func (*TokenReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{5}
-}
-func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenReviewStatus.Merge(m, src)
-}
-func (m *TokenReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo
-
-func (m *UserInfo) Reset() { *m = UserInfo{} }
-func (*UserInfo) ProtoMessage() {}
-func (*UserInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_fdc2de40fd7f3b21, []int{6}
-}
-func (m *UserInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserInfo.Merge(m, src)
-}
-func (m *UserInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *UserInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_UserInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserInfo proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue")
- proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReview")
- proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.SelfSubjectReviewStatus")
- proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview")
- proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec")
- proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus")
- proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_fdc2de40fd7f3b21)
-}
-
-var fileDescriptor_fdc2de40fd7f3b21 = []byte{
- // 711 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x4e, 0xdb, 0x4e,
- 0x10, 0x8e, 0xf3, 0x07, 0x25, 0x9b, 0x5f, 0x7e, 0x85, 0x95, 0xaa, 0xa2, 0x48, 0x75, 0x20, 0x95,
- 0x2a, 0x24, 0x60, 0xdd, 0x20, 0x44, 0x11, 0x3d, 0xe1, 0x16, 0x21, 0x0e, 0xa8, 0xd2, 0x06, 0x7a,
- 0x68, 0x7b, 0xe8, 0xc6, 0x19, 0x1c, 0x37, 0xc4, 0xb6, 0xec, 0x75, 0x5a, 0x6e, 0x3c, 0x42, 0x8f,
- 0x3d, 0x56, 0xea, 0x93, 0xf4, 0xc6, 0x91, 0x23, 0x95, 0xaa, 0xa8, 0xb8, 0x4f, 0xd0, 0x37, 0xa8,
- 0x76, 0xbd, 0x38, 0x09, 0x94, 0x00, 0x97, 0xde, 0xbc, 0xdf, 0xce, 0xf7, 0xcd, 0xcc, 0x37, 0xa3,
- 0x35, 0x6a, 0x74, 0xd7, 0x43, 0xe2, 0x78, 0x06, 0xf3, 0x1d, 0x83, 0x45, 0xbc, 0x03, 0x2e, 0x77,
- 0x2c, 0xc6, 0x1d, 0xcf, 0x35, 0xfa, 0x8d, 0x16, 0x70, 0xd6, 0x30, 0x6c, 0x70, 0x21, 0x60, 0x1c,
- 0xda, 0xc4, 0x0f, 0x3c, 0xee, 0xe1, 0xf9, 0x84, 0x42, 0x98, 0xef, 0x90, 0x71, 0x0a, 0x51, 0x94,
- 0xea, 0xb2, 0xed, 0xf0, 0x4e, 0xd4, 0x22, 0x96, 0xd7, 0x33, 0x6c, 0xcf, 0xf6, 0x0c, 0xc9, 0x6c,
- 0x45, 0x07, 0xf2, 0x24, 0x0f, 0xf2, 0x2b, 0x51, 0xac, 0x2e, 0x4d, 0x2a, 0xe2, 0x72, 0xfe, 0xea,
- 0xea, 0x30, 0xba, 0xc7, 0xac, 0x8e, 0xe3, 0x42, 0x70, 0x64, 0xf8, 0x5d, 0x5b, 0x00, 0xa1, 0xd1,
- 0x03, 0xce, 0xfe, 0xc6, 0x32, 0xae, 0x63, 0x05, 0x91, 0xcb, 0x9d, 0x1e, 0x5c, 0x21, 0xac, 0xdd,
- 0x44, 0x08, 0xad, 0x0e, 0xf4, 0xd8, 0x65, 0x5e, 0xfd, 0x29, 0x42, 0x5b, 0x1f, 0x79, 0xc0, 0x5e,
- 0xb1, 0xc3, 0x08, 0x70, 0x0d, 0x15, 0x1c, 0x0e, 0xbd, 0x70, 0x56, 0x9b, 0xcb, 0x2d, 0x94, 0xcc,
- 0x52, 0x3c, 0xa8, 0x15, 0x76, 0x04, 0x40, 0x13, 0x7c, 0xa3, 0xf8, 0xf9, 0x4b, 0x2d, 0x73, 0xfc,
- 0x63, 0x2e, 0x53, 0xff, 0xae, 0xa1, 0x99, 0x26, 0x1c, 0x1e, 0x34, 0xa3, 0xd6, 0x7b, 0xb0, 0x38,
- 0x85, 0xbe, 0x03, 0x1f, 0xf0, 0x3b, 0x54, 0x14, 0x2d, 0xb5, 0x19, 0x67, 0xb3, 0xda, 0x9c, 0xb6,
- 0x50, 0x5e, 0x79, 0x42, 0x86, 0x03, 0x48, 0x2b, 0x23, 0x7e, 0xd7, 0x16, 0x40, 0x48, 0x44, 0x34,
- 0xe9, 0x37, 0xc8, 0x4b, 0xa9, 0xb2, 0x0b, 0x9c, 0x99, 0xf8, 0x64, 0x50, 0xcb, 0xc4, 0x83, 0x1a,
- 0x1a, 0x62, 0x34, 0x55, 0xc5, 0x2d, 0x34, 0x15, 0x72, 0xc6, 0xa3, 0x70, 0x36, 0x2b, 0xf5, 0x37,
- 0xc8, 0x8d, 0x03, 0x26, 0x57, 0xea, 0x6c, 0x4a, 0x05, 0xf3, 0x7f, 0x95, 0x69, 0x2a, 0x39, 0x53,
- 0xa5, 0x5c, 0xf7, 0xd0, 0x83, 0x6b, 0x28, 0x78, 0x0f, 0x15, 0xa3, 0x10, 0x82, 0x1d, 0xf7, 0xc0,
- 0x53, 0x0d, 0x3e, 0x9e, 0x58, 0x00, 0xd9, 0x57, 0xd1, 0xe6, 0xb4, 0x4a, 0x56, 0xbc, 0x40, 0x68,
- 0xaa, 0x54, 0xff, 0x9a, 0x45, 0xe5, 0x3d, 0xaf, 0x0b, 0xee, 0x3f, 0xb3, 0x71, 0x0f, 0xe5, 0x43,
- 0x1f, 0x2c, 0x65, 0xe2, 0xca, 0x2d, 0x4c, 0x1c, 0xa9, 0xaf, 0xe9, 0x83, 0x65, 0xfe, 0xa7, 0xf4,
- 0xf3, 0xe2, 0x44, 0xa5, 0x1a, 0x7e, 0x9b, 0x0e, 0x27, 0x27, 0x75, 0x57, 0xef, 0xa8, 0x3b, 0x79,
- 0x2c, 0x16, 0xba, 0x77, 0xa9, 0x08, 0xfc, 0x08, 0x15, 0xb8, 0x80, 0xa4, 0x4b, 0x25, 0xb3, 0xa2,
- 0x98, 0x85, 0x24, 0x2e, 0xb9, 0xc3, 0x8b, 0xa8, 0xc4, 0xa2, 0xb6, 0x03, 0xae, 0x05, 0x62, 0x6b,
- 0xc4, 0x66, 0x57, 0xe2, 0x41, 0xad, 0xb4, 0x79, 0x01, 0xd2, 0xe1, 0x7d, 0xfd, 0xb7, 0x86, 0x66,
- 0xae, 0x94, 0x84, 0x9f, 0xa1, 0xca, 0x48, 0xf9, 0xd0, 0x96, 0xf9, 0x8a, 0xe6, 0x7d, 0x95, 0xaf,
- 0xb2, 0x39, 0x7a, 0x49, 0xc7, 0x63, 0xf1, 0x2e, 0xca, 0x8b, 0x49, 0x2b, 0xaf, 0x17, 0x6f, 0xe1,
- 0x49, 0xba, 0x34, 0xa9, 0xc9, 0x02, 0xa1, 0x52, 0x66, 0xbc, 0x9d, 0xfc, 0xe4, 0x76, 0x84, 0x41,
- 0x10, 0x04, 0x5e, 0x20, 0x07, 0x32, 0x62, 0xd0, 0x96, 0x00, 0x69, 0x72, 0x57, 0xff, 0x96, 0x45,
- 0xe9, 0x56, 0xe2, 0xa5, 0x64, 0xc3, 0x5d, 0xd6, 0x03, 0xe5, 0xea, 0xd8, 0xe6, 0x0a, 0x9c, 0xa6,
- 0x11, 0xf8, 0x21, 0xca, 0x45, 0x4e, 0x5b, 0xb6, 0x56, 0x32, 0xcb, 0x2a, 0x30, 0xb7, 0xbf, 0xf3,
- 0x82, 0x0a, 0x1c, 0xd7, 0xd1, 0x94, 0x1d, 0x78, 0x91, 0x2f, 0x16, 0x42, 0x14, 0x8a, 0xc4, 0x58,
- 0xb7, 0x25, 0x42, 0xd5, 0x0d, 0x7e, 0x83, 0x0a, 0x20, 0x9e, 0x20, 0xd9, 0x4b, 0x79, 0x65, 0xed,
- 0x0e, 0xfe, 0x10, 0xf9, 0x76, 0x6d, 0xb9, 0x3c, 0x38, 0x1a, 0x69, 0x4d, 0x60, 0x34, 0xd1, 0xac,
- 0xda, 0xea, 0x7d, 0x93, 0x31, 0x78, 0x1a, 0xe5, 0xba, 0x70, 0x94, 0xb4, 0x45, 0xc5, 0x27, 0x7e,
- 0x8e, 0x0a, 0x7d, 0xf1, 0xf4, 0xa9, 0xe1, 0x2c, 0xdf, 0x22, 0xf9, 0xf0, 0xbd, 0xa4, 0x09, 0x77,
- 0x23, 0xbb, 0xae, 0x99, 0xdb, 0x27, 0xe7, 0x7a, 0xe6, 0xf4, 0x5c, 0xcf, 0x9c, 0x9d, 0xeb, 0x99,
- 0xe3, 0x58, 0xd7, 0x4e, 0x62, 0x5d, 0x3b, 0x8d, 0x75, 0xed, 0x2c, 0xd6, 0xb5, 0x9f, 0xb1, 0xae,
- 0x7d, 0xfa, 0xa5, 0x67, 0x5e, 0xcf, 0xdf, 0xf8, 0x03, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0x45,
- 0x72, 0x2b, 0xf2, 0xe4, 0x06, 0x00, 0x00,
-}
+func (m *UserInfo) Reset() { *m = UserInfo{} }
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -582,7 +322,7 @@ func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
@@ -829,7 +569,7 @@ func (this *UserInfo) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
diff --git a/operator/vendor/k8s.io/api/authentication/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/authentication/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a55034da
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,36 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*ExtraValue) ProtoMessage() {}
+
+func (*SelfSubjectReview) ProtoMessage() {}
+
+func (*SelfSubjectReviewStatus) ProtoMessage() {}
+
+func (*TokenReview) ProtoMessage() {}
+
+func (*TokenReviewSpec) ProtoMessage() {}
+
+func (*TokenReviewStatus) ProtoMessage() {}
+
+func (*UserInfo) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/authentication/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/authentication/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..f464c3f2
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authentication/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,52 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReview) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.SelfSubjectReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.SelfSubjectReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReview) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.TokenReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.TokenReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.TokenReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserInfo) OpenAPIModelName() string {
+ return "io.k8s.api.authentication.v1beta1.UserInfo"
+}
diff --git a/operator/vendor/k8s.io/api/authorization/v1/doc.go b/operator/vendor/k8s.io/api/authorization/v1/doc.go
index 40bf8006..0a0cbf91 100644
--- a/operator/vendor/k8s.io/api/authorization/v1/doc.go
+++ b/operator/vendor/k8s.io/api/authorization/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.authorization.v1
+
// +groupName=authorization.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/authorization/v1/generated.pb.go b/operator/vendor/k8s.io/api/authorization/v1/generated.pb.go
index aed9a3a4..f389c4c0 100644
--- a/operator/vendor/k8s.io/api/authorization/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/authorization/v1/generated.pb.go
@@ -23,581 +23,46 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} }
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{0}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
-
-func (m *FieldSelectorAttributes) Reset() { *m = FieldSelectorAttributes{} }
-func (*FieldSelectorAttributes) ProtoMessage() {}
-func (*FieldSelectorAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{1}
-}
-func (m *FieldSelectorAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FieldSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FieldSelectorAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldSelectorAttributes.Merge(m, src)
-}
-func (m *FieldSelectorAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *FieldSelectorAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldSelectorAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldSelectorAttributes proto.InternalMessageInfo
-
-func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} }
-func (*LabelSelectorAttributes) ProtoMessage() {}
-func (*LabelSelectorAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{2}
-}
-func (m *LabelSelectorAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelSelectorAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LabelSelectorAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelSelectorAttributes.Merge(m, src)
-}
-func (m *LabelSelectorAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelSelectorAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelSelectorAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelSelectorAttributes proto.InternalMessageInfo
-
-func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
-func (*LocalSubjectAccessReview) ProtoMessage() {}
-func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{3}
-}
-func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src)
-}
-func (m *LocalSubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
-
-func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} }
-func (*NonResourceAttributes) ProtoMessage() {}
-func (*NonResourceAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{4}
-}
-func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourceAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourceAttributes.Merge(m, src)
-}
-func (m *NonResourceAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourceAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo
-
-func (m *NonResourceRule) Reset() { *m = NonResourceRule{} }
-func (*NonResourceRule) ProtoMessage() {}
-func (*NonResourceRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{5}
-}
-func (m *NonResourceRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourceRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourceRule.Merge(m, src)
-}
-func (m *NonResourceRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourceRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourceRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo
-
-func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} }
-func (*ResourceAttributes) ProtoMessage() {}
-func (*ResourceAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{6}
-}
-func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceAttributes.Merge(m, src)
-}
-func (m *ResourceAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo
-
-func (m *ResourceRule) Reset() { *m = ResourceRule{} }
-func (*ResourceRule) ProtoMessage() {}
-func (*ResourceRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{7}
-}
-func (m *ResourceRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceRule.Merge(m, src)
-}
-func (m *ResourceRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceRule proto.InternalMessageInfo
-
-func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} }
-func (*SelfSubjectAccessReview) ProtoMessage() {}
-func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{8}
-}
-func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src)
-}
-func (m *SelfSubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo
-
-func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} }
-func (*SelfSubjectAccessReviewSpec) ProtoMessage() {}
-func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{9}
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src)
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m)
-}
+func (m *LabelSelectorAttributes) Reset() { *m = LabelSelectorAttributes{} }
-var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo
-
-func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
-func (*SelfSubjectRulesReview) ProtoMessage() {}
-func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{10}
-}
-func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src)
-}
-func (m *SelfSubjectRulesReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
-
-func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
-func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
-func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{11}
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src)
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m)
-}
+func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
-var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
+func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} }
-func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
-func (*SubjectAccessReview) ProtoMessage() {}
-func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{12}
-}
-func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReview.Merge(m, src)
-}
-func (m *SubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m)
-}
+func (m *NonResourceRule) Reset() { *m = NonResourceRule{} }
-var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
+func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} }
-func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} }
-func (*SubjectAccessReviewSpec) ProtoMessage() {}
-func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{13}
-}
-func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src)
-}
-func (m *SubjectAccessReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m)
-}
+func (m *ResourceRule) Reset() { *m = ResourceRule{} }
-var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo
+func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} }
-func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} }
-func (*SubjectAccessReviewStatus) ProtoMessage() {}
-func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{14}
-}
-func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src)
-}
-func (m *SubjectAccessReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m)
-}
+func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} }
-var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo
+func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
-func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
-func (*SubjectRulesReviewStatus) ProtoMessage() {}
-func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_aafd0e5e70cec678, []int{15}
-}
-func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src)
-}
-func (m *SubjectRulesReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m)
-}
+func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
-var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
+func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
-func init() {
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue")
- proto.RegisterType((*FieldSelectorAttributes)(nil), "k8s.io.api.authorization.v1.FieldSelectorAttributes")
- proto.RegisterType((*LabelSelectorAttributes)(nil), "k8s.io.api.authorization.v1.LabelSelectorAttributes")
- proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview")
- proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes")
- proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule")
- proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1.ResourceAttributes")
- proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1.ResourceRule")
- proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReview")
- proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReviewSpec")
- proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReview")
- proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec")
- proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview")
- proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry")
- proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus")
- proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus")
-}
+func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} }
-func init() {
- proto.RegisterFile("k8s.io/api/authorization/v1/generated.proto", fileDescriptor_aafd0e5e70cec678)
-}
+func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} }
-var fileDescriptor_aafd0e5e70cec678 = []byte{
- // 1247 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5,
- 0x17, 0xf7, 0xfa, 0x47, 0x62, 0x8f, 0xe3, 0x6f, 0xd2, 0xc9, 0x37, 0xcd, 0x36, 0x11, 0x76, 0x64,
- 0x24, 0x48, 0xd5, 0xb2, 0x26, 0x51, 0xdb, 0x44, 0x95, 0x0a, 0xf2, 0xaa, 0x01, 0x45, 0x4a, 0x4b,
- 0x35, 0x51, 0x22, 0x51, 0x04, 0x62, 0xbc, 0x9e, 0xd8, 0x4b, 0xec, 0xdd, 0xed, 0xcc, 0xac, 0xd3,
- 0x70, 0xaa, 0xc4, 0x3f, 0xc0, 0x91, 0x43, 0x0f, 0xfc, 0x07, 0x5c, 0x90, 0xb8, 0x73, 0x40, 0x11,
- 0xa7, 0x1e, 0x8b, 0x84, 0x2c, 0x62, 0xce, 0xfc, 0x0f, 0x68, 0x66, 0xc7, 0xde, 0xdd, 0xc4, 0x76,
- 0x6d, 0x0e, 0x94, 0x43, 0x6f, 0x9e, 0xf7, 0x79, 0xbf, 0xe7, 0xbd, 0xb7, 0x6f, 0x0c, 0x6e, 0x1c,
- 0x6f, 0x33, 0xc3, 0x76, 0x2b, 0xd8, 0xb3, 0x2b, 0xd8, 0xe7, 0x4d, 0x97, 0xda, 0x5f, 0x63, 0x6e,
- 0xbb, 0x4e, 0xa5, 0xb3, 0x51, 0x69, 0x10, 0x87, 0x50, 0xcc, 0x49, 0xdd, 0xf0, 0xa8, 0xcb, 0x5d,
- 0xb8, 0x1a, 0x30, 0x1b, 0xd8, 0xb3, 0x8d, 0x18, 0xb3, 0xd1, 0xd9, 0x58, 0x79, 0xaf, 0x61, 0xf3,
- 0xa6, 0x5f, 0x33, 0x2c, 0xb7, 0x5d, 0x69, 0xb8, 0x0d, 0xb7, 0x22, 0x65, 0x6a, 0xfe, 0x91, 0x3c,
- 0xc9, 0x83, 0xfc, 0x15, 0xe8, 0x5a, 0xb9, 0x15, 0x1a, 0x6e, 0x63, 0xab, 0x69, 0x3b, 0x84, 0x9e,
- 0x56, 0xbc, 0xe3, 0x86, 0x20, 0xb0, 0x4a, 0x9b, 0x70, 0x3c, 0xc4, 0x83, 0x95, 0xca, 0x28, 0x29,
- 0xea, 0x3b, 0xdc, 0x6e, 0x93, 0x4b, 0x02, 0x77, 0x5e, 0x25, 0xc0, 0xac, 0x26, 0x69, 0xe3, 0x8b,
- 0x72, 0xe5, 0x2d, 0x00, 0x76, 0x9e, 0x72, 0x8a, 0x0f, 0x71, 0xcb, 0x27, 0xb0, 0x04, 0x32, 0x36,
- 0x27, 0x6d, 0xa6, 0x6b, 0x6b, 0xa9, 0xf5, 0x9c, 0x99, 0xeb, 0x75, 0x4b, 0x99, 0x5d, 0x41, 0x40,
- 0x01, 0xfd, 0x6e, 0xf6, 0xbb, 0xef, 0x4b, 0x89, 0x67, 0xbf, 0xaf, 0x25, 0xca, 0xbf, 0x6a, 0x60,
- 0xf9, 0x23, 0x9b, 0xb4, 0xea, 0xfb, 0xa4, 0x45, 0x2c, 0xee, 0xd2, 0x2a, 0xe7, 0xd4, 0xae, 0xf9,
- 0x9c, 0x30, 0x78, 0x1b, 0xe4, 0x29, 0x3e, 0xe9, 0x03, 0xba, 0xb6, 0xa6, 0xad, 0xe7, 0xcc, 0xc5,
- 0xb3, 0x6e, 0x29, 0xd1, 0xeb, 0x96, 0xf2, 0x28, 0x84, 0x50, 0x94, 0x0f, 0x3e, 0x05, 0x73, 0x94,
- 0x3c, 0xf1, 0x6d, 0x4a, 0xda, 0xc4, 0xe1, 0x4c, 0x4f, 0xae, 0xa5, 0xd6, 0xf3, 0x9b, 0x1f, 0x18,
- 0xe1, 0x6d, 0x0c, 0x42, 0x33, 0xbc, 0xe3, 0x86, 0x20, 0x30, 0x43, 0x64, 0xd0, 0xe8, 0x6c, 0x18,
- 0x31, 0x5f, 0x50, 0xa8, 0xc6, 0xfc, 0xbf, 0xb2, 0x3b, 0x17, 0x21, 0x32, 0x14, 0xb3, 0x24, 0x83,
- 0xd9, 0xc3, 0x35, 0xd2, 0xfa, 0x8f, 0x04, 0x13, 0xf3, 0x65, 0xda, 0x60, 0x7e, 0x4c, 0x02, 0x7d,
- 0xcf, 0xb5, 0x70, 0x6b, 0xdf, 0xaf, 0x7d, 0x45, 0x2c, 0x5e, 0xb5, 0x2c, 0xc2, 0x18, 0x22, 0x1d,
- 0x9b, 0x9c, 0xc0, 0x2f, 0x41, 0x56, 0x18, 0xa9, 0x63, 0x8e, 0x65, 0x28, 0xf9, 0xcd, 0xf7, 0x27,
- 0x73, 0xe9, 0x13, 0xa9, 0xeb, 0x01, 0xe1, 0xd8, 0x84, 0xca, 0x09, 0x10, 0xd2, 0xd0, 0x40, 0x2b,
- 0x3c, 0x04, 0x69, 0xe6, 0x11, 0x4b, 0x4f, 0x4a, 0xed, 0xb7, 0x8c, 0x31, 0xbd, 0x64, 0x0c, 0xf1,
- 0x70, 0xdf, 0x23, 0x96, 0x39, 0xa7, 0x2c, 0xa4, 0xc5, 0x09, 0x49, 0x7d, 0xf0, 0x0b, 0x30, 0xc3,
- 0x38, 0xe6, 0x3e, 0xd3, 0x53, 0x52, 0xf3, 0x9d, 0xa9, 0x35, 0x4b, 0x69, 0xf3, 0x7f, 0x4a, 0xf7,
- 0x4c, 0x70, 0x46, 0x4a, 0x6b, 0xf9, 0x33, 0xb0, 0xf4, 0xd0, 0x75, 0x10, 0x61, 0xae, 0x4f, 0x2d,
- 0x12, 0x29, 0x80, 0x35, 0x90, 0xf6, 0x30, 0x6f, 0xaa, 0x9b, 0x1f, 0xb8, 0xf6, 0x08, 0xf3, 0x26,
- 0x92, 0x88, 0xe0, 0xe8, 0x10, 0x5a, 0x93, 0x21, 0x47, 0x38, 0x0e, 0x09, 0xad, 0x21, 0x89, 0x94,
- 0x9f, 0x80, 0xf9, 0x88, 0x72, 0xe4, 0xb7, 0x64, 0xaf, 0x09, 0x28, 0xd6, 0x6b, 0x42, 0x82, 0xa1,
- 0x80, 0x0e, 0xef, 0x81, 0x79, 0x27, 0x94, 0x39, 0x40, 0x7b, 0x41, 0x11, 0xe5, 0xcc, 0xc5, 0x5e,
- 0xb7, 0x14, 0x55, 0x27, 0x20, 0x74, 0x91, 0xb7, 0xfc, 0x3c, 0x0d, 0xe0, 0x90, 0x68, 0x2a, 0x20,
- 0xe7, 0xe0, 0x36, 0x61, 0x1e, 0xb6, 0x88, 0x0a, 0xe9, 0x8a, 0x72, 0x38, 0xf7, 0xb0, 0x0f, 0xa0,
- 0x90, 0xe7, 0xd5, 0xc1, 0xc1, 0xb7, 0x41, 0xa6, 0x41, 0x5d, 0xdf, 0x93, 0x17, 0x93, 0x33, 0x0b,
- 0x8a, 0x25, 0xf3, 0xb1, 0x20, 0xa2, 0x00, 0x83, 0xd7, 0xc1, 0x6c, 0x87, 0x50, 0x66, 0xbb, 0x8e,
- 0x9e, 0x96, 0x6c, 0xf3, 0x8a, 0x6d, 0xf6, 0x30, 0x20, 0xa3, 0x3e, 0x0e, 0x6f, 0x82, 0x2c, 0x55,
- 0x8e, 0xeb, 0x19, 0xc9, 0xbb, 0xa0, 0x78, 0xb3, 0x83, 0x0c, 0x0e, 0x38, 0x44, 0x7f, 0x32, 0xbf,
- 0x36, 0x10, 0x98, 0x89, 0xf7, 0xe7, 0x7e, 0x08, 0xa1, 0x28, 0x9f, 0x08, 0x4b, 0xc4, 0xa8, 0xcf,
- 0xc6, 0xc3, 0x12, 0x29, 0x40, 0x12, 0x81, 0x6d, 0x50, 0x38, 0x8a, 0x0e, 0x15, 0x3d, 0x3b, 0x41,
- 0x45, 0x8f, 0x18, 0x89, 0xe6, 0x95, 0x5e, 0xb7, 0x54, 0x88, 0xcf, 0xa8, 0xb8, 0x76, 0x61, 0xae,
- 0x15, 0x6d, 0x7b, 0x3d, 0x37, 0x81, 0xb9, 0x11, 0x43, 0x2b, 0x30, 0x17, 0x9f, 0x22, 0x71, 0xed,
- 0xe5, 0x9f, 0x35, 0x30, 0x37, 0x5d, 0x3d, 0xde, 0x00, 0x39, 0xec, 0xd9, 0xf2, 0x52, 0xfb, 0x95,
- 0x58, 0x10, 0x55, 0x53, 0x7d, 0xb4, 0x1b, 0x10, 0x51, 0x88, 0x0b, 0xe6, 0x7e, 0xaa, 0x45, 0xc3,
- 0x0e, 0x98, 0xfb, 0x26, 0x19, 0x0a, 0x71, 0xb8, 0x05, 0x0a, 0xfd, 0x83, 0x2c, 0x41, 0x3d, 0x2d,
- 0x05, 0x64, 0x10, 0x28, 0x0a, 0xa0, 0x38, 0x5f, 0xf9, 0xa7, 0x24, 0x58, 0xde, 0x27, 0xad, 0xa3,
- 0xd7, 0x33, 0xe9, 0x1e, 0xc7, 0x26, 0xdd, 0xf6, 0xf8, 0x79, 0x34, 0xdc, 0xcb, 0xd7, 0x36, 0xed,
- 0x9e, 0x27, 0xc1, 0xea, 0x18, 0x9f, 0xe0, 0x09, 0x80, 0xf4, 0xd2, 0xf0, 0x50, 0x79, 0xac, 0x8c,
- 0xf5, 0xe5, 0xf2, 0xcc, 0x31, 0xaf, 0xf6, 0xba, 0xa5, 0x21, 0xb3, 0x08, 0x0d, 0x31, 0x01, 0xbf,
- 0xd1, 0xc0, 0x92, 0x33, 0x6c, 0x0e, 0xab, 0x34, 0x6f, 0x8e, 0x35, 0x3e, 0x74, 0x82, 0x9b, 0xd7,
- 0x7a, 0xdd, 0xd2, 0xf0, 0xe1, 0x8e, 0x86, 0xdb, 0x12, 0xdf, 0xd0, 0xab, 0x91, 0xf4, 0x88, 0x06,
- 0xf9, 0xf7, 0xea, 0xea, 0xd3, 0x58, 0x5d, 0x6d, 0x4d, 0x5a, 0x57, 0x11, 0x27, 0x47, 0x96, 0xd5,
- 0xe7, 0x17, 0xca, 0xea, 0xf6, 0x24, 0x65, 0x15, 0x55, 0x3c, 0xbe, 0xaa, 0x1e, 0x80, 0x95, 0xd1,
- 0x0e, 0x4d, 0xfd, 0xe9, 0x29, 0xff, 0x90, 0x04, 0x8b, 0x6f, 0x96, 0x98, 0x69, 0xda, 0xfa, 0x97,
- 0x34, 0x58, 0x7e, 0xd3, 0xd2, 0xa3, 0xd6, 0x38, 0x9f, 0x11, 0xaa, 0x96, 0x94, 0xc1, 0xe5, 0x1c,
- 0x30, 0x42, 0x91, 0x44, 0x60, 0x19, 0xcc, 0x34, 0x82, 0xaf, 0x5b, 0xf0, 0xfd, 0x01, 0x22, 0xc1,
- 0xea, 0xd3, 0xa6, 0x10, 0x58, 0x07, 0x19, 0x22, 0xde, 0x4b, 0x7a, 0x46, 0xee, 0xf3, 0x1f, 0xfe,
- 0x93, 0xca, 0x30, 0xe4, 0x8b, 0x6b, 0xc7, 0xe1, 0xf4, 0x34, 0x5c, 0x96, 0x24, 0x0d, 0x05, 0xca,
- 0xe1, 0x5b, 0x20, 0xe5, 0xdb, 0x75, 0xb5, 0xcb, 0xe4, 0x15, 0x4b, 0xea, 0x60, 0xf7, 0x3e, 0x12,
- 0xf4, 0x15, 0xac, 0x1e, 0x6d, 0x52, 0x05, 0x5c, 0x00, 0xa9, 0x63, 0x72, 0x1a, 0x34, 0x14, 0x12,
- 0x3f, 0xe1, 0x3d, 0x90, 0xe9, 0x88, 0xf7, 0x9c, 0xca, 0xef, 0xbb, 0x63, 0x9d, 0x0c, 0x9f, 0x7f,
- 0x28, 0x90, 0xba, 0x9b, 0xdc, 0xd6, 0xca, 0xbf, 0x69, 0xe0, 0xda, 0xc8, 0xf2, 0x13, 0xcb, 0x1c,
- 0x6e, 0xb5, 0xdc, 0x13, 0x52, 0x97, 0x66, 0xb3, 0xe1, 0x32, 0x57, 0x0d, 0xc8, 0xa8, 0x8f, 0xc3,
- 0x77, 0xc0, 0x4c, 0x9d, 0x38, 0x36, 0xa9, 0xcb, 0xb5, 0x2f, 0x1b, 0x56, 0xee, 0x7d, 0x49, 0x45,
- 0x0a, 0x15, 0x7c, 0x94, 0x60, 0xe6, 0x3a, 0x6a, 0xd1, 0x1c, 0xf0, 0x21, 0x49, 0x45, 0x0a, 0x85,
- 0x55, 0x30, 0x4f, 0x84, 0x9b, 0xd2, 0xff, 0x1d, 0x4a, 0xdd, 0xfe, 0x8d, 0x2e, 0x2b, 0x81, 0xf9,
- 0x9d, 0x38, 0x8c, 0x2e, 0xf2, 0x97, 0xff, 0x4a, 0x02, 0x7d, 0xd4, 0x68, 0x83, 0x47, 0xe1, 0x2e,
- 0x22, 0x41, 0xb9, 0x0e, 0xe5, 0x37, 0xaf, 0x4f, 0xd4, 0x20, 0x42, 0xc2, 0x5c, 0x52, 0x8e, 0x14,
- 0xa2, 0xd4, 0xc8, 0xea, 0x22, 0x8f, 0x90, 0x82, 0x05, 0x27, 0xfe, 0x22, 0xe8, 0xbf, 0x11, 0x6f,
- 0x4e, 0xda, 0x0e, 0xd2, 0x9a, 0xae, 0xac, 0x2d, 0x5c, 0x00, 0x18, 0xba, 0xa4, 0x1f, 0x6e, 0x02,
- 0x60, 0x3b, 0x96, 0xdb, 0xf6, 0x5a, 0x84, 0x13, 0x99, 0xb6, 0x6c, 0x38, 0x07, 0x77, 0x07, 0x08,
- 0x8a, 0x70, 0x0d, 0xcb, 0x77, 0x7a, 0xba, 0x7c, 0x9b, 0xd5, 0xb3, 0xf3, 0x62, 0xe2, 0xc5, 0x79,
- 0x31, 0xf1, 0xf2, 0xbc, 0x98, 0x78, 0xd6, 0x2b, 0x6a, 0x67, 0xbd, 0xa2, 0xf6, 0xa2, 0x57, 0xd4,
- 0x5e, 0xf6, 0x8a, 0xda, 0x1f, 0xbd, 0xa2, 0xf6, 0xed, 0x9f, 0xc5, 0xc4, 0xe3, 0xd5, 0x31, 0xff,
- 0xd0, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x8c, 0x77, 0x0f, 0xbf, 0x11, 0x00, 0x00,
-}
+func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1247,7 +712,7 @@ func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
@@ -1914,7 +1379,7 @@ func (this *SubjectAccessReviewSpec) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
diff --git a/operator/vendor/k8s.io/api/authorization/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/authorization/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..dc1e1028
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authorization/v1/generated.protomessage.pb.go
@@ -0,0 +1,54 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*ExtraValue) ProtoMessage() {}
+
+func (*FieldSelectorAttributes) ProtoMessage() {}
+
+func (*LabelSelectorAttributes) ProtoMessage() {}
+
+func (*LocalSubjectAccessReview) ProtoMessage() {}
+
+func (*NonResourceAttributes) ProtoMessage() {}
+
+func (*NonResourceRule) ProtoMessage() {}
+
+func (*ResourceAttributes) ProtoMessage() {}
+
+func (*ResourceRule) ProtoMessage() {}
+
+func (*SelfSubjectAccessReview) ProtoMessage() {}
+
+func (*SelfSubjectAccessReviewSpec) ProtoMessage() {}
+
+func (*SelfSubjectRulesReview) ProtoMessage() {}
+
+func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
+
+func (*SubjectAccessReview) ProtoMessage() {}
+
+func (*SubjectAccessReviewSpec) ProtoMessage() {}
+
+func (*SubjectAccessReviewStatus) ProtoMessage() {}
+
+func (*SubjectRulesReviewStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/authorization/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/authorization/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..43e3b62e
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authorization/v1/zz_generated.model_name.go
@@ -0,0 +1,97 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FieldSelectorAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.FieldSelectorAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LabelSelectorAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.LabelSelectorAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LocalSubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.LocalSubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourceAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.NonResourceAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourceRule) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.NonResourceRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.ResourceAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceRule) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.ResourceRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SelfSubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectAccessReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectRulesReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SelfSubjectRulesReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectRulesReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SubjectAccessReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SubjectAccessReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectRulesReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1.SubjectRulesReviewStatus"
+}
diff --git a/operator/vendor/k8s.io/api/authorization/v1beta1/doc.go b/operator/vendor/k8s.io/api/authorization/v1beta1/doc.go
index 9f7332d4..8937d1c1 100644
--- a/operator/vendor/k8s.io/api/authorization/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/authorization/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.authorization.v1beta1
// +groupName=authorization.k8s.io
diff --git a/operator/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
index 5007d1b4..9578cfec 100644
--- a/operator/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
@@ -23,520 +23,42 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v11 "k8s.io/api/authorization/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{0}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
-
-func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} }
-func (*LocalSubjectAccessReview) ProtoMessage() {}
-func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{1}
-}
-func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src)
-}
-func (m *LocalSubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo
-
-func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} }
-func (*NonResourceAttributes) ProtoMessage() {}
-func (*NonResourceAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{2}
-}
-func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourceAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourceAttributes.Merge(m, src)
-}
-func (m *NonResourceAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourceAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo
-
-func (m *NonResourceRule) Reset() { *m = NonResourceRule{} }
-func (*NonResourceRule) ProtoMessage() {}
-func (*NonResourceRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{3}
-}
-func (m *NonResourceRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourceRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourceRule.Merge(m, src)
-}
-func (m *NonResourceRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourceRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourceRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo
-
-func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} }
-func (*ResourceAttributes) ProtoMessage() {}
-func (*ResourceAttributes) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{4}
-}
-func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceAttributes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceAttributes.Merge(m, src)
-}
-func (m *ResourceAttributes) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceAttributes) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceAttributes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo
-
-func (m *ResourceRule) Reset() { *m = ResourceRule{} }
-func (*ResourceRule) ProtoMessage() {}
-func (*ResourceRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{5}
-}
-func (m *ResourceRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceRule.Merge(m, src)
-}
-func (m *ResourceRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceRule proto.InternalMessageInfo
-
-func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} }
-func (*SelfSubjectAccessReview) ProtoMessage() {}
-func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{6}
-}
-func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src)
-}
-func (m *SelfSubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo
+func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} }
-func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} }
-func (*SelfSubjectAccessReviewSpec) ProtoMessage() {}
-func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{7}
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src)
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo
-
-func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
-func (*SelfSubjectRulesReview) ProtoMessage() {}
-func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{8}
-}
-func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src)
-}
-func (m *SelfSubjectRulesReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo
-
-func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
-func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
-func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{9}
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src)
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo
-
-func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
-func (*SubjectAccessReview) ProtoMessage() {}
-func (*SubjectAccessReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{10}
-}
-func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReview.Merge(m, src)
-}
-func (m *SubjectAccessReview) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReview) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m)
-}
+func (m *NonResourceRule) Reset() { *m = NonResourceRule{} }
-var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo
+func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} }
-func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} }
-func (*SubjectAccessReviewSpec) ProtoMessage() {}
-func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{11}
-}
-func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src)
-}
-func (m *SubjectAccessReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m)
-}
+func (m *ResourceRule) Reset() { *m = ResourceRule{} }
-var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo
+func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} }
-func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} }
-func (*SubjectAccessReviewStatus) ProtoMessage() {}
-func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{12}
-}
-func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src)
-}
-func (m *SubjectAccessReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m)
-}
+func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} }
-var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo
+func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} }
-func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
-func (*SubjectRulesReviewStatus) ProtoMessage() {}
-func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_8eab727787743457, []int{13}
-}
-func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src)
-}
-func (m *SubjectRulesReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m)
-}
+func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} }
-var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo
+func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} }
-func init() {
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue")
- proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview")
- proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.NonResourceAttributes")
- proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1beta1.NonResourceRule")
- proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.ResourceAttributes")
- proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1beta1.ResourceRule")
- proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReview")
- proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReviewSpec")
- proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReview")
- proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec")
- proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview")
- proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry")
- proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus")
- proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus")
-}
+func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} }
-func init() {
- proto.RegisterFile("k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_8eab727787743457)
-}
+func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} }
-var fileDescriptor_8eab727787743457 = []byte{
- // 1192 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4f, 0x6f, 0xe3, 0x44,
- 0x14, 0x8f, 0xf3, 0xa7, 0x4d, 0x26, 0x1b, 0xda, 0x9d, 0xaa, 0x5b, 0x6f, 0x11, 0x49, 0x14, 0x24,
- 0x54, 0xb4, 0x8b, 0xb3, 0xad, 0x0a, 0x5d, 0x0a, 0x7b, 0xa8, 0xd5, 0x2e, 0xaa, 0xd4, 0x5d, 0x56,
- 0x53, 0xb5, 0x07, 0x56, 0x02, 0x26, 0xce, 0x34, 0x31, 0x75, 0x6c, 0xe3, 0x19, 0xa7, 0x14, 0x71,
- 0xd8, 0x23, 0x47, 0x8e, 0x1c, 0x38, 0x70, 0xe2, 0x3b, 0x70, 0x41, 0x82, 0x53, 0x8f, 0x7b, 0x2c,
- 0x12, 0x8a, 0xa8, 0xf9, 0x10, 0x5c, 0xd1, 0x8c, 0x27, 0xb1, 0x9d, 0xba, 0x4d, 0xdb, 0x03, 0x7b,
- 0xd9, 0x5b, 0xe6, 0xfd, 0x7e, 0xef, 0xcf, 0xbc, 0x79, 0xf3, 0xfc, 0x26, 0xe0, 0xc1, 0xe1, 0x43,
- 0xaa, 0x99, 0x4e, 0x13, 0xbb, 0x66, 0x13, 0xfb, 0xac, 0xeb, 0x78, 0xe6, 0xb7, 0x98, 0x99, 0x8e,
- 0xdd, 0xec, 0x2f, 0xb7, 0x08, 0xc3, 0xcb, 0xcd, 0x0e, 0xb1, 0x89, 0x87, 0x19, 0x69, 0x6b, 0xae,
- 0xe7, 0x30, 0x07, 0xd6, 0x43, 0x0d, 0x0d, 0xbb, 0xa6, 0x96, 0xd0, 0xd0, 0xa4, 0xc6, 0xe2, 0x7b,
- 0x1d, 0x93, 0x75, 0xfd, 0x96, 0x66, 0x38, 0xbd, 0x66, 0xc7, 0xe9, 0x38, 0x4d, 0xa1, 0xd8, 0xf2,
- 0x0f, 0xc4, 0x4a, 0x2c, 0xc4, 0xaf, 0xd0, 0xe0, 0xe2, 0xbd, 0x4b, 0x42, 0x18, 0xf7, 0xbe, 0xb8,
- 0x1a, 0x91, 0x7b, 0xd8, 0xe8, 0x9a, 0x36, 0xf1, 0x8e, 0x9b, 0xee, 0x61, 0x87, 0x0b, 0x68, 0xb3,
- 0x47, 0x18, 0x4e, 0xd3, 0x6a, 0x5e, 0xa4, 0xe5, 0xf9, 0x36, 0x33, 0x7b, 0xe4, 0x9c, 0xc2, 0x07,
- 0x93, 0x14, 0xa8, 0xd1, 0x25, 0x3d, 0x3c, 0xae, 0xd7, 0x58, 0x03, 0x60, 0xeb, 0x1b, 0xe6, 0xe1,
- 0x7d, 0x6c, 0xf9, 0x04, 0xd6, 0x40, 0xc1, 0x64, 0xa4, 0x47, 0x55, 0xa5, 0x9e, 0x5b, 0x2a, 0xe9,
- 0xa5, 0x60, 0x50, 0x2b, 0x6c, 0x73, 0x01, 0x0a, 0xe5, 0xeb, 0xc5, 0x1f, 0x7f, 0xae, 0x65, 0x5e,
- 0xfc, 0x55, 0xcf, 0x34, 0x7e, 0xcb, 0x02, 0x75, 0xc7, 0x31, 0xb0, 0xb5, 0xeb, 0xb7, 0xbe, 0x22,
- 0x06, 0xdb, 0x30, 0x0c, 0x42, 0x29, 0x22, 0x7d, 0x93, 0x1c, 0xc1, 0x2f, 0x41, 0x91, 0xef, 0xac,
- 0x8d, 0x19, 0x56, 0x95, 0xba, 0xb2, 0x54, 0x5e, 0x79, 0xa0, 0x45, 0xa7, 0x30, 0x0a, 0x50, 0x73,
- 0x0f, 0x3b, 0x5c, 0x40, 0x35, 0xce, 0xd6, 0xfa, 0xcb, 0xda, 0xa7, 0xc2, 0xd6, 0x13, 0xc2, 0xb0,
- 0x0e, 0x4f, 0x06, 0xb5, 0x4c, 0x30, 0xa8, 0x81, 0x48, 0x86, 0x46, 0x56, 0xe1, 0x73, 0x90, 0xa7,
- 0x2e, 0x31, 0xd4, 0xac, 0xb0, 0xfe, 0xa1, 0x36, 0xe9, 0x8c, 0xb5, 0x94, 0x30, 0x77, 0x5d, 0x62,
- 0xe8, 0xb7, 0xa4, 0x9b, 0x3c, 0x5f, 0x21, 0x61, 0x14, 0x1a, 0x60, 0x8a, 0x32, 0xcc, 0x7c, 0xaa,
- 0xe6, 0x84, 0xf9, 0x8f, 0x6e, 0x66, 0x5e, 0x98, 0xd0, 0xdf, 0x90, 0x0e, 0xa6, 0xc2, 0x35, 0x92,
- 0xa6, 0x1b, 0xcf, 0xc1, 0xfc, 0x53, 0xc7, 0x46, 0x84, 0x3a, 0xbe, 0x67, 0x90, 0x0d, 0xc6, 0x3c,
- 0xb3, 0xe5, 0x33, 0x42, 0x61, 0x1d, 0xe4, 0x5d, 0xcc, 0xba, 0x22, 0x71, 0xa5, 0x28, 0xbe, 0x67,
- 0x98, 0x75, 0x91, 0x40, 0x38, 0xa3, 0x4f, 0xbc, 0x96, 0xd8, 0x7c, 0x8c, 0xb1, 0x4f, 0xbc, 0x16,
- 0x12, 0x48, 0xe3, 0x6b, 0x30, 0x13, 0x33, 0x8e, 0x7c, 0x4b, 0x9c, 0x2d, 0x87, 0x12, 0x67, 0xcb,
- 0x35, 0x28, 0x0a, 0xe5, 0xf0, 0x11, 0x98, 0xb1, 0x23, 0x9d, 0x3d, 0xb4, 0x43, 0xd5, 0xac, 0xa0,
- 0xce, 0x05, 0x83, 0x5a, 0xdc, 0x1c, 0x87, 0xd0, 0x38, 0xb7, 0xf1, 0x53, 0x1e, 0xc0, 0x94, 0xdd,
- 0x34, 0x41, 0xc9, 0xc6, 0x3d, 0x42, 0x5d, 0x6c, 0x10, 0xb9, 0xa5, 0xdb, 0x32, 0xe0, 0xd2, 0xd3,
- 0x21, 0x80, 0x22, 0xce, 0xe4, 0xcd, 0xc1, 0xb7, 0x41, 0xa1, 0xe3, 0x39, 0xbe, 0x2b, 0x4e, 0xa7,
- 0xa4, 0x57, 0x24, 0xa5, 0xf0, 0x09, 0x17, 0xa2, 0x10, 0x83, 0xef, 0x82, 0xe9, 0x3e, 0xf1, 0xa8,
- 0xe9, 0xd8, 0x6a, 0x5e, 0xd0, 0x66, 0x24, 0x6d, 0x7a, 0x3f, 0x14, 0xa3, 0x21, 0x0e, 0xef, 0x83,
- 0xa2, 0x27, 0x03, 0x57, 0x0b, 0x82, 0x3b, 0x2b, 0xb9, 0xc5, 0x51, 0x06, 0x47, 0x0c, 0xf8, 0x3e,
- 0x28, 0x53, 0xbf, 0x35, 0x52, 0x98, 0x12, 0x0a, 0x73, 0x52, 0xa1, 0xbc, 0x1b, 0x41, 0x28, 0xce,
- 0xe3, 0xdb, 0xe2, 0x7b, 0x54, 0xa7, 0x93, 0xdb, 0xe2, 0x29, 0x40, 0x02, 0x81, 0x3d, 0x50, 0x39,
- 0x30, 0x89, 0xd5, 0xde, 0x25, 0x16, 0x31, 0x98, 0xe3, 0xa9, 0x45, 0x51, 0x7c, 0xab, 0x97, 0x15,
- 0x9f, 0xf6, 0x38, 0xae, 0x11, 0xa5, 0x5d, 0xbf, 0x1d, 0x0c, 0x6a, 0x95, 0x04, 0x88, 0x92, 0xd6,
- 0xb9, 0x3b, 0x0b, 0xb7, 0x88, 0x35, 0x72, 0x57, 0xba, 0x82, 0xbb, 0x9d, 0xb8, 0xc6, 0xb8, 0xbb,
- 0x04, 0x88, 0x92, 0xd6, 0x1b, 0x7f, 0x28, 0xe0, 0xd6, 0xf5, 0xea, 0xf1, 0x1e, 0x28, 0x61, 0xd7,
- 0x14, 0x87, 0x3a, 0xac, 0xc4, 0x0a, 0xaf, 0x9a, 0x8d, 0x67, 0xdb, 0xa1, 0x10, 0x45, 0x38, 0x27,
- 0x0f, 0x53, 0xcd, 0x6f, 0xed, 0x88, 0x3c, 0x74, 0x49, 0x51, 0x84, 0xc3, 0x35, 0x50, 0x19, 0x2e,
- 0x44, 0x09, 0xaa, 0x79, 0xa1, 0x20, 0x36, 0x81, 0xe2, 0x00, 0x4a, 0xf2, 0x1a, 0xbf, 0x67, 0xc1,
- 0xc2, 0x2e, 0xb1, 0x0e, 0x5e, 0x4d, 0xcf, 0xfb, 0x22, 0xd1, 0xf3, 0x1e, 0x5d, 0xa1, 0x29, 0xa5,
- 0x87, 0xfa, 0x6a, 0xfb, 0xde, 0x2f, 0x59, 0xf0, 0xe6, 0x25, 0x81, 0xc1, 0xef, 0x00, 0xf4, 0xce,
- 0xb5, 0x11, 0x99, 0xd1, 0xd5, 0xc9, 0x01, 0x9d, 0x6f, 0x41, 0xfa, 0x9d, 0x60, 0x50, 0x4b, 0x69,
- 0x4d, 0x28, 0xc5, 0x0f, 0xfc, 0x5e, 0x01, 0xf3, 0x76, 0x5a, 0x5b, 0x96, 0x59, 0x5f, 0x9b, 0x1c,
- 0x41, 0x6a, 0x57, 0xd7, 0xef, 0x06, 0x83, 0x5a, 0x7a, 0xc3, 0x47, 0xe9, 0x0e, 0xf9, 0x17, 0xf6,
- 0x4e, 0x2c, 0x51, 0xfc, 0xd2, 0xfc, 0x7f, 0xb5, 0xf6, 0x79, 0xa2, 0xd6, 0x3e, 0xbe, 0x56, 0xad,
- 0xc5, 0x22, 0xbd, 0xb0, 0xd4, 0x5a, 0x63, 0xa5, 0xb6, 0x7e, 0xe5, 0x52, 0x8b, 0x5b, 0xbf, 0xbc,
- 0xd2, 0x9e, 0x80, 0xc5, 0x8b, 0xa3, 0xba, 0xf6, 0x87, 0xa9, 0xf1, 0x6b, 0x16, 0xcc, 0xbd, 0x1e,
- 0x76, 0x6e, 0x76, 0xe9, 0x4f, 0xf3, 0x60, 0xe1, 0xf5, 0x85, 0xbf, 0xfc, 0xc2, 0xf3, 0x11, 0xc1,
- 0xa7, 0xc4, 0x93, 0x63, 0xcd, 0xe8, 0xac, 0xf6, 0x28, 0xf1, 0x90, 0x40, 0x60, 0x7d, 0x38, 0xf9,
- 0x84, 0x1f, 0x2c, 0xc0, 0x33, 0x2d, 0xbf, 0x85, 0x72, 0xec, 0x31, 0x41, 0x81, 0xf0, 0x79, 0x5e,
- 0x2d, 0xd4, 0x73, 0x4b, 0xe5, 0x95, 0xcd, 0x1b, 0xd7, 0x8a, 0x26, 0x9e, 0x05, 0x5b, 0x36, 0xf3,
- 0x8e, 0xa3, 0x09, 0x4b, 0xc8, 0x50, 0xe8, 0x01, 0xbe, 0x05, 0x72, 0xbe, 0xd9, 0x96, 0x03, 0x50,
- 0x59, 0x52, 0x72, 0x7b, 0xdb, 0x9b, 0x88, 0xcb, 0x17, 0x0f, 0xe4, 0xcb, 0x42, 0x98, 0x80, 0xb3,
- 0x20, 0x77, 0x48, 0x8e, 0xc3, 0x7b, 0x86, 0xf8, 0x4f, 0xa8, 0x83, 0x42, 0x9f, 0x3f, 0x3a, 0x64,
- 0x9e, 0xef, 0x4f, 0x8e, 0x34, 0x7a, 0xa8, 0xa0, 0x50, 0x75, 0x3d, 0xfb, 0x50, 0x69, 0xfc, 0xa9,
- 0x80, 0xbb, 0x17, 0x16, 0x24, 0x1f, 0x03, 0xb1, 0x65, 0x39, 0x47, 0xa4, 0x2d, 0x7c, 0x17, 0xa3,
- 0x31, 0x70, 0x23, 0x14, 0xa3, 0x21, 0x0e, 0xdf, 0x01, 0x53, 0x6d, 0x62, 0x9b, 0xa4, 0x2d, 0x06,
- 0xc6, 0x62, 0x54, 0xcb, 0x9b, 0x42, 0x8a, 0x24, 0xca, 0x79, 0x1e, 0xc1, 0xd4, 0xb1, 0xe5, 0x88,
- 0x3a, 0xe2, 0x21, 0x21, 0x45, 0x12, 0x85, 0x1b, 0x60, 0x86, 0xf0, 0x30, 0xc5, 0x26, 0xb6, 0x3c,
- 0xcf, 0x19, 0x9e, 0xec, 0x82, 0x54, 0x98, 0xd9, 0x4a, 0xc2, 0x68, 0x9c, 0xdf, 0xf8, 0x37, 0x0b,
- 0xd4, 0x8b, 0xda, 0x1e, 0x3c, 0x8c, 0xa6, 0x18, 0x01, 0x8a, 0x41, 0xaa, 0xbc, 0xa2, 0x5d, 0xfd,
- 0xca, 0x70, 0x35, 0x7d, 0x5e, 0x46, 0x53, 0x89, 0x4b, 0x63, 0x93, 0x8f, 0x58, 0xc2, 0x23, 0x30,
- 0x6b, 0x27, 0x1f, 0x14, 0xe1, 0x4c, 0x56, 0x5e, 0x59, 0xbe, 0xd6, 0x05, 0x11, 0x2e, 0x55, 0xe9,
- 0x72, 0x76, 0x0c, 0xa0, 0xe8, 0x9c, 0x13, 0xb8, 0x02, 0x80, 0x69, 0x1b, 0x4e, 0xcf, 0xb5, 0x08,
- 0x23, 0x22, 0x81, 0xc5, 0xa8, 0x5b, 0x6e, 0x8f, 0x10, 0x14, 0x63, 0xa5, 0x65, 0x3e, 0x7f, 0xbd,
- 0xcc, 0xeb, 0x8f, 0x4f, 0xce, 0xaa, 0x99, 0x97, 0x67, 0xd5, 0xcc, 0xe9, 0x59, 0x35, 0xf3, 0x22,
- 0xa8, 0x2a, 0x27, 0x41, 0x55, 0x79, 0x19, 0x54, 0x95, 0xd3, 0xa0, 0xaa, 0xfc, 0x1d, 0x54, 0x95,
- 0x1f, 0xfe, 0xa9, 0x66, 0x3e, 0xab, 0x4f, 0xfa, 0x33, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
- 0x46, 0xf7, 0xe0, 0x3d, 0xaf, 0x10, 0x00, 0x00,
-}
+func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} }
func (m ExtraValue) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1102,7 +624,7 @@ func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
@@ -1703,7 +1225,7 @@ func (this *SubjectAccessReviewSpec) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
diff --git a/operator/vendor/k8s.io/api/authorization/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/authorization/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..69af1b6c
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authorization/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,50 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*ExtraValue) ProtoMessage() {}
+
+func (*LocalSubjectAccessReview) ProtoMessage() {}
+
+func (*NonResourceAttributes) ProtoMessage() {}
+
+func (*NonResourceRule) ProtoMessage() {}
+
+func (*ResourceAttributes) ProtoMessage() {}
+
+func (*ResourceRule) ProtoMessage() {}
+
+func (*SelfSubjectAccessReview) ProtoMessage() {}
+
+func (*SelfSubjectAccessReviewSpec) ProtoMessage() {}
+
+func (*SelfSubjectRulesReview) ProtoMessage() {}
+
+func (*SelfSubjectRulesReviewSpec) ProtoMessage() {}
+
+func (*SubjectAccessReview) ProtoMessage() {}
+
+func (*SubjectAccessReviewSpec) ProtoMessage() {}
+
+func (*SubjectAccessReviewStatus) ProtoMessage() {}
+
+func (*SubjectRulesReviewStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/authorization/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/authorization/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..0f1b55c1
--- /dev/null
+++ b/operator/vendor/k8s.io/api/authorization/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,87 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LocalSubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.LocalSubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourceAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.NonResourceAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourceRule) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.NonResourceRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceAttributes) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.ResourceAttributes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceRule) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.ResourceRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectAccessReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SelfSubjectAccessReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectRulesReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SelfSubjectRulesReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SelfSubjectRulesReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SelfSubjectRulesReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReview) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SubjectAccessReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SubjectAccessReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectAccessReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SubjectAccessReviewStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SubjectRulesReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.authorization.v1beta1.SubjectRulesReviewStatus"
+}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v1/doc.go b/operator/vendor/k8s.io/api/autoscaling/v1/doc.go
index 4ee085e1..9f502a66 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v1/doc.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v1/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.autoscaling.v1
package v1
diff --git a/operator/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/operator/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
index 3e3c2313..6028054a 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
@@ -24,748 +24,56 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (*ContainerResourceMetricSource) ProtoMessage() {}
-func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{0}
-}
-func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src)
-}
-func (m *ContainerResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m)
-}
+func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo
+func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (*ContainerResourceMetricStatus) ProtoMessage() {}
-func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{1}
-}
-func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src)
-}
-func (m *ContainerResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m)
-}
+func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (*CrossVersionObjectReference) ProtoMessage() {}
-func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{2}
-}
-func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CrossVersionObjectReference.Merge(m, src)
-}
-func (m *CrossVersionObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *CrossVersionObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (*ExternalMetricSource) ProtoMessage() {}
-func (*ExternalMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{3}
-}
-func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricSource.Merge(m, src)
-}
-func (m *ExternalMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (*ExternalMetricStatus) ProtoMessage() {}
-func (*ExternalMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{4}
-}
-func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricStatus.Merge(m, src)
-}
-func (m *ExternalMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m)
-}
+func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo
+func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (*HorizontalPodAutoscaler) ProtoMessage() {}
-func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{5}
-}
-func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src)
-}
-func (m *HorizontalPodAutoscaler) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo
-
-func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
-func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{6}
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m)
-}
+func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo
+func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (*HorizontalPodAutoscalerList) ProtoMessage() {}
-func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{7}
-}
-func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo
-
-func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
-func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{8}
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m)
-}
+func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo
+func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
-func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{9}
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m)
-}
+func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo
+func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
-func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (*MetricSpec) ProtoMessage() {}
-func (*MetricSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{10}
-}
-func (m *MetricSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricSpec.Merge(m, src)
-}
-func (m *MetricSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricSpec.DiscardUnknown(m)
-}
+func (m *Scale) Reset() { *m = Scale{} }
-var xxx_messageInfo_MetricSpec proto.InternalMessageInfo
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (*MetricStatus) ProtoMessage() {}
-func (*MetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{11}
-}
-func (m *MetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricStatus.Merge(m, src)
-}
-func (m *MetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricStatus proto.InternalMessageInfo
-
-func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (*ObjectMetricSource) ProtoMessage() {}
-func (*ObjectMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{12}
-}
-func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricSource.Merge(m, src)
-}
-func (m *ObjectMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo
-
-func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (*ObjectMetricStatus) ProtoMessage() {}
-func (*ObjectMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{13}
-}
-func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricStatus.Merge(m, src)
-}
-func (m *ObjectMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo
-
-func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (*PodsMetricSource) ProtoMessage() {}
-func (*PodsMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{14}
-}
-func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricSource.Merge(m, src)
-}
-func (m *PodsMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo
-
-func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (*PodsMetricStatus) ProtoMessage() {}
-func (*PodsMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{15}
-}
-func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricStatus.Merge(m, src)
-}
-func (m *PodsMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo
-
-func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (*ResourceMetricSource) ProtoMessage() {}
-func (*ResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{16}
-}
-func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricSource.Merge(m, src)
-}
-func (m *ResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo
-
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
-func (*ResourceMetricStatus) ProtoMessage() {}
-func (*ResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{17}
-}
-func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricStatus.Merge(m, src)
-}
-func (m *ResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo
-
-func (m *Scale) Reset() { *m = Scale{} }
-func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{18}
-}
-func (m *Scale) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scale) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scale.Merge(m, src)
-}
-func (m *Scale) XXX_Size() int {
- return m.Size()
-}
-func (m *Scale) XXX_DiscardUnknown() {
- xxx_messageInfo_Scale.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Scale proto.InternalMessageInfo
-
-func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{19}
-}
-func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleSpec.Merge(m, src)
-}
-func (m *ScaleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
-
-func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1972394c0c7aac8b, []int{20}
-}
-func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleStatus.Merge(m, src)
-}
-func (m *ScaleStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricSource")
- proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ContainerResourceMetricStatus")
- proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference")
- proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource")
- proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus")
- proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler")
- proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition")
- proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList")
- proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec")
- proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus")
- proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec")
- proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus")
- proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource")
- proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus")
- proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource")
- proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus")
- proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource")
- proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus")
- proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale")
- proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec")
- proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_1972394c0c7aac8b)
-}
-
-var fileDescriptor_1972394c0c7aac8b = []byte{
- // 1593 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x13, 0xd7,
- 0x16, 0x8e, 0x7f, 0x12, 0x92, 0xe3, 0x90, 0x9f, 0x0b, 0x0f, 0x92, 0xf0, 0xf0, 0x44, 0xf3, 0x10,
- 0x0a, 0xef, 0x3d, 0xc6, 0x8d, 0x4b, 0x11, 0x5d, 0x55, 0xb1, 0x5b, 0x0a, 0x6a, 0x0c, 0xe1, 0x26,
- 0x50, 0xfa, 0x2b, 0x6e, 0xc6, 0x17, 0x67, 0x88, 0x67, 0xc6, 0x9a, 0x19, 0x5b, 0x04, 0x09, 0xa9,
- 0x5d, 0x74, 0xdf, 0x0d, 0xed, 0xb6, 0x95, 0xba, 0xed, 0x9a, 0x75, 0x77, 0x2c, 0x59, 0x20, 0x95,
- 0x95, 0x55, 0xa6, 0x8b, 0x2e, 0xba, 0xea, 0x96, 0x55, 0x75, 0xef, 0xdc, 0x19, 0xcf, 0xd8, 0x9e,
- 0x89, 0xe3, 0x84, 0xa8, 0xad, 0xd8, 0x65, 0x7c, 0xcf, 0xf9, 0xce, 0xbd, 0xe7, 0xff, 0x9c, 0xc0,
- 0xb9, 0xed, 0x4b, 0xb6, 0xa2, 0x99, 0x05, 0xd2, 0xd0, 0x0a, 0xa4, 0xe9, 0x98, 0xb6, 0x4a, 0xea,
- 0x9a, 0x51, 0x2b, 0xb4, 0x96, 0x0b, 0x35, 0x6a, 0x50, 0x8b, 0x38, 0xb4, 0xaa, 0x34, 0x2c, 0xd3,
- 0x31, 0xd1, 0xbc, 0x47, 0xaa, 0x90, 0x86, 0xa6, 0x84, 0x48, 0x95, 0xd6, 0xf2, 0xc2, 0xf9, 0x9a,
- 0xe6, 0x6c, 0x35, 0x37, 0x15, 0xd5, 0xd4, 0x0b, 0x35, 0xb3, 0x66, 0x16, 0x38, 0xc7, 0x66, 0xf3,
- 0x2e, 0xff, 0xe2, 0x1f, 0xfc, 0x2f, 0x0f, 0x69, 0x41, 0x0e, 0x09, 0x55, 0x4d, 0x8b, 0xf6, 0x91,
- 0xb6, 0x70, 0xa1, 0x43, 0xa3, 0x13, 0x75, 0x4b, 0x33, 0xa8, 0xb5, 0x53, 0x68, 0x6c, 0xd7, 0x38,
- 0x93, 0x45, 0x6d, 0xb3, 0x69, 0xa9, 0x74, 0x4f, 0x5c, 0x76, 0x41, 0xa7, 0x0e, 0xe9, 0x27, 0xab,
- 0x10, 0xc7, 0x65, 0x35, 0x0d, 0x47, 0xd3, 0x7b, 0xc5, 0x5c, 0xdc, 0x8d, 0xc1, 0x56, 0xb7, 0xa8,
- 0x4e, 0xba, 0xf9, 0xe4, 0xdf, 0xd2, 0x70, 0xba, 0x6c, 0x1a, 0x0e, 0x61, 0x1c, 0x58, 0x3c, 0xa2,
- 0x42, 0x1d, 0x4b, 0x53, 0xd7, 0xf9, 0xdf, 0xa8, 0x0c, 0x59, 0x83, 0xe8, 0x74, 0x2e, 0xb5, 0x98,
- 0x5a, 0x9a, 0x28, 0x15, 0x9e, 0xb4, 0xa5, 0x11, 0xb7, 0x2d, 0x65, 0xaf, 0x11, 0x9d, 0xbe, 0x6c,
- 0x4b, 0x52, 0xaf, 0xe2, 0x14, 0x1f, 0x86, 0x91, 0x60, 0xce, 0x8c, 0x6e, 0xc3, 0x9c, 0x43, 0xac,
- 0x1a, 0x75, 0x56, 0x5a, 0xd4, 0x22, 0x35, 0x7a, 0xd3, 0xd1, 0xea, 0xda, 0x03, 0xe2, 0x68, 0xa6,
- 0x31, 0x97, 0x5e, 0x4c, 0x2d, 0x8d, 0x96, 0xfe, 0xed, 0xb6, 0xa5, 0xb9, 0x8d, 0x18, 0x1a, 0x1c,
- 0xcb, 0x8d, 0x5a, 0x80, 0x22, 0x67, 0xb7, 0x48, 0xbd, 0x49, 0xe7, 0x32, 0x8b, 0xa9, 0xa5, 0x5c,
- 0x51, 0x51, 0x3a, 0x0e, 0x12, 0x68, 0x45, 0x69, 0x6c, 0xd7, 0xb8, 0xc7, 0xf8, 0x26, 0x53, 0x6e,
- 0x34, 0x89, 0xe1, 0x68, 0xce, 0x4e, 0xe9, 0x84, 0xdb, 0x96, 0xd0, 0x46, 0x0f, 0x1a, 0xee, 0x23,
- 0x01, 0x15, 0x60, 0x42, 0xf5, 0xf5, 0x36, 0x37, 0xca, 0x75, 0x33, 0x2b, 0x74, 0x33, 0xd1, 0x51,
- 0x68, 0x87, 0x46, 0xfe, 0x23, 0x41, 0xd3, 0x0e, 0x71, 0x9a, 0xf6, 0xc1, 0x68, 0xfa, 0x13, 0x98,
- 0x57, 0x9b, 0x96, 0x45, 0x8d, 0x78, 0x55, 0x9f, 0x76, 0xdb, 0xd2, 0x7c, 0x39, 0x8e, 0x08, 0xc7,
- 0xf3, 0xa3, 0x87, 0x70, 0x2c, 0x7a, 0xb8, 0x1f, 0x6d, 0x9f, 0x12, 0x0f, 0x3c, 0x56, 0xee, 0x85,
- 0xc4, 0xfd, 0xe4, 0x44, 0x75, 0x9e, 0x1d, 0x40, 0xe7, 0x8f, 0x52, 0x70, 0xaa, 0x6c, 0x99, 0xb6,
- 0x7d, 0x8b, 0x5a, 0xb6, 0x66, 0x1a, 0xd7, 0x37, 0xef, 0x51, 0xd5, 0xc1, 0xf4, 0x2e, 0xb5, 0xa8,
- 0xa1, 0x52, 0xb4, 0x08, 0xd9, 0x6d, 0xcd, 0xa8, 0x0a, 0x8d, 0x4f, 0xfa, 0x1a, 0xff, 0x40, 0x33,
- 0xaa, 0x98, 0x9f, 0x30, 0x0a, 0x6e, 0x93, 0x74, 0x94, 0x22, 0xa4, 0xf0, 0x22, 0x00, 0x69, 0x68,
- 0x42, 0x00, 0x57, 0xc5, 0x44, 0x09, 0x09, 0x3a, 0x58, 0x59, 0xbb, 0x2a, 0x4e, 0x70, 0x88, 0x4a,
- 0xfe, 0x26, 0x03, 0xc7, 0xdf, 0xbb, 0xef, 0x50, 0xcb, 0x20, 0xf5, 0x48, 0xb0, 0x15, 0x01, 0x74,
- 0xfe, 0x7d, 0xad, 0xe3, 0x08, 0x01, 0x58, 0x25, 0x38, 0xc1, 0x21, 0x2a, 0x64, 0xc2, 0x94, 0xf7,
- 0xb5, 0x4e, 0xeb, 0x54, 0x75, 0x4c, 0x8b, 0x5f, 0x36, 0x57, 0x7c, 0x33, 0xc9, 0x1e, 0xb6, 0xc2,
- 0x52, 0x8f, 0xd2, 0x5a, 0x56, 0x56, 0xc9, 0x26, 0xad, 0xfb, 0xac, 0x25, 0xe4, 0xb6, 0xa5, 0xa9,
- 0x4a, 0x04, 0x0e, 0x77, 0xc1, 0x23, 0x02, 0x39, 0x2f, 0x20, 0xf6, 0x63, 0xfd, 0x69, 0xb7, 0x2d,
- 0xe5, 0x36, 0x3a, 0x30, 0x38, 0x8c, 0x19, 0x13, 0xd5, 0xd9, 0x57, 0x1d, 0xd5, 0xf2, 0x77, 0xbd,
- 0x86, 0xf1, 0x62, 0xf3, 0x6f, 0x61, 0x98, 0x2d, 0x98, 0x14, 0x61, 0xb3, 0x1f, 0xcb, 0x1c, 0x17,
- 0xcf, 0x9a, 0x2c, 0x87, 0xb0, 0x70, 0x04, 0x19, 0xed, 0xf4, 0x4f, 0x04, 0xc3, 0x19, 0xe8, 0xe4,
- 0x5e, 0x92, 0x80, 0xfc, 0x38, 0x0d, 0x27, 0xaf, 0x98, 0x96, 0xf6, 0x80, 0x45, 0x79, 0x7d, 0xcd,
- 0xac, 0xae, 0x88, 0xca, 0x4f, 0x2d, 0x74, 0x07, 0xc6, 0x99, 0xf6, 0xaa, 0xc4, 0x21, 0xdc, 0x46,
- 0xb9, 0xe2, 0x1b, 0x83, 0xe9, 0xda, 0x4b, 0x0c, 0x15, 0xea, 0x90, 0x8e, 0x55, 0x3b, 0xbf, 0xe1,
- 0x00, 0x15, 0xdd, 0x86, 0xac, 0xdd, 0xa0, 0xaa, 0xb0, 0xe4, 0x45, 0x25, 0xb6, 0x03, 0x51, 0x62,
- 0xee, 0xb8, 0xde, 0xa0, 0x6a, 0x27, 0x8f, 0xb0, 0x2f, 0xcc, 0x11, 0xd1, 0x1d, 0x18, 0xb3, 0xb9,
- 0xaf, 0x09, 0xb3, 0x5d, 0x1a, 0x02, 0x9b, 0xf3, 0x97, 0xa6, 0x04, 0xfa, 0x98, 0xf7, 0x8d, 0x05,
- 0xae, 0xfc, 0x55, 0x06, 0x16, 0x63, 0x38, 0xcb, 0xa6, 0x51, 0xd5, 0x78, 0x8a, 0xbf, 0x02, 0x59,
- 0x67, 0xa7, 0xe1, 0xbb, 0xf8, 0x05, 0xff, 0xa2, 0x1b, 0x3b, 0x0d, 0x56, 0x84, 0xce, 0xec, 0xc6,
- 0xcf, 0xe8, 0x30, 0x47, 0x40, 0xab, 0xc1, 0x83, 0xd2, 0x11, 0x2c, 0x71, 0xad, 0x97, 0x6d, 0xa9,
- 0x4f, 0xd7, 0xa5, 0x04, 0x48, 0xd1, 0xcb, 0xb3, 0x8c, 0x50, 0x27, 0xb6, 0xb3, 0x61, 0x11, 0xc3,
- 0xf6, 0x24, 0x69, 0xba, 0xef, 0xe1, 0xff, 0x1d, 0xcc, 0xc8, 0x8c, 0xa3, 0xb4, 0x20, 0x6e, 0x81,
- 0x56, 0x7b, 0xd0, 0x70, 0x1f, 0x09, 0xe8, 0x2c, 0x8c, 0x59, 0x94, 0xd8, 0xa6, 0x21, 0x0a, 0x4e,
- 0xa0, 0x5c, 0xcc, 0x7f, 0xc5, 0xe2, 0x14, 0x9d, 0x83, 0x23, 0x3a, 0xb5, 0x6d, 0x52, 0xa3, 0xa2,
- 0x1b, 0x98, 0x16, 0x84, 0x47, 0x2a, 0xde, 0xcf, 0xd8, 0x3f, 0x97, 0x9f, 0xa5, 0xe0, 0x54, 0x8c,
- 0x1e, 0x57, 0x35, 0xdb, 0x41, 0x9f, 0xf6, 0x78, 0xb1, 0x32, 0x60, 0xc6, 0xd0, 0x6c, 0xcf, 0x87,
- 0x67, 0x84, 0xec, 0x71, 0xff, 0x97, 0x90, 0x07, 0x7f, 0x08, 0xa3, 0x9a, 0x43, 0x75, 0x66, 0x95,
- 0xcc, 0x52, 0xae, 0x58, 0xdc, 0xbb, 0x9b, 0x95, 0x8e, 0x0a, 0xf8, 0xd1, 0xab, 0x0c, 0x08, 0x7b,
- 0x78, 0xf2, 0xef, 0xe9, 0xd8, 0x67, 0x31, 0x37, 0x47, 0x2d, 0x98, 0xe2, 0x5f, 0x5e, 0x2a, 0xc6,
- 0xf4, 0xae, 0x78, 0x5c, 0x52, 0x10, 0x25, 0x14, 0xef, 0xd2, 0x09, 0x71, 0x8b, 0xa9, 0xf5, 0x08,
- 0x2a, 0xee, 0x92, 0x82, 0x96, 0x21, 0xa7, 0x6b, 0x06, 0xa6, 0x8d, 0xba, 0xa6, 0x12, 0x5b, 0xf4,
- 0x40, 0xbc, 0xfc, 0x54, 0x3a, 0x3f, 0xe3, 0x30, 0x0d, 0x7a, 0x0b, 0x72, 0x3a, 0xb9, 0x1f, 0xb0,
- 0x64, 0x38, 0xcb, 0x31, 0x21, 0x2f, 0x57, 0xe9, 0x1c, 0xe1, 0x30, 0x1d, 0xba, 0x07, 0x79, 0xaf,
- 0xa6, 0x94, 0xd7, 0x6e, 0x86, 0xda, 0xa6, 0x35, 0x6a, 0xa9, 0xd4, 0x70, 0x98, 0x6b, 0x64, 0x39,
- 0x92, 0xec, 0xb6, 0xa5, 0xfc, 0x46, 0x22, 0x25, 0xde, 0x05, 0x49, 0xfe, 0x29, 0x03, 0xa7, 0x13,
- 0xd3, 0x00, 0xba, 0x0c, 0xc8, 0xdc, 0xb4, 0xa9, 0xd5, 0xa2, 0xd5, 0xf7, 0xbd, 0xae, 0x9f, 0x35,
- 0x28, 0x4c, 0xe7, 0x19, 0xaf, 0x26, 0x5e, 0xef, 0x39, 0xc5, 0x7d, 0x38, 0x90, 0x0a, 0x47, 0x59,
- 0x5c, 0x78, 0x5a, 0xd6, 0x44, 0x2f, 0xb4, 0xb7, 0xa0, 0x9b, 0x75, 0xdb, 0xd2, 0xd1, 0xd5, 0x30,
- 0x08, 0x8e, 0x62, 0xa2, 0x15, 0x98, 0x16, 0xc9, 0xbe, 0x4b, 0xeb, 0x27, 0x85, 0xd6, 0xa7, 0xcb,
- 0xd1, 0x63, 0xdc, 0x4d, 0xcf, 0x20, 0xaa, 0xd4, 0xd6, 0x2c, 0x5a, 0x0d, 0x20, 0xb2, 0x51, 0x88,
- 0x77, 0xa3, 0xc7, 0xb8, 0x9b, 0x1e, 0xe9, 0x20, 0x09, 0xd4, 0x58, 0x0b, 0x8e, 0x72, 0xc8, 0xff,
- 0xb8, 0x6d, 0x49, 0x2a, 0x27, 0x93, 0xe2, 0xdd, 0xb0, 0xe4, 0x47, 0x59, 0x10, 0xbd, 0x03, 0x0f,
- 0x90, 0x0b, 0x91, 0xd4, 0xbb, 0xd8, 0x95, 0x7a, 0x67, 0xc2, 0x8d, 0x62, 0x28, 0xcd, 0xde, 0x80,
- 0x31, 0x93, 0x47, 0x86, 0xb0, 0xcb, 0xf9, 0x84, 0x70, 0x0a, 0x4a, 0x5a, 0x00, 0x54, 0x02, 0x96,
- 0xcb, 0x44, 0x68, 0x09, 0x20, 0x74, 0x15, 0xb2, 0x0d, 0xb3, 0xea, 0x17, 0xa2, 0xff, 0x25, 0x00,
- 0xae, 0x99, 0x55, 0x3b, 0x02, 0x37, 0xce, 0x6e, 0xcc, 0x7e, 0xc5, 0x1c, 0x02, 0x7d, 0x04, 0xe3,
- 0x7e, 0xc1, 0x17, 0xdd, 0x41, 0x21, 0x01, 0xae, 0xdf, 0x00, 0x5a, 0x9a, 0x64, 0x89, 0xcc, 0x3f,
- 0xc1, 0x01, 0x1c, 0x7a, 0x08, 0xb3, 0x6a, 0xf7, 0x3c, 0x35, 0x77, 0x64, 0xd7, 0xda, 0x99, 0x38,
- 0xed, 0x96, 0xfe, 0xe5, 0xb6, 0xa5, 0xd9, 0x1e, 0x12, 0xdc, 0x2b, 0x89, 0xbd, 0x8c, 0x8a, 0x4e,
- 0x91, 0x3b, 0x45, 0xf2, 0xcb, 0xfa, 0x75, 0xfb, 0xde, 0xcb, 0xfc, 0x13, 0x1c, 0xc0, 0xc9, 0xdf,
- 0x66, 0x61, 0x32, 0xd2, 0x7d, 0x1e, 0xb2, 0x67, 0x78, 0x6d, 0xc4, 0x81, 0x79, 0x86, 0x07, 0x77,
- 0xa0, 0x9e, 0xe1, 0x41, 0x1e, 0x92, 0x67, 0x78, 0xc2, 0x0e, 0xc9, 0x33, 0x42, 0x2f, 0xeb, 0xe3,
- 0x19, 0xcf, 0x32, 0x80, 0x7a, 0x83, 0x18, 0x7d, 0x0e, 0x63, 0x5e, 0xb9, 0xd8, 0x67, 0x49, 0x0d,
- 0x9a, 0x1b, 0x51, 0x3d, 0x05, 0x6a, 0xd7, 0xf4, 0x93, 0x1e, 0x68, 0xfa, 0xa1, 0x07, 0x31, 0x25,
- 0x06, 0x35, 0x37, 0x76, 0x52, 0xfc, 0x0c, 0xc6, 0x6d, 0x7f, 0xbc, 0xca, 0x0e, 0x3f, 0x5e, 0x71,
- 0x85, 0x07, 0x83, 0x55, 0x00, 0x89, 0xaa, 0x30, 0x49, 0xc2, 0x13, 0xce, 0xe8, 0x50, 0xcf, 0x98,
- 0x61, 0xe3, 0x54, 0x64, 0xb4, 0x89, 0xa0, 0xca, 0x3f, 0x77, 0x9b, 0xd5, 0x0b, 0xfb, 0xbf, 0xa2,
- 0x59, 0x0f, 0x6f, 0xc6, 0xfc, 0x47, 0x58, 0xf6, 0xfb, 0x34, 0xcc, 0x74, 0x17, 0xc9, 0xa1, 0x96,
- 0x09, 0x0f, 0xfa, 0x6e, 0x44, 0xd2, 0x43, 0x5d, 0x3a, 0x98, 0x81, 0x06, 0xdc, 0x75, 0x86, 0x2d,
- 0x91, 0x39, 0x70, 0x4b, 0xc8, 0x3f, 0x44, 0x75, 0x34, 0xfc, 0xc2, 0x25, 0x66, 0x3d, 0x99, 0x3e,
- 0xa4, 0xf5, 0xe4, 0x2b, 0x56, 0xd3, 0x8f, 0x69, 0x38, 0xfe, 0x7a, 0x43, 0x3f, 0xf8, 0x2e, 0xef,
- 0x71, 0xaf, 0xbe, 0x5e, 0xef, 0xd9, 0x07, 0x5a, 0xb1, 0x7d, 0x99, 0x86, 0x51, 0x3e, 0x9a, 0x1d,
- 0xc2, 0x42, 0xed, 0x72, 0x64, 0xa1, 0x76, 0x26, 0xa1, 0xc2, 0xf1, 0x1b, 0xc5, 0xae, 0xcf, 0xae,
- 0x75, 0xad, 0xcf, 0xce, 0xee, 0x8a, 0x94, 0xbc, 0x2c, 0x7b, 0x1b, 0x26, 0x02, 0x81, 0xe8, 0xff,
- 0xac, 0x57, 0x15, 0x33, 0x65, 0x8a, 0xdb, 0x36, 0xd8, 0xb0, 0x04, 0xc3, 0x64, 0x40, 0x21, 0x6b,
- 0x90, 0x0b, 0x49, 0xd8, 0x1b, 0x33, 0xa3, 0xb6, 0xc3, 0xeb, 0xe2, 0x89, 0x0e, 0x75, 0x6f, 0x4e,
- 0x28, 0xbd, 0xf3, 0xe4, 0x45, 0x7e, 0xe4, 0xe9, 0x8b, 0xfc, 0xc8, 0xf3, 0x17, 0xf9, 0x91, 0x2f,
- 0xdc, 0x7c, 0xea, 0x89, 0x9b, 0x4f, 0x3d, 0x75, 0xf3, 0xa9, 0xe7, 0x6e, 0x3e, 0xf5, 0x8b, 0x9b,
- 0x4f, 0x7d, 0xfd, 0x6b, 0x7e, 0xe4, 0xe3, 0xf9, 0xd8, 0x7f, 0xa9, 0xfe, 0x19, 0x00, 0x00, 0xff,
- 0xff, 0xd7, 0x67, 0xd4, 0x08, 0x6e, 0x1d, 0x00, 0x00,
-}
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/autoscaling/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/autoscaling/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..7b073f92
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v1/generated.protomessage.pb.go
@@ -0,0 +1,64 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*ContainerResourceMetricSource) ProtoMessage() {}
+
+func (*ContainerResourceMetricStatus) ProtoMessage() {}
+
+func (*CrossVersionObjectReference) ProtoMessage() {}
+
+func (*ExternalMetricSource) ProtoMessage() {}
+
+func (*ExternalMetricStatus) ProtoMessage() {}
+
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (*MetricSpec) ProtoMessage() {}
+
+func (*MetricStatus) ProtoMessage() {}
+
+func (*ObjectMetricSource) ProtoMessage() {}
+
+func (*ObjectMetricStatus) ProtoMessage() {}
+
+func (*PodsMetricSource) ProtoMessage() {}
+
+func (*PodsMetricStatus) ProtoMessage() {}
+
+func (*ResourceMetricSource) ProtoMessage() {}
+
+func (*ResourceMetricStatus) ProtoMessage() {}
+
+func (*Scale) ProtoMessage() {}
+
+func (*ScaleSpec) ProtoMessage() {}
+
+func (*ScaleStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/autoscaling/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..d4f18650
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v1/zz_generated.model_name.go
@@ -0,0 +1,127 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ContainerResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ContainerResourceMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CrossVersionObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.CrossVersionObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ExternalMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ExternalMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscaler) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerCondition) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerList) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.MetricSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.MetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ObjectMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ObjectMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.PodsMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.PodsMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ResourceMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scale) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.Scale"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ScaleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v1.ScaleStatus"
+}
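
(Editorial aside, not part of the patch: the generated OpenAPIModelName methods above let each API type report its canonical OpenAPI definition name at runtime. Below is a minimal sketch of how a consumer might call them, assuming the vendored k8s.io/api revision introduced by this change; the local openAPIModelNamer interface is illustrative and not an upstream API.)

```go
package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

// openAPIModelNamer is a local, illustrative interface; the generated methods
// use value receivers, so the autoscaling types satisfy it directly.
type openAPIModelNamer interface {
	OpenAPIModelName() string
}

func main() {
	objs := []openAPIModelNamer{
		autoscalingv1.HorizontalPodAutoscaler{},
		autoscalingv1.Scale{},
	}
	for _, o := range objs {
		// Prints the schema names, e.g.
		// "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler".
		fmt.Println(o.OpenAPIModelName())
	}
}
```
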
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/doc.go b/operator/vendor/k8s.io/api/autoscaling/v2/doc.go
index 8dea6339..e9a98ae0 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2/doc.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.autoscaling.v2
package v2
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/generated.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
index 40b60ebe..7505c759 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/generated.pb.go
@@ -24,844 +24,62 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (*ContainerResourceMetricSource) ProtoMessage() {}
-func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{0}
-}
-func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src)
-}
-func (m *ContainerResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo
-
-func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (*ContainerResourceMetricStatus) ProtoMessage() {}
-func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{1}
-}
-func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src)
-}
-func (m *ContainerResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m)
-}
+func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo
+func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (*CrossVersionObjectReference) ProtoMessage() {}
-func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{2}
-}
-func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CrossVersionObjectReference.Merge(m, src)
-}
-func (m *CrossVersionObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *CrossVersionObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m)
-}
+func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo
+func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (*ExternalMetricSource) ProtoMessage() {}
-func (*ExternalMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{3}
-}
-func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricSource.Merge(m, src)
-}
-func (m *ExternalMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m)
-}
+func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} }
-var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo
+func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} }
-func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (*ExternalMetricStatus) ProtoMessage() {}
-func (*ExternalMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{4}
-}
-func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricStatus.Merge(m, src)
-}
-func (m *ExternalMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} }
-func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} }
-func (*HPAScalingPolicy) ProtoMessage() {}
-func (*HPAScalingPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{5}
-}
-func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HPAScalingPolicy.Merge(m, src)
-}
-func (m *HPAScalingPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *HPAScalingPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} }
-func (*HPAScalingRules) ProtoMessage() {}
-func (*HPAScalingRules) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{6}
-}
-func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HPAScalingRules) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HPAScalingRules.Merge(m, src)
-}
-func (m *HPAScalingRules) XXX_Size() int {
- return m.Size()
-}
-func (m *HPAScalingRules) XXX_DiscardUnknown() {
- xxx_messageInfo_HPAScalingRules.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (*HorizontalPodAutoscaler) ProtoMessage() {}
-func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{7}
-}
-func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src)
-}
-func (m *HorizontalPodAutoscaler) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m)
-}
+func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} }
-var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo
+func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} }
-func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {}
-func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{8}
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m)
-}
+func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo
+func (m *MetricTarget) Reset() { *m = MetricTarget{} }
-func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
-func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{9}
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m)
-}
+func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo
+func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (*HorizontalPodAutoscalerList) ProtoMessage() {}
-func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{10}
-}
-func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m)
-}
+func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo
+func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
-func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{11}
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m)
-}
+func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo
+func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
-func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{12}
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo
-
-func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} }
-func (*MetricIdentifier) ProtoMessage() {}
-func (*MetricIdentifier) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{13}
-}
-func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricIdentifier) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricIdentifier.Merge(m, src)
-}
-func (m *MetricIdentifier) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricIdentifier) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricIdentifier.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo
-
-func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (*MetricSpec) ProtoMessage() {}
-func (*MetricSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{14}
-}
-func (m *MetricSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricSpec.Merge(m, src)
-}
-func (m *MetricSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricSpec proto.InternalMessageInfo
-
-func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (*MetricStatus) ProtoMessage() {}
-func (*MetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{15}
-}
-func (m *MetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricStatus.Merge(m, src)
-}
-func (m *MetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricStatus proto.InternalMessageInfo
-
-func (m *MetricTarget) Reset() { *m = MetricTarget{} }
-func (*MetricTarget) ProtoMessage() {}
-func (*MetricTarget) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{16}
-}
-func (m *MetricTarget) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricTarget) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricTarget.Merge(m, src)
-}
-func (m *MetricTarget) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricTarget) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricTarget.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricTarget proto.InternalMessageInfo
-
-func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} }
-func (*MetricValueStatus) ProtoMessage() {}
-func (*MetricValueStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{17}
-}
-func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricValueStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricValueStatus.Merge(m, src)
-}
-func (m *MetricValueStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricValueStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricValueStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo
-
-func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (*ObjectMetricSource) ProtoMessage() {}
-func (*ObjectMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{18}
-}
-func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricSource.Merge(m, src)
-}
-func (m *ObjectMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo
-
-func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (*ObjectMetricStatus) ProtoMessage() {}
-func (*ObjectMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{19}
-}
-func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricStatus.Merge(m, src)
-}
-func (m *ObjectMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo
-
-func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (*PodsMetricSource) ProtoMessage() {}
-func (*PodsMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{20}
-}
-func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricSource.Merge(m, src)
-}
-func (m *PodsMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo
-
-func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (*PodsMetricStatus) ProtoMessage() {}
-func (*PodsMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{21}
-}
-func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricStatus.Merge(m, src)
-}
-func (m *PodsMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo
-
-func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (*ResourceMetricSource) ProtoMessage() {}
-func (*ResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{22}
-}
-func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricSource.Merge(m, src)
-}
-func (m *ResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo
-
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
-func (*ResourceMetricStatus) ProtoMessage() {}
-func (*ResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_4d5f2c8767749221, []int{23}
-}
-func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricStatus.Merge(m, src)
-}
-func (m *ResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricSource")
- proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ContainerResourceMetricStatus")
- proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2.CrossVersionObjectReference")
- proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricSource")
- proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ExternalMetricStatus")
- proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2.HPAScalingPolicy")
- proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2.HPAScalingRules")
- proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscaler")
- proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerBehavior")
- proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerCondition")
- proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerList")
- proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerSpec")
- proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2.HorizontalPodAutoscalerStatus")
- proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2.MetricIdentifier")
- proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2.MetricSpec")
- proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2.MetricStatus")
- proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2.MetricTarget")
- proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2.MetricValueStatus")
- proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricSource")
- proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ObjectMetricStatus")
- proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2.PodsMetricSource")
- proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2.PodsMetricStatus")
- proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricSource")
- proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2.ResourceMetricStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/autoscaling/v2/generated.proto", fileDescriptor_4d5f2c8767749221)
-}
-
-var fileDescriptor_4d5f2c8767749221 = []byte{
- // 1742 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xc9, 0x8f, 0x1b, 0x4b,
- 0x19, 0x9f, 0xb6, 0x3d, 0x5b, 0x79, 0xd6, 0xca, 0xe6, 0x4c, 0x14, 0x7b, 0xd4, 0x04, 0xb2, 0x40,
- 0xda, 0xc4, 0x84, 0x28, 0x22, 0x07, 0x34, 0x3d, 0x01, 0x32, 0xca, 0x0c, 0xe3, 0x94, 0x27, 0x19,
- 0x76, 0xa5, 0xdc, 0x5d, 0xe3, 0x29, 0xc6, 0xee, 0xb6, 0xba, 0xdb, 0x4e, 0x26, 0x12, 0x12, 0x17,
- 0xee, 0x08, 0x14, 0xf1, 0x4f, 0x44, 0x9c, 0x40, 0xe1, 0x00, 0x12, 0x12, 0x1c, 0x72, 0x41, 0xca,
- 0x81, 0x43, 0x4e, 0x16, 0x31, 0xd2, 0x3b, 0xbe, 0xe3, 0x3b, 0xe4, 0xf4, 0x54, 0x4b, 0xaf, 0xde,
- 0xc6, 0x79, 0x93, 0x91, 0xe6, 0xe6, 0xaa, 0xfa, 0xbe, 0xdf, 0xb7, 0xd4, 0xb7, 0x55, 0x1b, 0x5c,
- 0x3f, 0xb8, 0xeb, 0x6a, 0xd4, 0x2e, 0xe2, 0x26, 0x2d, 0xe2, 0x96, 0x67, 0xbb, 0x06, 0xae, 0x53,
- 0xab, 0x56, 0x6c, 0x97, 0x8a, 0x35, 0x62, 0x11, 0x07, 0x7b, 0xc4, 0xd4, 0x9a, 0x8e, 0xed, 0xd9,
- 0xf0, 0xa2, 0x20, 0xd5, 0x70, 0x93, 0x6a, 0x11, 0x52, 0xad, 0x5d, 0x5a, 0xb9, 0x59, 0xa3, 0xde,
- 0x7e, 0xab, 0xaa, 0x19, 0x76, 0xa3, 0x58, 0xb3, 0x6b, 0x76, 0x91, 0x73, 0x54, 0x5b, 0x7b, 0x7c,
- 0xc5, 0x17, 0xfc, 0x97, 0x40, 0x5a, 0x51, 0x23, 0x42, 0x0d, 0xdb, 0x21, 0xc5, 0xf6, 0xad, 0xa4,
- 0xb4, 0x95, 0xdb, 0x21, 0x4d, 0x03, 0x1b, 0xfb, 0xd4, 0x22, 0xce, 0x61, 0xb1, 0x79, 0x50, 0xe3,
- 0x4c, 0x0e, 0x71, 0xed, 0x96, 0x63, 0x90, 0xb1, 0xb8, 0xdc, 0x62, 0x83, 0x78, 0xb8, 0x9f, 0xac,
- 0xe2, 0x20, 0x2e, 0xa7, 0x65, 0x79, 0xb4, 0xd1, 0x2b, 0xe6, 0xce, 0x28, 0x06, 0xd7, 0xd8, 0x27,
- 0x0d, 0x9c, 0xe4, 0x53, 0x3f, 0x53, 0xc0, 0xe5, 0x75, 0xdb, 0xf2, 0x30, 0xe3, 0x40, 0xd2, 0x88,
- 0x2d, 0xe2, 0x39, 0xd4, 0xa8, 0xf0, 0xdf, 0x70, 0x1d, 0x64, 0x2c, 0xdc, 0x20, 0x39, 0x65, 0x55,
- 0xb9, 0x36, 0xab, 0x17, 0xdf, 0x74, 0x0a, 0x13, 0xdd, 0x4e, 0x21, 0xf3, 0x63, 0xdc, 0x20, 0x1f,
- 0x3a, 0x85, 0x42, 0xaf, 0xe3, 0x34, 0x1f, 0x86, 0x91, 0x20, 0xce, 0x0c, 0xb7, 0xc1, 0x94, 0x87,
- 0x9d, 0x1a, 0xf1, 0x72, 0xa9, 0x55, 0xe5, 0x5a, 0xb6, 0x74, 0x55, 0x1b, 0x78, 0x75, 0x9a, 0x90,
- 0xbe, 0xc3, 0xc9, 0xf5, 0x05, 0x29, 0x6f, 0x4a, 0xac, 0x91, 0x84, 0x81, 0x45, 0x30, 0x6b, 0xf8,
- 0x6a, 0xe7, 0xd2, 0x5c, 0xb5, 0x65, 0x49, 0x3a, 0x1b, 0xda, 0x13, 0xd2, 0xa8, 0x9f, 0x0f, 0x31,
- 0xd4, 0xc3, 0x5e, 0xcb, 0x3d, 0x1e, 0x43, 0x77, 0xc1, 0xb4, 0xd1, 0x72, 0x1c, 0x62, 0xf9, 0x96,
- 0x7e, 0x6b, 0xa4, 0xa5, 0x4f, 0x70, 0xbd, 0x45, 0x84, 0x0e, 0xfa, 0xa2, 0x94, 0x3a, 0xbd, 0x2e,
- 0x40, 0x90, 0x8f, 0x36, 0xbe, 0xc1, 0x2f, 0x15, 0x70, 0x69, 0xdd, 0xb1, 0x5d, 0xf7, 0x09, 0x71,
- 0x5c, 0x6a, 0x5b, 0xdb, 0xd5, 0x5f, 0x13, 0xc3, 0x43, 0x64, 0x8f, 0x38, 0xc4, 0x32, 0x08, 0x5c,
- 0x05, 0x99, 0x03, 0x6a, 0x99, 0xd2, 0xdc, 0x39, 0xdf, 0xdc, 0x87, 0xd4, 0x32, 0x11, 0x3f, 0x61,
- 0x14, 0xdc, 0x21, 0xa9, 0x38, 0x45, 0xc4, 0xda, 0x12, 0x00, 0xb8, 0x49, 0xa5, 0x00, 0xa9, 0x15,
- 0x94, 0x74, 0x60, 0xad, 0xbc, 0x21, 0x4f, 0x50, 0x84, 0x4a, 0xfd, 0xbb, 0x02, 0xce, 0xfe, 0xe0,
- 0xb9, 0x47, 0x1c, 0x0b, 0xd7, 0x63, 0x81, 0x56, 0x01, 0x53, 0x0d, 0xbe, 0xe6, 0x2a, 0x65, 0x4b,
- 0xdf, 0x1c, 0xe9, 0xb9, 0x0d, 0x93, 0x58, 0x1e, 0xdd, 0xa3, 0xc4, 0x09, 0xe3, 0x44, 0x9c, 0x20,
- 0x09, 0x75, 0xec, 0x81, 0xa7, 0xfe, 0xbb, 0x57, 0x7d, 0x11, 0x3e, 0x9f, 0x44, 0xfd, 0x4f, 0x15,
- 0x4e, 0xea, 0x9f, 0x15, 0xb0, 0xf4, 0xa0, 0xbc, 0x56, 0x11, 0xdc, 0x65, 0xbb, 0x4e, 0x8d, 0x43,
- 0x78, 0x17, 0x64, 0xbc, 0xc3, 0xa6, 0x9f, 0x01, 0x57, 0xfc, 0x0b, 0xdf, 0x39, 0x6c, 0xb2, 0x0c,
- 0x38, 0x9b, 0xa4, 0x67, 0xfb, 0x88, 0x73, 0xc0, 0xaf, 0x81, 0xc9, 0x36, 0x93, 0xcb, 0xb5, 0x9c,
- 0xd4, 0xe7, 0x25, 0xeb, 0x24, 0x57, 0x06, 0x89, 0x33, 0x78, 0x0f, 0xcc, 0x37, 0x89, 0x43, 0x6d,
- 0xb3, 0x42, 0x0c, 0xdb, 0x32, 0x5d, 0x1e, 0x30, 0x93, 0xfa, 0x39, 0x49, 0x3c, 0x5f, 0x8e, 0x1e,
- 0xa2, 0x38, 0xad, 0xfa, 0x45, 0x0a, 0x2c, 0x86, 0x0a, 0xa0, 0x56, 0x9d, 0xb8, 0xf0, 0x57, 0x60,
- 0xc5, 0xf5, 0x70, 0x95, 0xd6, 0xe9, 0x0b, 0xec, 0x51, 0xdb, 0xda, 0xa5, 0x96, 0x69, 0x3f, 0x8b,
- 0xa3, 0xe7, 0xbb, 0x9d, 0xc2, 0x4a, 0x65, 0x20, 0x15, 0x1a, 0x82, 0x00, 0x1f, 0x82, 0x39, 0x97,
- 0xd4, 0x89, 0xe1, 0x09, 0x7b, 0xa5, 0x5f, 0xae, 0x76, 0x3b, 0x85, 0xb9, 0x4a, 0x64, 0xff, 0x43,
- 0xa7, 0x70, 0x26, 0xe6, 0x18, 0x71, 0x88, 0x62, 0xcc, 0xf0, 0xa7, 0x60, 0xa6, 0xc9, 0x7e, 0x51,
- 0xe2, 0xe6, 0x52, 0xab, 0xe9, 0x11, 0x11, 0x92, 0xf4, 0xb5, 0xbe, 0x24, 0xbd, 0x34, 0x53, 0x96,
- 0x20, 0x28, 0x80, 0x83, 0x3f, 0x07, 0xb3, 0x9e, 0x5d, 0x27, 0x0e, 0xb6, 0x0c, 0x92, 0xcb, 0xf0,
- 0x38, 0xd1, 0x22, 0xd8, 0x41, 0x43, 0xd0, 0x9a, 0x07, 0x35, 0x2e, 0xcc, 0xef, 0x56, 0xda, 0xa3,
- 0x16, 0xb6, 0x3c, 0xea, 0x1d, 0xea, 0xf3, 0xac, 0x8e, 0xec, 0xf8, 0x20, 0x28, 0xc4, 0x53, 0x5f,
- 0xa7, 0xc0, 0x85, 0x07, 0xb6, 0x43, 0x5f, 0xb0, 0xca, 0x52, 0x2f, 0xdb, 0xe6, 0x9a, 0xd4, 0x94,
- 0x38, 0xf0, 0x29, 0x98, 0x61, 0x1d, 0xcc, 0xc4, 0x1e, 0x96, 0x51, 0xff, 0xed, 0x61, 0x72, 0x5d,
- 0x8d, 0x51, 0x6b, 0xed, 0x5b, 0x9a, 0x28, 0x46, 0x5b, 0xc4, 0xc3, 0x61, 0xbd, 0x08, 0xf7, 0x50,
- 0x80, 0x0a, 0x7f, 0x02, 0x32, 0x6e, 0x93, 0x18, 0x32, 0xfa, 0xef, 0x0c, 0xf3, 0x58, 0x7f, 0x1d,
- 0x2b, 0x4d, 0x62, 0x84, 0xb5, 0x8b, 0xad, 0x10, 0x47, 0x84, 0x4f, 0xc1, 0x94, 0xcb, 0xb3, 0x84,
- 0x07, 0x4a, 0xb6, 0x74, 0xf7, 0x23, 0xb0, 0x45, 0x96, 0x05, 0xc9, 0x2b, 0xd6, 0x48, 0xe2, 0xaa,
- 0xff, 0x51, 0x40, 0x61, 0x00, 0xa7, 0x4e, 0xf6, 0x71, 0x9b, 0xda, 0x0e, 0x7c, 0x04, 0xa6, 0xf9,
- 0xce, 0xe3, 0xa6, 0x74, 0xe0, 0x8d, 0x23, 0x05, 0x05, 0x8f, 0x7f, 0x3d, 0xcb, 0x52, 0xbb, 0x22,
- 0xd8, 0x91, 0x8f, 0x03, 0x77, 0xc1, 0x2c, 0xff, 0x79, 0xdf, 0x7e, 0x66, 0x49, 0xbf, 0x8d, 0x03,
- 0xca, 0x23, 0xa1, 0xe2, 0x03, 0xa0, 0x10, 0x4b, 0xfd, 0x5d, 0x1a, 0xac, 0x0e, 0xb0, 0x67, 0xdd,
- 0xb6, 0x4c, 0xca, 0x12, 0x08, 0x3e, 0x88, 0xd5, 0x90, 0xdb, 0x89, 0x1a, 0x72, 0x65, 0x14, 0x7f,
- 0xa4, 0xa6, 0x6c, 0x06, 0x17, 0x94, 0x8a, 0x61, 0x49, 0x37, 0x7f, 0xe8, 0x14, 0xfa, 0x4c, 0x6d,
- 0x5a, 0x80, 0x14, 0xbf, 0x0c, 0xd8, 0x06, 0xb0, 0x8e, 0x5d, 0x6f, 0xc7, 0xc1, 0x96, 0x2b, 0x24,
- 0xd1, 0x06, 0x91, 0x57, 0x7f, 0xe3, 0x68, 0x41, 0xcb, 0x38, 0xf4, 0x15, 0xa9, 0x05, 0xdc, 0xec,
- 0x41, 0x43, 0x7d, 0x24, 0xc0, 0x6f, 0x80, 0x29, 0x87, 0x60, 0xd7, 0xb6, 0x78, 0x62, 0xce, 0x86,
- 0xc1, 0x82, 0xf8, 0x2e, 0x92, 0xa7, 0xf0, 0x3a, 0x98, 0x6e, 0x10, 0xd7, 0xc5, 0x35, 0x92, 0x9b,
- 0xe4, 0x84, 0x41, 0xed, 0xde, 0x12, 0xdb, 0xc8, 0x3f, 0x57, 0xff, 0xab, 0x80, 0x4b, 0x03, 0xfc,
- 0xb8, 0x49, 0x5d, 0x0f, 0xfe, 0xa2, 0x27, 0x2b, 0xb5, 0xa3, 0x19, 0xc8, 0xb8, 0x79, 0x4e, 0x06,
- 0xc5, 0xc6, 0xdf, 0x89, 0x64, 0xe4, 0x2e, 0x98, 0xa4, 0x1e, 0x69, 0xf8, 0x45, 0xac, 0x34, 0x7e,
- 0xda, 0x84, 0xed, 0x61, 0x83, 0x01, 0x21, 0x81, 0xa7, 0xbe, 0x4e, 0x0f, 0x34, 0x8b, 0xa5, 0x2d,
- 0x6c, 0x83, 0x05, 0xbe, 0x92, 0x0d, 0x99, 0xec, 0x49, 0xe3, 0x86, 0x15, 0x85, 0x21, 0x03, 0x90,
- 0x7e, 0x5e, 0x6a, 0xb1, 0x50, 0x89, 0xa1, 0xa2, 0x84, 0x14, 0x78, 0x0b, 0x64, 0x1b, 0xd4, 0x42,
- 0xa4, 0x59, 0xa7, 0x06, 0x76, 0x65, 0x87, 0x5b, 0xec, 0x76, 0x0a, 0xd9, 0xad, 0x70, 0x1b, 0x45,
- 0x69, 0xe0, 0x77, 0x41, 0xb6, 0x81, 0x9f, 0x07, 0x2c, 0xa2, 0x13, 0x9d, 0x91, 0xf2, 0xb2, 0x5b,
- 0xe1, 0x11, 0x8a, 0xd2, 0xc1, 0x32, 0x8b, 0x01, 0xd6, 0xc3, 0xdd, 0x5c, 0x86, 0x3b, 0xf7, 0xeb,
- 0x23, 0xbb, 0x3d, 0x2f, 0x6f, 0x91, 0x50, 0xe1, 0xdc, 0xc8, 0x87, 0x81, 0x26, 0x98, 0xa9, 0xca,
- 0x52, 0xc3, 0xc3, 0x2a, 0x5b, 0xfa, 0xde, 0x47, 0xdc, 0x97, 0x44, 0xd0, 0xe7, 0x58, 0x48, 0xf8,
- 0x2b, 0x14, 0x20, 0xab, 0xaf, 0x32, 0xe0, 0xf2, 0xd0, 0x12, 0x09, 0x7f, 0x08, 0xa0, 0x5d, 0x75,
- 0x89, 0xd3, 0x26, 0xe6, 0x8f, 0xc4, 0x0b, 0x84, 0x0d, 0x8c, 0xec, 0xfe, 0xd2, 0xfa, 0x79, 0x96,
- 0x4d, 0xdb, 0x3d, 0xa7, 0xa8, 0x0f, 0x07, 0x34, 0xc0, 0x3c, 0xcb, 0x31, 0x71, 0x63, 0x54, 0xce,
- 0xa6, 0xe3, 0x25, 0xf0, 0x32, 0x1b, 0x35, 0x36, 0xa3, 0x20, 0x28, 0x8e, 0x09, 0xd7, 0xc0, 0xa2,
- 0x1c, 0x93, 0x12, 0x37, 0x78, 0x41, 0xfa, 0x79, 0x71, 0x3d, 0x7e, 0x8c, 0x92, 0xf4, 0x0c, 0xc2,
- 0x24, 0x2e, 0x75, 0x88, 0x19, 0x40, 0x64, 0xe2, 0x10, 0xf7, 0xe3, 0xc7, 0x28, 0x49, 0x0f, 0x6b,
- 0x60, 0x41, 0xa2, 0xca, 0x5b, 0xcd, 0x4d, 0xf2, 0x98, 0x18, 0x3d, 0xc1, 0xca, 0xb6, 0x14, 0xc4,
- 0xf7, 0x7a, 0x0c, 0x06, 0x25, 0x60, 0xa1, 0x0d, 0x80, 0xe1, 0x17, 0x4d, 0x37, 0x37, 0xc5, 0x85,
- 0xdc, 0x1b, 0x3f, 0x4a, 0x82, 0xc2, 0x1b, 0x76, 0xf4, 0x60, 0xcb, 0x45, 0x11, 0x11, 0xea, 0x1f,
- 0x15, 0xb0, 0x94, 0x9c, 0x80, 0x83, 0xc7, 0x86, 0x32, 0xf0, 0xb1, 0xf1, 0x4b, 0x30, 0x23, 0x06,
- 0x2a, 0xdb, 0x91, 0xd7, 0xfe, 0x9d, 0x23, 0x96, 0x35, 0x5c, 0x25, 0xf5, 0x8a, 0x64, 0x15, 0x41,
- 0xec, 0xaf, 0x50, 0x00, 0xa9, 0xbe, 0xcc, 0x00, 0x10, 0xe6, 0x14, 0xbc, 0x1d, 0xeb, 0x63, 0xab,
- 0x89, 0x3e, 0xb6, 0x14, 0x7d, 0xb9, 0x44, 0x7a, 0xd6, 0x23, 0x30, 0x65, 0xf3, 0x32, 0x23, 0x35,
- 0xbc, 0x39, 0xc4, 0x8f, 0xc1, 0xbc, 0x13, 0x00, 0xe9, 0x80, 0x35, 0x06, 0x59, 0xa7, 0x24, 0x10,
- 0xdc, 0x00, 0x99, 0xa6, 0x6d, 0xfa, 0x53, 0xca, 0xb0, 0x99, 0xb1, 0x6c, 0x9b, 0x6e, 0x0c, 0x6e,
- 0x86, 0x69, 0xcc, 0x76, 0x11, 0x87, 0x60, 0x23, 0xa8, 0x3f, 0xf9, 0xc9, 0x31, 0xb1, 0x38, 0x04,
- 0xae, 0xdf, 0xd7, 0x00, 0xe1, 0x3d, 0xff, 0x04, 0x05, 0x70, 0xf0, 0x37, 0x60, 0xd9, 0x48, 0xbe,
- 0xae, 0x73, 0xd3, 0x23, 0x07, 0xab, 0xa1, 0x9f, 0x1e, 0xf4, 0x73, 0xdd, 0x4e, 0x61, 0xb9, 0x87,
- 0x04, 0xf5, 0x4a, 0x62, 0x96, 0x11, 0xf9, 0x28, 0x93, 0x75, 0x6e, 0x98, 0x65, 0xfd, 0x9e, 0x9f,
- 0xc2, 0x32, 0xff, 0x04, 0x05, 0x70, 0xea, 0x9f, 0x32, 0x60, 0x2e, 0xf6, 0xd0, 0x3b, 0xe1, 0xc8,
- 0x10, 0xc9, 0x7c, 0x6c, 0x91, 0x21, 0xe0, 0x8e, 0x35, 0x32, 0x04, 0xe4, 0x09, 0x45, 0x86, 0x10,
- 0x76, 0x42, 0x91, 0x11, 0xb1, 0xac, 0x4f, 0x64, 0xfc, 0x2b, 0xe5, 0x47, 0x86, 0x18, 0x16, 0x8e,
- 0x16, 0x19, 0x82, 0x36, 0x12, 0x19, 0xdb, 0xd1, 0xb7, 0xf3, 0xf8, 0x2f, 0xb7, 0xd9, 0x9e, 0x77,
- 0xb6, 0x09, 0xe6, 0x70, 0x9b, 0x38, 0xb8, 0x46, 0xf8, 0xb6, 0x8c, 0x8f, 0x71, 0x71, 0x97, 0xd8,
- 0x33, 0x77, 0x2d, 0x82, 0x83, 0x62, 0xa8, 0xac, 0xa5, 0xcb, 0xf5, 0x63, 0x2f, 0x78, 0x3f, 0xcb,
- 0x2e, 0xc7, 0x5b, 0xfa, 0x5a, 0xcf, 0x29, 0xea, 0xc3, 0xa1, 0xfe, 0x21, 0x05, 0x96, 0x7b, 0xbe,
- 0x5c, 0x84, 0x4e, 0x51, 0x3e, 0x91, 0x53, 0x52, 0x27, 0xe8, 0x94, 0xf4, 0xd8, 0x4e, 0xf9, 0x6b,
- 0x0a, 0xc0, 0xde, 0xfe, 0x00, 0x0f, 0xf9, 0x58, 0x61, 0x38, 0xb4, 0x4a, 0x4c, 0x71, 0xfc, 0x15,
- 0x67, 0xe0, 0xe8, 0x38, 0x12, 0x85, 0x45, 0x49, 0x39, 0xc7, 0xff, 0x05, 0x37, 0xfc, 0x5e, 0x96,
- 0x3e, 0xb6, 0xef, 0x65, 0xea, 0x3f, 0x92, 0x7e, 0x3b, 0x85, 0xdf, 0xe6, 0xfa, 0xdd, 0x72, 0xfa,
- 0x64, 0x6e, 0x59, 0xfd, 0x9b, 0x02, 0x96, 0x92, 0x63, 0xc4, 0x29, 0xf9, 0x30, 0xfb, 0xcf, 0xb8,
- 0xea, 0xa7, 0xf1, 0xa3, 0xec, 0x2b, 0x05, 0x9c, 0x3d, 0x3d, 0xff, 0xc1, 0xa8, 0x7f, 0xe9, 0x55,
- 0xf7, 0x14, 0xfc, 0x93, 0xa2, 0x7f, 0xff, 0xcd, 0xfb, 0xfc, 0xc4, 0xdb, 0xf7, 0xf9, 0x89, 0x77,
- 0xef, 0xf3, 0x13, 0xbf, 0xed, 0xe6, 0x95, 0x37, 0xdd, 0xbc, 0xf2, 0xb6, 0x9b, 0x57, 0xde, 0x75,
- 0xf3, 0xca, 0xff, 0xba, 0x79, 0xe5, 0xf7, 0xff, 0xcf, 0x4f, 0xfc, 0xec, 0xe2, 0xc0, 0xbf, 0x21,
- 0xbf, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x23, 0xae, 0x54, 0xa2, 0x1c, 0x00, 0x00,
-}
+func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/generated.proto b/operator/vendor/k8s.io/api/autoscaling/v2/generated.proto
index 04c34d6e..a007676a 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2/generated.proto
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/generated.proto
@@ -123,7 +123,7 @@ message HPAScalingPolicy {
//
// The tolerance is applied to the metric values and prevents scaling too
// eagerly for small metric variations. (Note that setting a tolerance requires
-// enabling the alpha HPAConfigurableTolerance feature gate.)
+// the beta HPAConfigurableTolerance feature gate to be enabled.)
message HPAScalingRules {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
@@ -156,8 +156,8 @@ message HPAScalingRules {
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
//
- // This is an alpha field and requires enabling the HPAConfigurableTolerance
- // feature gate.
+  // This is a beta field and requires the HPAConfigurableTolerance feature
+ // gate to be enabled.
//
// +featureGate=HPAConfigurableTolerance
// +optional
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2/generated.protomessage.pb.go
new file mode 100644
index 00000000..0a73d526
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/generated.protomessage.pb.go
@@ -0,0 +1,70 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v2
+
+func (*ContainerResourceMetricSource) ProtoMessage() {}
+
+func (*ContainerResourceMetricStatus) ProtoMessage() {}
+
+func (*CrossVersionObjectReference) ProtoMessage() {}
+
+func (*ExternalMetricSource) ProtoMessage() {}
+
+func (*ExternalMetricStatus) ProtoMessage() {}
+
+func (*HPAScalingPolicy) ProtoMessage() {}
+
+func (*HPAScalingRules) ProtoMessage() {}
+
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (*MetricIdentifier) ProtoMessage() {}
+
+func (*MetricSpec) ProtoMessage() {}
+
+func (*MetricStatus) ProtoMessage() {}
+
+func (*MetricTarget) ProtoMessage() {}
+
+func (*MetricValueStatus) ProtoMessage() {}
+
+func (*ObjectMetricSource) ProtoMessage() {}
+
+func (*ObjectMetricStatus) ProtoMessage() {}
+
+func (*PodsMetricSource) ProtoMessage() {}
+
+func (*PodsMetricStatus) ProtoMessage() {}
+
+func (*ResourceMetricSource) ProtoMessage() {}
+
+func (*ResourceMetricStatus) ProtoMessage() {}
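
(Editorial aside, not part of the patch: the ProtoMessage stubs in the new file above sit behind the kubernetes_protomessage_one_more_release build tag, so they are compiled only when that tag is supplied, e.g. go build -tags kubernetes_protomessage_one_more_release, while the Reset methods stay unconditionally in generated.pb.go. A hedged sketch of a compile-time check under that tag follows; the resetProtoMessage interface is ours, introduced only for illustration.)

```go
//go:build kubernetes_protomessage_one_more_release

package main

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
)

// resetProtoMessage captures just the two methods visible in this patch:
// Reset (kept in generated.pb.go) and ProtoMessage (gated by the build tag).
type resetProtoMessage interface {
	Reset()
	ProtoMessage()
}

// Compile-time check that the v2 types still provide both methods when the
// tag is enabled; without the tag, ProtoMessage is absent for these types.
var _ resetProtoMessage = &autoscalingv2.HorizontalPodAutoscaler{}

func main() {}
```
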
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/types.go b/operator/vendor/k8s.io/api/autoscaling/v2/types.go
index 9ce69b1e..03a06dc8 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2/types.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/types.go
@@ -182,7 +182,7 @@ const (
//
// The tolerance is applied to the metric values and prevents scaling too
// eagerly for small metric variations. (Note that setting a tolerance requires
-// enabling the alpha HPAConfigurableTolerance feature gate.)
+// the beta HPAConfigurableTolerance feature gate to be enabled.)
type HPAScalingRules struct {
// stabilizationWindowSeconds is the number of seconds for which past recommendations should be
// considered while scaling up or scaling down.
@@ -215,8 +215,8 @@ type HPAScalingRules struct {
// and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
// triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
//
- // This is an alpha field and requires enabling the HPAConfigurableTolerance
- // feature gate.
+	// This is a beta field and requires the HPAConfigurableTolerance feature
+ // gate to be enabled.
//
// +featureGate=HPAConfigurableTolerance
// +optional
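
(Editorial aside, not part of the patch: a short sketch of setting the per-direction tolerance described in the comment above, mirroring its 100Mi target with 5% scale-down and 1% scale-up tolerances. It assumes the HPAConfigurableTolerance feature gate is enabled on the cluster and uses k8s.io/utils/ptr for pointer helpers.)

```go
package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func main() {
	// Per-direction tolerances: scale down only once usage drops below 95Mi,
	// scale up only once it exceeds 101Mi (relative to a 100Mi target).
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2.HPAScalingRules{
			Tolerance: ptr.To(resource.MustParse("0.05")), // 5%
		},
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Tolerance: ptr.To(resource.MustParse("0.01")), // 1%
		},
	}
	fmt.Println(behavior.ScaleDown.Tolerance.String(), behavior.ScaleUp.Tolerance.String())
}
```
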
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
index 017fefcd..af3f3022 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/types_swagger_doc_generated.go
@@ -92,11 +92,11 @@ func (HPAScalingPolicy) SwaggerDoc() map[string]string {
}
var map_HPAScalingRules = map[string]string{
- "": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires enabling the alpha HPAConfigurableTolerance feature gate.)",
+ "": "HPAScalingRules configures the scaling behavior for one direction via scaling Policy Rules and a configurable metric tolerance.\n\nScaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.\n\nThe tolerance is applied to the metric values and prevents scaling too eagerly for small metric variations. (Note that setting a tolerance requires the beta HPAConfigurableTolerance feature gate to be enabled.)",
"stabilizationWindowSeconds": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
"selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.",
"policies": "policies is a list of potential scaling polices which can be used during scaling. If not set, use the default values: - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window. - For scale down: allow all pods to be removed in a 15s window.",
- "tolerance": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is an alpha field and requires enabling the HPAConfigurableTolerance feature gate.",
+	"tolerance": "tolerance is the tolerance on the ratio between the current and desired metric value under which no updates are made to the desired number of replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not set, the default cluster-wide tolerance is applied (by default 10%).\n\nFor example, if autoscaling is configured with a memory consumption target of 100Mi, and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be triggered when the actual consumption falls below 95Mi or exceeds 101Mi.\n\nThis is a beta field and requires the HPAConfigurableTolerance feature gate to be enabled.",
}
func (HPAScalingRules) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/autoscaling/v2/zz_generated.model_name.go
new file mode 100644
index 00000000..b15b3421
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2/zz_generated.model_name.go
@@ -0,0 +1,142 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ContainerResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CrossVersionObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.CrossVersionObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ExternalMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ExternalMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HPAScalingPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HPAScalingPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HPAScalingRules) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HPAScalingRules"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscaler) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerBehavior) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerCondition) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerList) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricIdentifier) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.MetricIdentifier"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.MetricSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.MetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricTarget) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.MetricTarget"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricValueStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.MetricValueStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ObjectMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ObjectMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.PodsMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.PodsMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2.ResourceMetricStatus"
+}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/operator/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
index eac92e86..58cc1f89 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.autoscaling.v2beta1
package v2beta1
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
index 69567089..b3b535ad 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
@@ -24,658 +24,50 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (*ContainerResourceMetricSource) ProtoMessage() {}
-func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{0}
-}
-func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src)
-}
-func (m *ContainerResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m)
-}
+func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo
+func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (*ContainerResourceMetricStatus) ProtoMessage() {}
-func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{1}
-}
-func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src)
-}
-func (m *ContainerResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m)
-}
+func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (*CrossVersionObjectReference) ProtoMessage() {}
-func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{2}
-}
-func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CrossVersionObjectReference.Merge(m, src)
-}
-func (m *CrossVersionObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *CrossVersionObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (*ExternalMetricSource) ProtoMessage() {}
-func (*ExternalMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{3}
-}
-func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricSource.Merge(m, src)
-}
-func (m *ExternalMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (*ExternalMetricStatus) ProtoMessage() {}
-func (*ExternalMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{4}
-}
-func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricStatus.Merge(m, src)
-}
-func (m *ExternalMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m)
-}
+func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo
+func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (*HorizontalPodAutoscaler) ProtoMessage() {}
-func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{5}
-}
-func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src)
-}
-func (m *HorizontalPodAutoscaler) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m)
-}
+func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo
+func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
-func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{6}
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m)
-}
+func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo
+func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (*HorizontalPodAutoscalerList) ProtoMessage() {}
-func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{7}
-}
-func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo
-
-func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
-func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{8}
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo
-
-func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
-func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{9}
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m)
-}
+func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo
-
-func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (*MetricSpec) ProtoMessage() {}
-func (*MetricSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{10}
-}
-func (m *MetricSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricSpec.Merge(m, src)
-}
-func (m *MetricSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricSpec proto.InternalMessageInfo
-
-func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (*MetricStatus) ProtoMessage() {}
-func (*MetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{11}
-}
-func (m *MetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricStatus.Merge(m, src)
-}
-func (m *MetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricStatus proto.InternalMessageInfo
-
-func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (*ObjectMetricSource) ProtoMessage() {}
-func (*ObjectMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{12}
-}
-func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricSource.Merge(m, src)
-}
-func (m *ObjectMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo
-
-func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (*ObjectMetricStatus) ProtoMessage() {}
-func (*ObjectMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{13}
-}
-func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricStatus.Merge(m, src)
-}
-func (m *ObjectMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo
-
-func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (*PodsMetricSource) ProtoMessage() {}
-func (*PodsMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{14}
-}
-func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricSource.Merge(m, src)
-}
-func (m *PodsMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo
-
-func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (*PodsMetricStatus) ProtoMessage() {}
-func (*PodsMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{15}
-}
-func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricStatus.Merge(m, src)
-}
-func (m *PodsMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo
-
-func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (*ResourceMetricSource) ProtoMessage() {}
-func (*ResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{16}
-}
-func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricSource.Merge(m, src)
-}
-func (m *ResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo
-
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
-func (*ResourceMetricStatus) ProtoMessage() {}
-func (*ResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ea74040359c1ed83, []int{17}
-}
-func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricStatus.Merge(m, src)
-}
-func (m *ResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricSource")
- proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ContainerResourceMetricStatus")
- proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference")
- proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource")
- proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus")
- proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscaler")
- proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition")
- proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerList")
- proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec")
- proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus")
- proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta1.MetricSpec")
- proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.MetricStatus")
- proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricSource")
- proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricStatus")
- proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricSource")
- proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricStatus")
- proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource")
- proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_ea74040359c1ed83)
-}
-
-var fileDescriptor_ea74040359c1ed83 = []byte{
- // 1549 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0x4d, 0x6c, 0x1b, 0xc5,
- 0x17, 0x8f, 0xed, 0x4d, 0x9a, 0x3c, 0xa7, 0xf9, 0x98, 0xf6, 0xdf, 0xba, 0xe9, 0xbf, 0x76, 0xb4,
- 0xfa, 0xeb, 0xaf, 0x50, 0xc1, 0xba, 0x35, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x2e, 0xd0, 0x8a, 0xb8,
- 0x2d, 0x93, 0xb4, 0x42, 0xd0, 0x22, 0x26, 0xeb, 0xa9, 0xb3, 0xc4, 0xde, 0xb5, 0x76, 0xc6, 0x51,
- 0x53, 0x84, 0x84, 0x90, 0xb8, 0x73, 0x81, 0x33, 0x48, 0x5c, 0x11, 0xe2, 0x02, 0x67, 0x6e, 0x3d,
- 0xf6, 0xd8, 0x0a, 0x64, 0x51, 0x73, 0xe0, 0xcc, 0xb5, 0x27, 0x34, 0xb3, 0xb3, 0xeb, 0x5d, 0xdb,
- 0x6b, 0x3b, 0x6e, 0x1a, 0x3e, 0xd4, 0x9b, 0x77, 0xe7, 0xbd, 0xdf, 0x9b, 0xf9, 0xbd, 0xaf, 0x79,
- 0x6b, 0x30, 0x76, 0x5e, 0x66, 0x86, 0xe5, 0xe4, 0x49, 0xc3, 0xca, 0x93, 0x26, 0x77, 0x98, 0x49,
- 0x6a, 0x96, 0x5d, 0xcd, 0xef, 0x16, 0xb6, 0x28, 0x27, 0xe7, 0xf3, 0x55, 0x6a, 0x53, 0x97, 0x70,
- 0x5a, 0x31, 0x1a, 0xae, 0xc3, 0x1d, 0x94, 0xf5, 0xe4, 0x0d, 0xd2, 0xb0, 0x8c, 0x90, 0xbc, 0xa1,
- 0xe4, 0x97, 0x9e, 0xab, 0x5a, 0x7c, 0xbb, 0xb9, 0x65, 0x98, 0x4e, 0x3d, 0x5f, 0x75, 0xaa, 0x4e,
- 0x5e, 0xaa, 0x6d, 0x35, 0x6f, 0xc9, 0x27, 0xf9, 0x20, 0x7f, 0x79, 0x70, 0x4b, 0x7a, 0xc8, 0xbc,
- 0xe9, 0xb8, 0x34, 0xbf, 0xdb, 0x63, 0x72, 0x69, 0xb5, 0x23, 0x53, 0x27, 0xe6, 0xb6, 0x65, 0x53,
- 0x77, 0x2f, 0xdf, 0xd8, 0xa9, 0x4a, 0x25, 0x97, 0x32, 0xa7, 0xe9, 0x9a, 0x74, 0x5f, 0x5a, 0x2c,
- 0x5f, 0xa7, 0x9c, 0xf4, 0xb3, 0x95, 0x8f, 0xd3, 0x72, 0x9b, 0x36, 0xb7, 0xea, 0xbd, 0x66, 0x5e,
- 0x1c, 0xa6, 0xc0, 0xcc, 0x6d, 0x5a, 0x27, 0xdd, 0x7a, 0xfa, 0xef, 0x49, 0x38, 0x53, 0x72, 0x6c,
- 0x4e, 0x84, 0x06, 0x56, 0x87, 0x28, 0x53, 0xee, 0x5a, 0xe6, 0x86, 0xfc, 0x8d, 0x4a, 0xa0, 0xd9,
- 0xa4, 0x4e, 0x33, 0x89, 0xe5, 0xc4, 0xca, 0x4c, 0x31, 0x7f, 0xb7, 0x95, 0x9b, 0x68, 0xb7, 0x72,
- 0xda, 0x65, 0x52, 0xa7, 0x8f, 0x5a, 0xb9, 0x5c, 0x2f, 0x71, 0x86, 0x0f, 0x23, 0x44, 0xb0, 0x54,
- 0x46, 0xef, 0x40, 0x86, 0x13, 0xb7, 0x4a, 0xf9, 0xda, 0x2e, 0x75, 0x49, 0x95, 0x5e, 0xe3, 0x56,
- 0xcd, 0xba, 0x43, 0xb8, 0xe5, 0xd8, 0x99, 0xe4, 0x72, 0x62, 0x65, 0xb2, 0xf8, 0xdf, 0x76, 0x2b,
- 0x97, 0xd9, 0x8c, 0x91, 0xc1, 0xb1, 0xda, 0x68, 0x17, 0x50, 0x64, 0xed, 0x3a, 0xa9, 0x35, 0x69,
- 0x26, 0xb5, 0x9c, 0x58, 0x49, 0x17, 0x0c, 0xa3, 0x13, 0x25, 0x01, 0x2b, 0x46, 0x63, 0xa7, 0x2a,
- 0xc3, 0xc6, 0x77, 0x99, 0xf1, 0x76, 0x93, 0xd8, 0xdc, 0xe2, 0x7b, 0xc5, 0x13, 0xed, 0x56, 0x0e,
- 0x6d, 0xf6, 0xa0, 0xe1, 0x3e, 0x16, 0x50, 0x1e, 0x66, 0x4c, 0x9f, 0xb7, 0x8c, 0x26, 0xb9, 0x59,
- 0x54, 0xdc, 0xcc, 0x74, 0x08, 0xed, 0xc8, 0xe8, 0x7f, 0x0c, 0x60, 0x9a, 0x13, 0xde, 0x64, 0x07,
- 0xc3, 0xf4, 0x7b, 0x70, 0xca, 0x6c, 0xba, 0x2e, 0xb5, 0xe3, 0xa9, 0x3e, 0xd3, 0x6e, 0xe5, 0x4e,
- 0x95, 0xe2, 0x84, 0x70, 0xbc, 0x3e, 0xfa, 0x18, 0x8e, 0x45, 0x17, 0x1f, 0x87, 0xed, 0xd3, 0xea,
- 0x80, 0xc7, 0x4a, 0xbd, 0x90, 0xb8, 0x9f, 0x9d, 0xfd, 0x73, 0xfe, 0x45, 0x02, 0x4e, 0x97, 0x5c,
- 0x87, 0xb1, 0xeb, 0xd4, 0x65, 0x96, 0x63, 0x5f, 0xd9, 0xfa, 0x90, 0x9a, 0x1c, 0xd3, 0x5b, 0xd4,
- 0xa5, 0xb6, 0x49, 0xd1, 0x32, 0x68, 0x3b, 0x96, 0x5d, 0x51, 0x8c, 0xcf, 0xfa, 0x8c, 0xbf, 0x65,
- 0xd9, 0x15, 0x2c, 0x57, 0x84, 0x84, 0xf4, 0x49, 0x32, 0x2a, 0x11, 0x22, 0xbc, 0x00, 0x40, 0x1a,
- 0x96, 0x32, 0x20, 0xa9, 0x98, 0x29, 0x22, 0x25, 0x07, 0x6b, 0x57, 0x2f, 0xa9, 0x15, 0x1c, 0x92,
- 0xd2, 0xbf, 0x4c, 0xc1, 0xf1, 0xd7, 0x6f, 0x73, 0xea, 0xda, 0xa4, 0x16, 0x49, 0xb6, 0x02, 0x40,
- 0x5d, 0x3e, 0x5f, 0xee, 0x04, 0x42, 0x00, 0x56, 0x0e, 0x56, 0x70, 0x48, 0x0a, 0x39, 0x30, 0xe7,
- 0x3d, 0x6d, 0xd0, 0x1a, 0x35, 0xb9, 0xe3, 0xca, 0xcd, 0xa6, 0x0b, 0xcf, 0x0f, 0xf2, 0x07, 0x33,
- 0x44, 0xe9, 0x31, 0x76, 0xcf, 0x1b, 0xeb, 0x64, 0x8b, 0xd6, 0x7c, 0xd5, 0x22, 0x6a, 0xb7, 0x72,
- 0x73, 0xe5, 0x08, 0x1c, 0xee, 0x82, 0x47, 0x04, 0xd2, 0x5e, 0x42, 0x3c, 0x8e, 0xf7, 0xe7, 0xdb,
- 0xad, 0x5c, 0x7a, 0xb3, 0x03, 0x83, 0xc3, 0x98, 0x31, 0x59, 0xad, 0x3d, 0xe9, 0xac, 0xd6, 0xbf,
- 0xea, 0x75, 0x8c, 0x97, 0x9b, 0xff, 0x08, 0xc7, 0x6c, 0xc3, 0xac, 0x4a, 0x9b, 0xc7, 0xf1, 0xcc,
- 0x71, 0x75, 0xac, 0xd9, 0x52, 0x08, 0x0b, 0x47, 0x90, 0xd1, 0x5e, 0xff, 0x42, 0x30, 0x9e, 0x83,
- 0x4e, 0xee, 0xa7, 0x08, 0xe8, 0x3f, 0x25, 0xe1, 0xe4, 0x45, 0xc7, 0xb5, 0xee, 0x88, 0x2c, 0xaf,
- 0x5d, 0x75, 0x2a, 0x6b, 0xaa, 0xfd, 0x53, 0x17, 0x7d, 0x00, 0xd3, 0x82, 0xbd, 0x0a, 0xe1, 0x44,
- 0xfa, 0x28, 0x5d, 0x38, 0x37, 0x1a, 0xd7, 0x5e, 0x61, 0x28, 0x53, 0x4e, 0x3a, 0x5e, 0xed, 0xbc,
- 0xc3, 0x01, 0x2a, 0xba, 0x09, 0x1a, 0x6b, 0x50, 0x53, 0x79, 0xf2, 0x15, 0x63, 0xf0, 0x35, 0xc4,
- 0x88, 0xd9, 0xe8, 0x46, 0x83, 0x9a, 0x9d, 0x62, 0x22, 0x9e, 0xb0, 0x84, 0x45, 0x14, 0xa6, 0x98,
- 0x0c, 0x38, 0xe5, 0xbb, 0x57, 0xc7, 0x35, 0x20, 0x41, 0x8a, 0x73, 0xca, 0xc4, 0x94, 0xf7, 0x8c,
- 0x15, 0xb8, 0xfe, 0x59, 0x0a, 0x96, 0x63, 0x34, 0x4b, 0x8e, 0x5d, 0xb1, 0x64, 0xb1, 0xbf, 0x08,
- 0x1a, 0xdf, 0x6b, 0xf8, 0xc1, 0xbe, 0xea, 0xef, 0x76, 0x73, 0xaf, 0x21, 0xda, 0xd1, 0xff, 0x86,
- 0xe9, 0x0b, 0x39, 0x2c, 0x11, 0xd0, 0x7a, 0x70, 0xaa, 0x64, 0x04, 0x4b, 0x6d, 0xeb, 0x51, 0x2b,
- 0xd7, 0xe7, 0xfe, 0x65, 0x04, 0x48, 0xd1, 0xcd, 0x8b, 0xda, 0x50, 0x23, 0x8c, 0x6f, 0xba, 0xc4,
- 0x66, 0x9e, 0x25, 0xab, 0xee, 0xc7, 0xfa, 0xd9, 0xd1, 0xdc, 0x2d, 0x34, 0x8a, 0x4b, 0x6a, 0x17,
- 0x68, 0xbd, 0x07, 0x0d, 0xf7, 0xb1, 0x80, 0xfe, 0x0f, 0x53, 0x2e, 0x25, 0xcc, 0xb1, 0x55, 0xeb,
- 0x09, 0xc8, 0xc5, 0xf2, 0x2d, 0x56, 0xab, 0xe8, 0x19, 0x38, 0x52, 0xa7, 0x8c, 0x91, 0x2a, 0xcd,
- 0x4c, 0x4a, 0xc1, 0x79, 0x25, 0x78, 0xa4, 0xec, 0xbd, 0xc6, 0xfe, 0xba, 0xfe, 0x20, 0x01, 0xa7,
- 0x63, 0x78, 0x5c, 0xb7, 0x18, 0x47, 0x37, 0x7a, 0xe2, 0xd9, 0x18, 0xb1, 0x76, 0x58, 0xcc, 0x8b,
- 0xe6, 0x05, 0x65, 0x7b, 0xda, 0x7f, 0x13, 0x8a, 0xe5, 0x1b, 0x30, 0x69, 0x71, 0x5a, 0x17, 0x5e,
- 0x49, 0xad, 0xa4, 0x0b, 0x2f, 0x8d, 0x19, 0x6b, 0xc5, 0xa3, 0xca, 0xc6, 0xe4, 0x25, 0x81, 0x86,
- 0x3d, 0x50, 0xfd, 0xe7, 0x64, 0xec, 0xd9, 0x44, 0xc0, 0xa3, 0x8f, 0x60, 0x4e, 0x3e, 0x79, 0x95,
- 0x19, 0xd3, 0x5b, 0xea, 0x84, 0x43, 0x73, 0x6a, 0x40, 0x43, 0x2f, 0x9e, 0x50, 0x5b, 0x99, 0xdb,
- 0x88, 0x40, 0xe3, 0x2e, 0x53, 0xe8, 0x3c, 0xa4, 0xeb, 0x96, 0x8d, 0x69, 0xa3, 0x66, 0x99, 0x84,
- 0xa9, 0x7b, 0x91, 0x6c, 0x49, 0xe5, 0xce, 0x6b, 0x1c, 0x96, 0x41, 0x2f, 0x40, 0xba, 0x4e, 0x6e,
- 0x07, 0x2a, 0x29, 0xa9, 0x72, 0x4c, 0xd9, 0x4b, 0x97, 0x3b, 0x4b, 0x38, 0x2c, 0x87, 0xae, 0x89,
- 0x68, 0x10, 0x55, 0x9a, 0x65, 0x34, 0x49, 0xf3, 0xd9, 0x61, 0xe7, 0x53, 0x45, 0x5e, 0x94, 0x88,
- 0x50, 0xe4, 0x48, 0x08, 0xec, 0x63, 0xe9, 0x3f, 0x68, 0x70, 0x66, 0x60, 0xee, 0xa3, 0x37, 0x00,
- 0x39, 0x5b, 0x8c, 0xba, 0xbb, 0xb4, 0xf2, 0xa6, 0x77, 0xe9, 0x17, 0xf7, 0x13, 0xc1, 0x71, 0xca,
- 0x6b, 0x89, 0x57, 0x7a, 0x56, 0x71, 0x1f, 0x0d, 0x64, 0xc2, 0x51, 0x91, 0x0c, 0x1e, 0xa1, 0x96,
- 0xba, 0x0a, 0xed, 0x2f, 0xd3, 0x16, 0xdb, 0xad, 0xdc, 0xd1, 0xf5, 0x30, 0x08, 0x8e, 0x62, 0xa2,
- 0x35, 0x98, 0x57, 0xb5, 0xbe, 0x8b, 0xe0, 0x93, 0x8a, 0x81, 0xf9, 0x52, 0x74, 0x19, 0x77, 0xcb,
- 0x0b, 0x88, 0x0a, 0x65, 0x96, 0x4b, 0x2b, 0x01, 0x84, 0x16, 0x85, 0xb8, 0x10, 0x5d, 0xc6, 0xdd,
- 0xf2, 0xa8, 0x06, 0x73, 0x0a, 0x55, 0xf1, 0x9d, 0x99, 0x94, 0x2e, 0x7b, 0x76, 0x44, 0x97, 0x79,
- 0x45, 0x37, 0x88, 0xc1, 0x52, 0x04, 0x0b, 0x77, 0x61, 0x23, 0x0e, 0x60, 0xfa, 0x25, 0x8e, 0x65,
- 0xa6, 0xa4, 0xa5, 0xd7, 0xc6, 0xcc, 0xc1, 0xa0, 0x56, 0x76, 0xda, 0x57, 0xf0, 0x8a, 0xe1, 0x90,
- 0x1d, 0xfd, 0x3b, 0x0d, 0xa0, 0x13, 0x61, 0x68, 0x35, 0x52, 0xe4, 0x97, 0xbb, 0x8a, 0xfc, 0x42,
- 0xf8, 0x72, 0x1a, 0x2a, 0xe8, 0xd7, 0x61, 0xca, 0x91, 0x99, 0xa7, 0x82, 0xa1, 0x30, 0x6c, 0xdb,
- 0x41, 0x2f, 0x0d, 0xd0, 0x8a, 0x20, 0x4a, 0xa7, 0xca, 0x5f, 0x85, 0x86, 0x2e, 0x83, 0xd6, 0x70,
- 0x2a, 0x7e, 0xf3, 0x3b, 0x37, 0x0c, 0xf5, 0xaa, 0x53, 0x61, 0x11, 0xcc, 0x69, 0xb1, 0x77, 0xf1,
- 0x16, 0x4b, 0x1c, 0xf4, 0x3e, 0x4c, 0xfb, 0xd7, 0x0d, 0x75, 0x37, 0x59, 0x1d, 0x86, 0xd9, 0x6f,
- 0x06, 0x2e, 0xce, 0x8a, 0x0a, 0xea, 0xaf, 0xe0, 0x00, 0x13, 0x7d, 0x9a, 0x80, 0x45, 0xb3, 0x7b,
- 0xa6, 0xcb, 0x1c, 0x19, 0xad, 0x75, 0x0f, 0x1c, 0xbb, 0x8b, 0xff, 0x69, 0xb7, 0x72, 0x8b, 0x3d,
- 0x22, 0xb8, 0xd7, 0x9c, 0x38, 0x24, 0x55, 0x57, 0x56, 0xd9, 0x70, 0x46, 0x38, 0x64, 0xbf, 0xd9,
- 0xc3, 0x3b, 0xa4, 0xbf, 0x82, 0x03, 0x4c, 0xfd, 0x7b, 0x0d, 0x66, 0x23, 0x77, 0xe1, 0xbf, 0x22,
- 0x66, 0xbc, 0xd4, 0x3a, 0xd8, 0x98, 0xf1, 0x30, 0x0f, 0x3e, 0x66, 0x3c, 0xdc, 0x43, 0x8d, 0x19,
- 0xcf, 0xe4, 0x61, 0xc6, 0x4c, 0xe8, 0x90, 0x7d, 0x62, 0xe6, 0x41, 0x0a, 0x50, 0x6f, 0xce, 0x23,
- 0x13, 0xa6, 0xbc, 0xa1, 0xeb, 0x20, 0x7a, 0x7d, 0x70, 0xff, 0x52, 0x6d, 0x5d, 0x41, 0x77, 0x8d,
- 0x6a, 0xc9, 0x91, 0x46, 0x35, 0x7a, 0x10, 0x23, 0x6d, 0x70, 0x19, 0x88, 0x1d, 0x6b, 0x6f, 0xc2,
- 0x34, 0xf3, 0x67, 0x41, 0x6d, 0xfc, 0x59, 0x50, 0xb2, 0x1e, 0x4c, 0x81, 0x01, 0x24, 0xaa, 0xc0,
- 0x2c, 0x09, 0x8f, 0x63, 0x93, 0x63, 0x1d, 0x63, 0x41, 0xcc, 0x7e, 0x91, 0x39, 0x2c, 0x82, 0xaa,
- 0xff, 0xd2, 0xed, 0x5b, 0xaf, 0x2a, 0xfc, 0x6d, 0x7d, 0x7b, 0x78, 0x53, 0xf1, 0xbf, 0xc2, 0xbd,
- 0x5f, 0x27, 0x61, 0xa1, 0xbb, 0xb1, 0x8e, 0xf5, 0xf9, 0xe3, 0x4e, 0xdf, 0x6f, 0x38, 0xc9, 0xb1,
- 0x36, 0x1d, 0xcc, 0x6a, 0x23, 0x7e, 0x9d, 0x0d, 0x7b, 0x22, 0x75, 0xe0, 0x9e, 0xd0, 0xbf, 0x89,
- 0x72, 0x34, 0xfe, 0x27, 0xa2, 0x98, 0x0f, 0xaa, 0xc9, 0x43, 0xfa, 0xa0, 0xfa, 0x84, 0x69, 0xfa,
- 0x36, 0x09, 0xc7, 0x9f, 0xfe, 0xa7, 0x30, 0xfa, 0xd7, 0xc7, 0x1f, 0x7b, 0xf9, 0x7a, 0xfa, 0xcf,
- 0xc0, 0x28, 0x81, 0x5c, 0xbc, 0x70, 0xf7, 0x61, 0x76, 0xe2, 0xde, 0xc3, 0xec, 0xc4, 0xfd, 0x87,
- 0xd9, 0x89, 0x4f, 0xda, 0xd9, 0xc4, 0xdd, 0x76, 0x36, 0x71, 0xaf, 0x9d, 0x4d, 0xdc, 0x6f, 0x67,
- 0x13, 0xbf, 0xb6, 0xb3, 0x89, 0xcf, 0x7f, 0xcb, 0x4e, 0xbc, 0x9b, 0x1d, 0xfc, 0x27, 0xe3, 0x9f,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x05, 0xaa, 0x18, 0x85, 0x1c, 0x00, 0x00,
-}
+func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..ab1fe8c8
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.protomessage.pb.go
@@ -0,0 +1,58 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v2beta1
+
+func (*ContainerResourceMetricSource) ProtoMessage() {}
+
+func (*ContainerResourceMetricStatus) ProtoMessage() {}
+
+func (*CrossVersionObjectReference) ProtoMessage() {}
+
+func (*ExternalMetricSource) ProtoMessage() {}
+
+func (*ExternalMetricStatus) ProtoMessage() {}
+
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (*MetricSpec) ProtoMessage() {}
+
+func (*MetricStatus) ProtoMessage() {}
+
+func (*ObjectMetricSource) ProtoMessage() {}
+
+func (*ObjectMetricStatus) ProtoMessage() {}
+
+func (*PodsMetricSource) ProtoMessage() {}
+
+func (*PodsMetricStatus) ProtoMessage() {}
+
+func (*ResourceMetricSource) ProtoMessage() {}
+
+func (*ResourceMetricStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..2b8674e6
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.model_name.go
@@ -0,0 +1,112 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v2beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ContainerResourceMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CrossVersionObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.CrossVersionObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ExternalMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ExternalMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscaler) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerCondition) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerList) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.MetricSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.MetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ObjectMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ObjectMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.PodsMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.PodsMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta1.ResourceMetricStatus"
+}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/operator/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
index 15003729..5ea9edb6 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.autoscaling.v2beta2
 
package v2beta2
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
index 74197950..87c87386 100644
--- a/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
@@ -24,843 +24,62 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (m *ContainerResourceMetricSource) Reset() { *m = ContainerResourceMetricSource{} }
-func (*ContainerResourceMetricSource) ProtoMessage() {}
-func (*ContainerResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{0}
-}
-func (m *ContainerResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricSource.Merge(m, src)
-}
-func (m *ContainerResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerResourceMetricSource proto.InternalMessageInfo
-
-func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-func (*ContainerResourceMetricStatus) ProtoMessage() {}
-func (*ContainerResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{1}
-}
-func (m *ContainerResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResourceMetricStatus.Merge(m, src)
-}
-func (m *ContainerResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResourceMetricStatus.DiscardUnknown(m)
-}
+func (m *ContainerResourceMetricStatus) Reset() { *m = ContainerResourceMetricStatus{} }
-var xxx_messageInfo_ContainerResourceMetricStatus proto.InternalMessageInfo
+func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} }
-func (*CrossVersionObjectReference) ProtoMessage() {}
-func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{2}
-}
-func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CrossVersionObjectReference.Merge(m, src)
-}
-func (m *CrossVersionObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *CrossVersionObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m)
-}
+func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo
+func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} }
-func (*ExternalMetricSource) ProtoMessage() {}
-func (*ExternalMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{3}
-}
-func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricSource.Merge(m, src)
-}
-func (m *ExternalMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m)
-}
+func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} }
-var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo
+func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} }
-func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} }
-func (*ExternalMetricStatus) ProtoMessage() {}
-func (*ExternalMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{4}
-}
-func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExternalMetricStatus.Merge(m, src)
-}
-func (m *ExternalMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ExternalMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} }
-func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} }
-func (*HPAScalingPolicy) ProtoMessage() {}
-func (*HPAScalingPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{5}
-}
-func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HPAScalingPolicy.Merge(m, src)
-}
-func (m *HPAScalingPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *HPAScalingPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} }
-func (*HPAScalingRules) ProtoMessage() {}
-func (*HPAScalingRules) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{6}
-}
-func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HPAScalingRules) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HPAScalingRules.Merge(m, src)
-}
-func (m *HPAScalingRules) XXX_Size() int {
- return m.Size()
-}
-func (m *HPAScalingRules) XXX_DiscardUnknown() {
- xxx_messageInfo_HPAScalingRules.DiscardUnknown(m)
-}
+func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo
+func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} }
-func (*HorizontalPodAutoscaler) ProtoMessage() {}
-func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{7}
-}
-func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src)
-}
-func (m *HorizontalPodAutoscaler) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m)
-}
+func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} }
-var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo
+func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} }
-func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {}
-func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{8}
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m)
-}
+func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo
+func (m *MetricTarget) Reset() { *m = MetricTarget{} }
-func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} }
-func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
-func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{9}
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m)
-}
+func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo
+func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} }
-func (*HorizontalPodAutoscalerList) ProtoMessage() {}
-func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{10}
-}
-func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerList) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m)
-}
+func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo
+func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} }
-func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
-func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{11}
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m)
-}
+func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo
+func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} }
-func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
-func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{12}
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src)
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo
-
-func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} }
-func (*MetricIdentifier) ProtoMessage() {}
-func (*MetricIdentifier) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{13}
-}
-func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricIdentifier) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricIdentifier.Merge(m, src)
-}
-func (m *MetricIdentifier) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricIdentifier) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricIdentifier.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo
-
-func (m *MetricSpec) Reset() { *m = MetricSpec{} }
-func (*MetricSpec) ProtoMessage() {}
-func (*MetricSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{14}
-}
-func (m *MetricSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricSpec.Merge(m, src)
-}
-func (m *MetricSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricSpec proto.InternalMessageInfo
-
-func (m *MetricStatus) Reset() { *m = MetricStatus{} }
-func (*MetricStatus) ProtoMessage() {}
-func (*MetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{15}
-}
-func (m *MetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricStatus.Merge(m, src)
-}
-func (m *MetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricStatus proto.InternalMessageInfo
-
-func (m *MetricTarget) Reset() { *m = MetricTarget{} }
-func (*MetricTarget) ProtoMessage() {}
-func (*MetricTarget) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{16}
-}
-func (m *MetricTarget) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricTarget) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricTarget.Merge(m, src)
-}
-func (m *MetricTarget) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricTarget) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricTarget.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricTarget proto.InternalMessageInfo
-
-func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} }
-func (*MetricValueStatus) ProtoMessage() {}
-func (*MetricValueStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{17}
-}
-func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MetricValueStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricValueStatus.Merge(m, src)
-}
-func (m *MetricValueStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *MetricValueStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricValueStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo
-
-func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} }
-func (*ObjectMetricSource) ProtoMessage() {}
-func (*ObjectMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{18}
-}
-func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricSource.Merge(m, src)
-}
-func (m *ObjectMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo
-
-func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} }
-func (*ObjectMetricStatus) ProtoMessage() {}
-func (*ObjectMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{19}
-}
-func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMetricStatus.Merge(m, src)
-}
-func (m *ObjectMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo
-
-func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} }
-func (*PodsMetricSource) ProtoMessage() {}
-func (*PodsMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{20}
-}
-func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricSource.Merge(m, src)
-}
-func (m *PodsMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo
-
-func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} }
-func (*PodsMetricStatus) ProtoMessage() {}
-func (*PodsMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{21}
-}
-func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodsMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodsMetricStatus.Merge(m, src)
-}
-func (m *PodsMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodsMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo
-
-func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} }
-func (*ResourceMetricSource) ProtoMessage() {}
-func (*ResourceMetricSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{22}
-}
-func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricSource.Merge(m, src)
-}
-func (m *ResourceMetricSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo
-
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
-func (*ResourceMetricStatus) ProtoMessage() {}
-func (*ResourceMetricStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_1076ab1fac987148, []int{23}
-}
-func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceMetricStatus.Merge(m, src)
-}
-func (m *ResourceMetricStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceMetricStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ContainerResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricSource")
- proto.RegisterType((*ContainerResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ContainerResourceMetricStatus")
- proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference")
- proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource")
- proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus")
- proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingPolicy")
- proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingRules")
- proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler")
- proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior")
- proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition")
- proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList")
- proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec")
- proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus")
- proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2beta2.MetricIdentifier")
- proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta2.MetricSpec")
- proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricStatus")
- proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2beta2.MetricTarget")
- proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricValueStatus")
- proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricSource")
- proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricStatus")
- proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricSource")
- proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricStatus")
- proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource")
- proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_1076ab1fac987148)
-}
-
-var fileDescriptor_1076ab1fac987148 = []byte{
- // 1727 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x1b, 0xc7,
- 0x15, 0xd7, 0x92, 0xd4, 0xd7, 0x50, 0x9f, 0xe3, 0x2f, 0x42, 0x86, 0x49, 0x61, 0x6b, 0xb4, 0xae,
- 0xd1, 0x2e, 0x2b, 0x56, 0x6d, 0x0d, 0x18, 0x45, 0xab, 0x95, 0xdb, 0xda, 0xb0, 0x64, 0xab, 0x43,
- 0x59, 0x2d, 0x02, 0xd9, 0xc8, 0x70, 0x77, 0x44, 0x4d, 0x44, 0xee, 0x12, 0xbb, 0x4b, 0xda, 0x72,
- 0x80, 0x20, 0x08, 0x90, 0x7b, 0x90, 0x20, 0xd7, 0xfc, 0x09, 0x09, 0x7c, 0x09, 0x90, 0x63, 0x3e,
- 0x60, 0x18, 0x41, 0x10, 0xf8, 0x16, 0xe7, 0x42, 0xc4, 0xcc, 0x31, 0xc7, 0xdc, 0x7c, 0x0a, 0xe6,
- 0x63, 0x3f, 0x49, 0x89, 0x94, 0x20, 0x29, 0xd0, 0x8d, 0x3b, 0xf3, 0xde, 0xef, 0xcd, 0x7b, 0xf3,
- 0x7b, 0x6f, 0xde, 0x0c, 0x81, 0xb6, 0x73, 0xcd, 0xd5, 0xa8, 0x5d, 0xc4, 0x0d, 0x5a, 0xc4, 0x4d,
- 0xcf, 0x76, 0x0d, 0x5c, 0xa3, 0x56, 0xb5, 0xd8, 0x2a, 0x55, 0x88, 0x87, 0x4b, 0xc5, 0x2a, 0xb1,
- 0x88, 0x83, 0x3d, 0x62, 0x6a, 0x0d, 0xc7, 0xf6, 0x6c, 0x98, 0x17, 0xf2, 0x1a, 0x6e, 0x50, 0x2d,
- 0x22, 0xaf, 0x49, 0xf9, 0xb9, 0x3f, 0x56, 0xa9, 0xb7, 0xdd, 0xac, 0x68, 0x86, 0x5d, 0x2f, 0x56,
- 0xed, 0xaa, 0x5d, 0xe4, 0x6a, 0x95, 0xe6, 0x16, 0xff, 0xe2, 0x1f, 0xfc, 0x97, 0x80, 0x9b, 0x53,
- 0x23, 0xe6, 0x0d, 0xdb, 0x21, 0xc5, 0xd6, 0x42, 0xd2, 0xe4, 0xdc, 0x62, 0x28, 0x53, 0xc7, 0xc6,
- 0x36, 0xb5, 0x88, 0xb3, 0x5b, 0x6c, 0xec, 0x54, 0xb9, 0x92, 0x43, 0x5c, 0xbb, 0xe9, 0x18, 0xe4,
- 0x40, 0x5a, 0x6e, 0xb1, 0x4e, 0x3c, 0xdc, 0xcb, 0x56, 0x71, 0x2f, 0x2d, 0xa7, 0x69, 0x79, 0xb4,
- 0xde, 0x6d, 0xe6, 0xaf, 0xfd, 0x14, 0x5c, 0x63, 0x9b, 0xd4, 0x71, 0x52, 0x4f, 0xfd, 0x49, 0x01,
- 0x97, 0x96, 0x6d, 0xcb, 0xc3, 0x4c, 0x03, 0x49, 0x27, 0x56, 0x89, 0xe7, 0x50, 0xa3, 0xcc, 0x7f,
- 0xc3, 0x65, 0x90, 0xb1, 0x70, 0x9d, 0xe4, 0x94, 0x79, 0xe5, 0xca, 0xb8, 0x5e, 0x7c, 0xd6, 0x2e,
- 0x0c, 0x75, 0xda, 0x85, 0xcc, 0x1d, 0x5c, 0x27, 0xaf, 0xda, 0x85, 0x42, 0x77, 0xe0, 0x34, 0x1f,
- 0x86, 0x89, 0x20, 0xae, 0x0c, 0xd7, 0xc1, 0x88, 0x87, 0x9d, 0x2a, 0xf1, 0x72, 0xa9, 0x79, 0xe5,
- 0x4a, 0xb6, 0xf4, 0x07, 0x6d, 0xff, 0xfd, 0xd3, 0xc4, 0x12, 0xd6, 0xb9, 0x8e, 0x3e, 0x25, 0x8d,
- 0x8e, 0x88, 0x6f, 0x24, 0xb1, 0x60, 0x11, 0x8c, 0x1b, 0xfe, 0xda, 0x73, 0x69, 0xbe, 0xbe, 0x59,
- 0x29, 0x3a, 0x1e, 0x3a, 0x15, 0xca, 0xa8, 0x3f, 0xef, 0xe3, 0xad, 0x87, 0xbd, 0xa6, 0x7b, 0x34,
- 0xde, 0x6e, 0x82, 0x51, 0xa3, 0xe9, 0x38, 0xc4, 0xf2, 0xdd, 0x5d, 0x18, 0xcc, 0xdd, 0x0d, 0x5c,
- 0x6b, 0x12, 0xb1, 0x10, 0x7d, 0x5a, 0x9a, 0x1e, 0x5d, 0x16, 0x48, 0xc8, 0x87, 0x3c, 0xb8, 0xd7,
- 0x1f, 0x2a, 0xe0, 0xe2, 0xb2, 0x63, 0xbb, 0xee, 0x06, 0x71, 0x5c, 0x6a, 0x5b, 0x77, 0x2b, 0x6f,
- 0x10, 0xc3, 0x43, 0x64, 0x8b, 0x38, 0xc4, 0x32, 0x08, 0x9c, 0x07, 0x99, 0x1d, 0x6a, 0x99, 0xd2,
- 0xe7, 0x09, 0xdf, 0xe7, 0xdb, 0xd4, 0x32, 0x11, 0x9f, 0x61, 0x12, 0x3c, 0x2a, 0xa9, 0xb8, 0x44,
- 0xc4, 0xe5, 0x12, 0x00, 0xb8, 0x41, 0xa5, 0x01, 0xb9, 0x2a, 0x28, 0xe5, 0xc0, 0xd2, 0xda, 0x2d,
- 0x39, 0x83, 0x22, 0x52, 0xea, 0x53, 0x05, 0x9c, 0xfd, 0xd7, 0x23, 0x8f, 0x38, 0x16, 0xae, 0xc5,
- 0x28, 0xf7, 0x7f, 0x30, 0x52, 0xe7, 0xdf, 0x7c, 0x49, 0xd9, 0xd2, 0x9f, 0x06, 0x0b, 0xdf, 0x2d,
- 0x93, 0x58, 0x1e, 0xdd, 0xa2, 0xc4, 0x09, 0x19, 0x23, 0x66, 0x90, 0xc4, 0x3b, 0x1e, 0x1e, 0xaa,
- 0xdf, 0x76, 0x3b, 0x22, 0xd8, 0x74, 0x7c, 0x8e, 0x1c, 0x2b, 0xc5, 0xd4, 0x8f, 0x15, 0x30, 0x73,
- 0x73, 0x6d, 0xa9, 0x2c, 0x20, 0xd6, 0xec, 0x1a, 0x35, 0x76, 0xe1, 0x35, 0x90, 0xf1, 0x76, 0x1b,
- 0x7e, 0x6a, 0x5c, 0xf6, 0x49, 0xb0, 0xbe, 0xdb, 0x60, 0xa9, 0x71, 0x36, 0x29, 0xcf, 0xc6, 0x11,
- 0xd7, 0x80, 0xbf, 0x01, 0xc3, 0x2d, 0x66, 0x97, 0x2f, 0x75, 0x58, 0x9f, 0x94, 0xaa, 0xc3, 0x7c,
- 0x31, 0x48, 0xcc, 0xc1, 0xeb, 0x60, 0xb2, 0x41, 0x1c, 0x6a, 0x9b, 0x65, 0x62, 0xd8, 0x96, 0xe9,
- 0x72, 0x12, 0x0d, 0xeb, 0xe7, 0xa4, 0xf0, 0xe4, 0x5a, 0x74, 0x12, 0xc5, 0x65, 0xd5, 0x8f, 0x52,
- 0x60, 0x3a, 0x5c, 0x00, 0x6a, 0xd6, 0x88, 0x0b, 0x1f, 0x80, 0x39, 0xd7, 0xc3, 0x15, 0x5a, 0xa3,
- 0x8f, 0xb1, 0x47, 0x6d, 0xeb, 0x7f, 0xd4, 0x32, 0xed, 0x87, 0x71, 0xf4, 0x7c, 0xa7, 0x5d, 0x98,
- 0x2b, 0xef, 0x29, 0x85, 0xf6, 0x41, 0x80, 0xb7, 0xc1, 0x84, 0x4b, 0x6a, 0xc4, 0xf0, 0x84, 0xbf,
- 0x32, 0x2e, 0xbf, 0xeb, 0xb4, 0x0b, 0x13, 0xe5, 0xc8, 0xf8, 0xab, 0x76, 0xe1, 0x4c, 0x2c, 0x30,
- 0x62, 0x12, 0xc5, 0x94, 0xe1, 0x03, 0x30, 0xd6, 0x60, 0xbf, 0x28, 0x71, 0x73, 0xa9, 0xf9, 0xf4,
- 0x20, 0x5c, 0x49, 0x06, 0x5c, 0x9f, 0x91, 0xa1, 0x1a, 0x5b, 0x93, 0x48, 0x28, 0xc0, 0x54, 0x3f,
- 0x4f, 0x81, 0x0b, 0x37, 0x6d, 0x87, 0x3e, 0x66, 0x55, 0xa1, 0xb6, 0x66, 0x9b, 0x4b, 0x12, 0x91,
- 0x38, 0xf0, 0x75, 0x30, 0xc6, 0xce, 0x21, 0x13, 0x7b, 0xb8, 0x07, 0x4f, 0x83, 0xe3, 0x44, 0x6b,
- 0xec, 0x54, 0xd9, 0x80, 0xab, 0x31, 0x69, 0xad, 0xb5, 0xa0, 0x89, 0x42, 0xb2, 0x4a, 0x3c, 0x1c,
- 0xe6, 0x7a, 0x38, 0x86, 0x02, 0x54, 0x78, 0x1f, 0x64, 0xdc, 0x06, 0x31, 0x24, 0x55, 0xaf, 0xf7,
- 0xf5, 0xac, 0xf7, 0x42, 0xcb, 0x0d, 0x62, 0x84, 0xc5, 0x87, 0x7d, 0x21, 0x0e, 0x0b, 0x09, 0x18,
- 0x71, 0x39, 0xa5, 0xf9, 0xae, 0x66, 0x4b, 0x7f, 0x3f, 0xac, 0x01, 0x91, 0x17, 0x41, 0xce, 0x89,
- 0x6f, 0x24, 0xc1, 0xd5, 0xef, 0x14, 0x50, 0xd8, 0x43, 0x53, 0x27, 0xdb, 0xb8, 0x45, 0x6d, 0x07,
- 0x6e, 0x80, 0x51, 0x3e, 0x72, 0xaf, 0x21, 0x43, 0x59, 0x1c, 0x7c, 0x1b, 0x39, 0x6d, 0xf5, 0x2c,
- 0xcb, 0xc8, 0xb2, 0xc0, 0x40, 0x3e, 0x18, 0xdc, 0x04, 0xe3, 0xfc, 0xe7, 0x0d, 0xfb, 0xa1, 0x25,
- 0xc3, 0x78, 0x60, 0xe4, 0x49, 0x76, 0x42, 0x94, 0x7d, 0x14, 0x14, 0x02, 0xaa, 0xef, 0xa6, 0xc1,
- 0xfc, 0x1e, 0x9e, 0x2d, 0xdb, 0x96, 0x49, 0x19, 0xf9, 0xe1, 0xcd, 0x58, 0xfe, 0x2f, 0x26, 0xf2,
- 0xff, 0x72, 0x3f, 0xfd, 0x48, 0x3d, 0x58, 0x09, 0xf6, 0x2b, 0x15, 0xc3, 0x92, 0x01, 0x7f, 0xd5,
- 0x2e, 0xf4, 0xe8, 0xc7, 0xb4, 0x00, 0x29, 0xbe, 0x2d, 0xb0, 0x05, 0x60, 0x0d, 0xbb, 0xde, 0xba,
- 0x83, 0x2d, 0x57, 0x58, 0xa2, 0x75, 0x22, 0x99, 0x70, 0x75, 0x30, 0x22, 0x33, 0x0d, 0x7d, 0x4e,
- 0xae, 0x02, 0xae, 0x74, 0xa1, 0xa1, 0x1e, 0x16, 0xe0, 0x6f, 0xc1, 0x88, 0x43, 0xb0, 0x6b, 0x5b,
- 0xb9, 0x0c, 0xf7, 0x22, 0xa0, 0x0d, 0xe2, 0xa3, 0x48, 0xce, 0xc2, 0xdf, 0x83, 0xd1, 0x3a, 0x71,
- 0x5d, 0x5c, 0x25, 0xb9, 0x61, 0x2e, 0x18, 0xd4, 0xdd, 0x55, 0x31, 0x8c, 0xfc, 0x79, 0xf5, 0x7b,
- 0x05, 0x5c, 0xdc, 0x23, 0x8e, 0x2b, 0xd4, 0xf5, 0xe0, 0x66, 0x57, 0xa6, 0x6a, 0x83, 0x39, 0xc8,
- 0xb4, 0x79, 0x9e, 0x06, 0x35, 0xc2, 0x1f, 0x89, 0x64, 0xe9, 0x26, 0x18, 0xa6, 0x1e, 0xa9, 0xfb,
- 0x05, 0xe8, 0x6f, 0x87, 0xcc, 0xa2, 0xb0, 0xbe, 0xdf, 0x62, 0x68, 0x48, 0x80, 0xaa, 0x4f, 0xd3,
- 0x7b, 0xfa, 0xc6, 0x52, 0x19, 0xbe, 0x09, 0xa6, 0xf8, 0x97, 0x3c, 0x5b, 0xc9, 0x96, 0xf4, 0xb0,
- 0x6f, 0xb5, 0xd8, 0xa7, 0xb5, 0xd1, 0xcf, 0xcb, 0xa5, 0x4c, 0x95, 0x63, 0xd0, 0x28, 0x61, 0x0a,
- 0x2e, 0x80, 0x6c, 0x9d, 0x5a, 0x88, 0x34, 0x6a, 0xd4, 0xc0, 0xae, 0x3c, 0xa7, 0xa6, 0x3b, 0xed,
- 0x42, 0x76, 0x35, 0x1c, 0x46, 0x51, 0x19, 0xf8, 0x17, 0x90, 0xad, 0xe3, 0x47, 0x81, 0x8a, 0x38,
- 0x4f, 0xce, 0x48, 0x7b, 0xd9, 0xd5, 0x70, 0x0a, 0x45, 0xe5, 0xe0, 0x3d, 0xc6, 0x06, 0x76, 0x12,
- 0xbb, 0xb9, 0x0c, 0x0f, 0xf3, 0xd5, 0xc1, 0x0e, 0x6e, 0x5e, 0xfc, 0x22, 0xcc, 0xe1, 0x10, 0xc8,
- 0xc7, 0x82, 0x14, 0x8c, 0x55, 0x64, 0x0d, 0xe2, 0x2c, 0xcb, 0x96, 0xfe, 0x71, 0xd8, 0xed, 0x93,
- 0x30, 0xfa, 0x04, 0xa3, 0x89, 0xff, 0x85, 0x02, 0x78, 0xf5, 0xd3, 0x0c, 0xb8, 0xb4, 0x6f, 0x01,
- 0x85, 0xff, 0x06, 0xd0, 0xae, 0xb8, 0xc4, 0x69, 0x11, 0xf3, 0x3f, 0xe2, 0xbe, 0xc1, 0x9a, 0x42,
- 0xb6, 0x9d, 0x69, 0xfd, 0x3c, 0xcb, 0xb0, 0xbb, 0x5d, 0xb3, 0xa8, 0x87, 0x06, 0x34, 0xc0, 0x24,
- 0xcb, 0x3b, 0xb1, 0x77, 0x54, 0xf6, 0x9f, 0x07, 0x4b, 0xea, 0x59, 0xd6, 0x3a, 0xac, 0x44, 0x41,
- 0x50, 0x1c, 0x13, 0x2e, 0x81, 0x69, 0xd9, 0xf6, 0x24, 0xf6, 0xf2, 0x82, 0x0c, 0xf6, 0xf4, 0x72,
- 0x7c, 0x1a, 0x25, 0xe5, 0x19, 0x84, 0x49, 0x5c, 0xea, 0x10, 0x33, 0x80, 0xc8, 0xc4, 0x21, 0x6e,
- 0xc4, 0xa7, 0x51, 0x52, 0x1e, 0xd6, 0xc0, 0x94, 0x44, 0x95, 0x5b, 0x9b, 0x1b, 0xe6, 0xec, 0x18,
- 0xb0, 0x41, 0x95, 0x27, 0x57, 0x40, 0xf7, 0xe5, 0x18, 0x16, 0x4a, 0x60, 0x43, 0x0f, 0x00, 0xc3,
- 0xaf, 0xa6, 0x6e, 0x6e, 0x84, 0x5b, 0xfa, 0xe7, 0x21, 0xf9, 0x12, 0x94, 0xe5, 0xb0, 0x07, 0x08,
- 0x86, 0x5c, 0x14, 0xb1, 0xa3, 0x7e, 0xa0, 0x80, 0x99, 0x64, 0x83, 0x1b, 0x5c, 0x2d, 0x94, 0x3d,
- 0xaf, 0x16, 0xf7, 0xc1, 0x98, 0x68, 0x95, 0x6c, 0x47, 0x12, 0xe0, 0xcf, 0x03, 0x16, 0x3d, 0x5c,
- 0x21, 0xb5, 0xb2, 0x54, 0x15, 0x74, 0xf6, 0xbf, 0x50, 0x00, 0xa9, 0x7e, 0x92, 0x01, 0x20, 0x4c,
- 0x31, 0xb8, 0x18, 0x3b, 0xe5, 0xe6, 0x13, 0xa7, 0xdc, 0x4c, 0xf4, 0x9e, 0x12, 0x39, 0xd1, 0x36,
- 0xc0, 0x88, 0xcd, 0x4b, 0x8f, 0x5c, 0x61, 0xa9, 0x5f, 0x30, 0x83, 0x36, 0x29, 0x40, 0xd3, 0x01,
- 0x3b, 0x3b, 0x64, 0x01, 0x93, 0x68, 0xf0, 0x0e, 0xc8, 0x34, 0x6c, 0xd3, 0xef, 0x6b, 0xfa, 0xb6,
- 0x84, 0x6b, 0xb6, 0xe9, 0xc6, 0x30, 0xc7, 0xd8, 0xda, 0xd9, 0x28, 0xe2, 0x38, 0xac, 0xcd, 0xf4,
- 0x5f, 0x2a, 0x38, 0x45, 0xb3, 0xa5, 0xc5, 0x7e, 0x98, 0xbd, 0x1e, 0x05, 0x44, 0x30, 0xfd, 0x19,
- 0x14, 0x60, 0xc2, 0x77, 0x14, 0x30, 0x6b, 0x24, 0x2f, 0xd8, 0xb9, 0xd1, 0xc1, 0xba, 0xb2, 0x7d,
- 0xdf, 0x21, 0xf4, 0x73, 0x9d, 0x76, 0x61, 0xb6, 0x4b, 0x04, 0x75, 0x9b, 0x63, 0x4e, 0x12, 0x79,
- 0x1b, 0x93, 0xb5, 0xb0, 0xaf, 0x93, 0xbd, 0xae, 0xa1, 0xc2, 0x49, 0x7f, 0x06, 0x05, 0x98, 0xea,
- 0x93, 0x0c, 0x98, 0x88, 0x5d, 0xf3, 0x7e, 0x0d, 0xce, 0x88, 0x84, 0x3f, 0x5a, 0xce, 0x08, 0xcc,
- 0xa3, 0xe7, 0x8c, 0xc0, 0x3d, 0x51, 0xce, 0x08, 0x93, 0x27, 0xc9, 0x99, 0x88, 0x93, 0x3d, 0x38,
- 0xf3, 0x65, 0xca, 0xe7, 0x8c, 0x68, 0x3a, 0x06, 0xe3, 0x8c, 0x90, 0x8d, 0x70, 0xe6, 0x6e, 0xf4,
- 0x26, 0xdd, 0xa7, 0xfb, 0xd3, 0xfc, 0x08, 0x6b, 0xff, 0x6d, 0x62, 0xcb, 0xa3, 0xde, 0xae, 0x3e,
- 0xde, 0x75, 0xeb, 0x36, 0xc1, 0x04, 0x6e, 0x11, 0x07, 0x57, 0x09, 0x1f, 0x96, 0xa4, 0x39, 0x28,
- 0xee, 0x0c, 0xbb, 0xf4, 0x2e, 0x45, 0x70, 0x50, 0x0c, 0x95, 0x35, 0x04, 0xf2, 0xfb, 0x9e, 0x17,
- 0xdc, 0xa6, 0xe5, 0x19, 0xc9, 0x1b, 0x82, 0xa5, 0xae, 0x59, 0xd4, 0x43, 0x43, 0x7d, 0x3f, 0x05,
- 0x66, 0xbb, 0xde, 0x31, 0xc2, 0xa0, 0x28, 0xc7, 0x14, 0x94, 0xd4, 0x09, 0x06, 0x25, 0x7d, 0xe0,
- 0xa0, 0x7c, 0x95, 0x02, 0xb0, 0xfb, 0x38, 0x81, 0x6f, 0xf1, 0xa6, 0xc4, 0x70, 0x68, 0x85, 0x98,
- 0x62, 0xfa, 0x28, 0x1a, 0xea, 0x68, 0x47, 0x13, 0xc5, 0x46, 0x49, 0x63, 0xc7, 0xf4, 0xe4, 0x1b,
- 0xbe, 0xa8, 0xa5, 0x8f, 0xf6, 0x45, 0x4d, 0xfd, 0x26, 0x19, 0xc6, 0x53, 0xfd, 0x84, 0xd7, 0x6b,
- 0xfb, 0xd3, 0x27, 0xb8, 0xfd, 0xea, 0x17, 0x0a, 0x98, 0x49, 0xb6, 0x23, 0xa7, 0xee, 0x61, 0xf7,
- 0xeb, 0xb8, 0x13, 0xa7, 0xfb, 0x51, 0xf7, 0x89, 0x02, 0xce, 0x9e, 0xb2, 0x7f, 0x78, 0xd4, 0xcf,
- 0xba, 0xd7, 0x7c, 0x5a, 0xfe, 0xa7, 0xd1, 0x6f, 0x3c, 0x7b, 0x99, 0x1f, 0x7a, 0xfe, 0x32, 0x3f,
- 0xf4, 0xe2, 0x65, 0x7e, 0xe8, 0xed, 0x4e, 0x5e, 0x79, 0xd6, 0xc9, 0x2b, 0xcf, 0x3b, 0x79, 0xe5,
- 0x45, 0x27, 0xaf, 0xfc, 0xd0, 0xc9, 0x2b, 0xef, 0xfd, 0x98, 0x1f, 0x7a, 0x2d, 0xbf, 0xff, 0x1f,
- 0x9f, 0xbf, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x27, 0xde, 0xc0, 0x19, 0x1d, 0x00, 0x00,
-}
+func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} }
func (m *ContainerResourceMetricSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.protomessage.pb.go
new file mode 100644
index 00000000..6064b1eb
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.protomessage.pb.go
@@ -0,0 +1,70 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v2beta2
+
+func (*ContainerResourceMetricSource) ProtoMessage() {}
+
+func (*ContainerResourceMetricStatus) ProtoMessage() {}
+
+func (*CrossVersionObjectReference) ProtoMessage() {}
+
+func (*ExternalMetricSource) ProtoMessage() {}
+
+func (*ExternalMetricStatus) ProtoMessage() {}
+
+func (*HPAScalingPolicy) ProtoMessage() {}
+
+func (*HPAScalingRules) ProtoMessage() {}
+
+func (*HorizontalPodAutoscaler) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerList) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
+
+func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
+
+func (*MetricIdentifier) ProtoMessage() {}
+
+func (*MetricSpec) ProtoMessage() {}
+
+func (*MetricStatus) ProtoMessage() {}
+
+func (*MetricTarget) ProtoMessage() {}
+
+func (*MetricValueStatus) ProtoMessage() {}
+
+func (*ObjectMetricSource) ProtoMessage() {}
+
+func (*ObjectMetricStatus) ProtoMessage() {}
+
+func (*PodsMetricSource) ProtoMessage() {}
+
+func (*PodsMetricStatus) ProtoMessage() {}
+
+func (*ResourceMetricSource) ProtoMessage() {}
+
+func (*ResourceMetricStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.model_name.go
new file mode 100644
index 00000000..20e8a854
--- /dev/null
+++ b/operator/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.model_name.go
@@ -0,0 +1,142 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v2beta2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ContainerResourceMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CrossVersionObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.CrossVersionObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ExternalMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExternalMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ExternalMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HPAScalingPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HPAScalingPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HPAScalingRules) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HPAScalingRules"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscaler) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerBehavior) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerCondition) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerList) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HorizontalPodAutoscalerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricIdentifier) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.MetricIdentifier"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricSpec) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.MetricSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.MetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricTarget) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.MetricTarget"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MetricValueStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.MetricValueStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ObjectMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ObjectMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.PodsMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodsMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.PodsMetricStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricSource) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ResourceMetricSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceMetricStatus) OpenAPIModelName() string {
+ return "io.k8s.api.autoscaling.v2beta2.ResourceMetricStatus"
+}
diff --git a/operator/vendor/k8s.io/api/batch/v1/doc.go b/operator/vendor/k8s.io/api/batch/v1/doc.go
index 69088e2c..1525ce2a 100644
--- a/operator/vendor/k8s.io/api/batch/v1/doc.go
+++ b/operator/vendor/k8s.io/api/batch/v1/doc.go
@@ -18,4 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.batch.v1
+
package v1
diff --git a/operator/vendor/k8s.io/api/batch/v1/generated.pb.go b/operator/vendor/k8s.io/api/batch/v1/generated.pb.go
index 6108a608..099350e7 100644
--- a/operator/vendor/k8s.io/api/batch/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/batch/v1/generated.pb.go
@@ -24,12 +24,10 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -37,642 +35,43 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CronJob) Reset() { *m = CronJob{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CronJobList) Reset() { *m = CronJobList{} }
-func (m *CronJob) Reset() { *m = CronJob{} }
-func (*CronJob) ProtoMessage() {}
-func (*CronJob) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{0}
-}
-func (m *CronJob) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJob) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJob.Merge(m, src)
-}
-func (m *CronJob) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJob) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJob.DiscardUnknown(m)
-}
+func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
-var xxx_messageInfo_CronJob proto.InternalMessageInfo
+func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
-func (m *CronJobList) Reset() { *m = CronJobList{} }
-func (*CronJobList) ProtoMessage() {}
-func (*CronJobList) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{1}
-}
-func (m *CronJobList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobList.Merge(m, src)
-}
-func (m *CronJobList) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobList) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobList.DiscardUnknown(m)
-}
+func (m *Job) Reset() { *m = Job{} }
-var xxx_messageInfo_CronJobList proto.InternalMessageInfo
-
-func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
-func (*CronJobSpec) ProtoMessage() {}
-func (*CronJobSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{2}
-}
-func (m *CronJobSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobSpec.Merge(m, src)
-}
-func (m *CronJobSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo
-
-func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
-func (*CronJobStatus) ProtoMessage() {}
-func (*CronJobStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{3}
-}
-func (m *CronJobStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobStatus.Merge(m, src)
-}
-func (m *CronJobStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo
-
-func (m *Job) Reset() { *m = Job{} }
-func (*Job) ProtoMessage() {}
-func (*Job) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{4}
-}
-func (m *Job) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Job) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Job.Merge(m, src)
-}
-func (m *Job) XXX_Size() int {
- return m.Size()
-}
-func (m *Job) XXX_DiscardUnknown() {
- xxx_messageInfo_Job.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Job proto.InternalMessageInfo
-
-func (m *JobCondition) Reset() { *m = JobCondition{} }
-func (*JobCondition) ProtoMessage() {}
-func (*JobCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{5}
-}
-func (m *JobCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobCondition.Merge(m, src)
-}
-func (m *JobCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *JobCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_JobCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JobCondition proto.InternalMessageInfo
-
-func (m *JobList) Reset() { *m = JobList{} }
-func (*JobList) ProtoMessage() {}
-func (*JobList) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{6}
-}
-func (m *JobList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobList.Merge(m, src)
-}
-func (m *JobList) XXX_Size() int {
- return m.Size()
-}
-func (m *JobList) XXX_DiscardUnknown() {
- xxx_messageInfo_JobList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JobList proto.InternalMessageInfo
-
-func (m *JobSpec) Reset() { *m = JobSpec{} }
-func (*JobSpec) ProtoMessage() {}
-func (*JobSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{7}
-}
-func (m *JobSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobSpec.Merge(m, src)
-}
-func (m *JobSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *JobSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_JobSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JobSpec proto.InternalMessageInfo
-
-func (m *JobStatus) Reset() { *m = JobStatus{} }
-func (*JobStatus) ProtoMessage() {}
-func (*JobStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{8}
-}
-func (m *JobStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobStatus.Merge(m, src)
-}
-func (m *JobStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *JobStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_JobStatus.DiscardUnknown(m)
-}
+func (m *JobCondition) Reset() { *m = JobCondition{} }
-var xxx_messageInfo_JobStatus proto.InternalMessageInfo
+func (m *JobList) Reset() { *m = JobList{} }
-func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
-func (*JobTemplateSpec) ProtoMessage() {}
-func (*JobTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{9}
-}
-func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobTemplateSpec.Merge(m, src)
-}
-func (m *JobTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *JobTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m)
-}
+func (m *JobSpec) Reset() { *m = JobSpec{} }
-var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo
+func (m *JobStatus) Reset() { *m = JobStatus{} }
-func (m *PodFailurePolicy) Reset() { *m = PodFailurePolicy{} }
-func (*PodFailurePolicy) ProtoMessage() {}
-func (*PodFailurePolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{10}
-}
-func (m *PodFailurePolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodFailurePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodFailurePolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodFailurePolicy.Merge(m, src)
-}
-func (m *PodFailurePolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *PodFailurePolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_PodFailurePolicy.DiscardUnknown(m)
-}
+func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
-var xxx_messageInfo_PodFailurePolicy proto.InternalMessageInfo
+func (m *PodFailurePolicy) Reset() { *m = PodFailurePolicy{} }
func (m *PodFailurePolicyOnExitCodesRequirement) Reset() {
*m = PodFailurePolicyOnExitCodesRequirement{}
}
-func (*PodFailurePolicyOnExitCodesRequirement) ProtoMessage() {}
-func (*PodFailurePolicyOnExitCodesRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{11}
-}
-func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.Merge(m, src)
-}
-func (m *PodFailurePolicyOnExitCodesRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *PodFailurePolicyOnExitCodesRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodFailurePolicyOnExitCodesRequirement proto.InternalMessageInfo
func (m *PodFailurePolicyOnPodConditionsPattern) Reset() {
*m = PodFailurePolicyOnPodConditionsPattern{}
}
-func (*PodFailurePolicyOnPodConditionsPattern) ProtoMessage() {}
-func (*PodFailurePolicyOnPodConditionsPattern) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{12}
-}
-func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.Merge(m, src)
-}
-func (m *PodFailurePolicyOnPodConditionsPattern) XXX_Size() int {
- return m.Size()
-}
-func (m *PodFailurePolicyOnPodConditionsPattern) XXX_DiscardUnknown() {
- xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern.DiscardUnknown(m)
-}
-var xxx_messageInfo_PodFailurePolicyOnPodConditionsPattern proto.InternalMessageInfo
+func (m *PodFailurePolicyRule) Reset() { *m = PodFailurePolicyRule{} }
-func (m *PodFailurePolicyRule) Reset() { *m = PodFailurePolicyRule{} }
-func (*PodFailurePolicyRule) ProtoMessage() {}
-func (*PodFailurePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{13}
-}
-func (m *PodFailurePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodFailurePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodFailurePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodFailurePolicyRule.Merge(m, src)
-}
-func (m *PodFailurePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *PodFailurePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_PodFailurePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodFailurePolicyRule proto.InternalMessageInfo
-
-func (m *SuccessPolicy) Reset() { *m = SuccessPolicy{} }
-func (*SuccessPolicy) ProtoMessage() {}
-func (*SuccessPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{14}
-}
-func (m *SuccessPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SuccessPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SuccessPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SuccessPolicy.Merge(m, src)
-}
-func (m *SuccessPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *SuccessPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_SuccessPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SuccessPolicy proto.InternalMessageInfo
-
-func (m *SuccessPolicyRule) Reset() { *m = SuccessPolicyRule{} }
-func (*SuccessPolicyRule) ProtoMessage() {}
-func (*SuccessPolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{15}
-}
-func (m *SuccessPolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SuccessPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SuccessPolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SuccessPolicyRule.Merge(m, src)
-}
-func (m *SuccessPolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *SuccessPolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_SuccessPolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SuccessPolicyRule proto.InternalMessageInfo
-
-func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} }
-func (*UncountedTerminatedPods) ProtoMessage() {}
-func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) {
- return fileDescriptor_79228dc2c4001a22, []int{16}
-}
-func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UncountedTerminatedPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UncountedTerminatedPods) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UncountedTerminatedPods.Merge(m, src)
-}
-func (m *UncountedTerminatedPods) XXX_Size() int {
- return m.Size()
-}
-func (m *UncountedTerminatedPods) XXX_DiscardUnknown() {
- xxx_messageInfo_UncountedTerminatedPods.DiscardUnknown(m)
-}
+func (m *SuccessPolicy) Reset() { *m = SuccessPolicy{} }
-var xxx_messageInfo_UncountedTerminatedPods proto.InternalMessageInfo
+func (m *SuccessPolicyRule) Reset() { *m = SuccessPolicyRule{} }
-func init() {
- proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1.CronJob")
- proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1.CronJobList")
- proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1.CronJobSpec")
- proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1.CronJobStatus")
- proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job")
- proto.RegisterType((*JobCondition)(nil), "k8s.io.api.batch.v1.JobCondition")
- proto.RegisterType((*JobList)(nil), "k8s.io.api.batch.v1.JobList")
- proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec")
- proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus")
- proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1.JobTemplateSpec")
- proto.RegisterType((*PodFailurePolicy)(nil), "k8s.io.api.batch.v1.PodFailurePolicy")
- proto.RegisterType((*PodFailurePolicyOnExitCodesRequirement)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement")
- proto.RegisterType((*PodFailurePolicyOnPodConditionsPattern)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern")
- proto.RegisterType((*PodFailurePolicyRule)(nil), "k8s.io.api.batch.v1.PodFailurePolicyRule")
- proto.RegisterType((*SuccessPolicy)(nil), "k8s.io.api.batch.v1.SuccessPolicy")
- proto.RegisterType((*SuccessPolicyRule)(nil), "k8s.io.api.batch.v1.SuccessPolicyRule")
- proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/batch/v1/generated.proto", fileDescriptor_79228dc2c4001a22)
-}
-
-var fileDescriptor_79228dc2c4001a22 = []byte{
- // 1882 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0xdb, 0xc8,
- 0x15, 0x37, 0x6d, 0xcb, 0x96, 0x46, 0xfe, 0x90, 0x27, 0x4e, 0xa2, 0xba, 0x0b, 0xd1, 0xab, 0xec,
- 0x06, 0xde, 0x76, 0x2b, 0x6d, 0xbc, 0x41, 0xb7, 0x1f, 0x68, 0xb1, 0xa1, 0xd2, 0x6c, 0xe3, 0x95,
- 0x37, 0xea, 0xc8, 0x69, 0x81, 0xdd, 0xb4, 0xe8, 0x88, 0x1c, 0xc9, 0xdc, 0x50, 0x1c, 0x96, 0x1c,
- 0x1a, 0xf1, 0xa5, 0x28, 0xd0, 0x7f, 0xa0, 0x3d, 0xf6, 0x1f, 0xe8, 0xb1, 0x97, 0xf6, 0xdc, 0xde,
- 0x8a, 0x1c, 0x17, 0x3d, 0x2d, 0x7a, 0x20, 0x1a, 0xf6, 0x0f, 0xe8, 0xdd, 0x45, 0x81, 0x62, 0x86,
- 0xc3, 0x4f, 0x91, 0x5e, 0x67, 0x81, 0x06, 0xbd, 0x89, 0xef, 0xfd, 0xde, 0x6f, 0x1e, 0xe7, 0x7d,
- 0x52, 0xe0, 0xd6, 0xd3, 0x6f, 0x79, 0x3d, 0x93, 0xf6, 0xb1, 0x63, 0xf6, 0x27, 0x98, 0xe9, 0xa7,
- 0xfd, 0xb3, 0x3b, 0xfd, 0x19, 0xb1, 0x89, 0x8b, 0x19, 0x31, 0x7a, 0x8e, 0x4b, 0x19, 0x85, 0xd7,
- 0x22, 0x50, 0x0f, 0x3b, 0x66, 0x4f, 0x80, 0x7a, 0x67, 0x77, 0xf6, 0xbe, 0x31, 0x33, 0xd9, 0xa9,
- 0x3f, 0xe9, 0xe9, 0x74, 0xde, 0x9f, 0xd1, 0x19, 0xed, 0x0b, 0xec, 0xc4, 0x9f, 0x8a, 0x27, 0xf1,
- 0x20, 0x7e, 0x45, 0x1c, 0x7b, 0xdd, 0xcc, 0x41, 0x3a, 0x75, 0x49, 0xc9, 0x39, 0x7b, 0x77, 0x53,
- 0xcc, 0x1c, 0xeb, 0xa7, 0xa6, 0x4d, 0xdc, 0xf3, 0xbe, 0xf3, 0x74, 0xc6, 0x05, 0x5e, 0x7f, 0x4e,
- 0x18, 0x2e, 0xb3, 0xea, 0x57, 0x59, 0xb9, 0xbe, 0xcd, 0xcc, 0x39, 0x59, 0x30, 0xf8, 0xe6, 0x17,
- 0x19, 0x78, 0xfa, 0x29, 0x99, 0xe3, 0xa2, 0x5d, 0xf7, 0xdf, 0x0a, 0x58, 0x1f, 0xb8, 0xd4, 0x3e,
- 0xa2, 0x13, 0xf8, 0x73, 0x50, 0xe7, 0xfe, 0x18, 0x98, 0xe1, 0xb6, 0xb2, 0xaf, 0x1c, 0x34, 0x0f,
- 0xdf, 0xe9, 0xa5, 0xb7, 0x94, 0xd0, 0xf6, 0x9c, 0xa7, 0x33, 0x2e, 0xf0, 0x7a, 0x1c, 0xdd, 0x3b,
- 0xbb, 0xd3, 0x7b, 0x34, 0xf9, 0x94, 0xe8, 0xec, 0x98, 0x30, 0xac, 0xc1, 0xe7, 0x81, 0xba, 0x14,
- 0x06, 0x2a, 0x48, 0x65, 0x28, 0x61, 0x85, 0x1a, 0x58, 0xf5, 0x1c, 0xa2, 0xb7, 0x97, 0x05, 0xfb,
- 0x7e, 0xaf, 0x24, 0x06, 0x3d, 0xe9, 0xcd, 0xd8, 0x21, 0xba, 0xb6, 0x21, 0xd9, 0x56, 0xf9, 0x13,
- 0x12, 0xb6, 0xf0, 0x08, 0xac, 0x79, 0x0c, 0x33, 0xdf, 0x6b, 0xaf, 0x08, 0x96, 0xee, 0xa5, 0x2c,
- 0x02, 0xa9, 0x6d, 0x49, 0x9e, 0xb5, 0xe8, 0x19, 0x49, 0x86, 0xee, 0x1f, 0x14, 0xd0, 0x94, 0xc8,
- 0xa1, 0xe9, 0x31, 0xf8, 0x64, 0xe1, 0x06, 0x7a, 0x57, 0xbb, 0x01, 0x6e, 0x2d, 0xde, 0xbf, 0x25,
- 0x4f, 0xaa, 0xc7, 0x92, 0xcc, 0xdb, 0xdf, 0x03, 0x35, 0x93, 0x91, 0xb9, 0xd7, 0x5e, 0xde, 0x5f,
- 0x39, 0x68, 0x1e, 0xbe, 0x76, 0x99, 0xe3, 0xda, 0xa6, 0x24, 0xaa, 0x3d, 0xe4, 0x26, 0x28, 0xb2,
- 0xec, 0xfe, 0x6d, 0x35, 0x71, 0x98, 0x5f, 0x09, 0x7c, 0x1b, 0xd4, 0x79, 0x60, 0x0d, 0xdf, 0x22,
- 0xc2, 0xe1, 0x46, 0xea, 0xc0, 0x58, 0xca, 0x51, 0x82, 0x80, 0x07, 0xa0, 0xce, 0x73, 0xe1, 0x63,
- 0x6a, 0x93, 0x76, 0x5d, 0xa0, 0x37, 0x38, 0xf2, 0x44, 0xca, 0x50, 0xa2, 0x85, 0x8f, 0xc1, 0x4d,
- 0x8f, 0x61, 0x97, 0x99, 0xf6, 0xec, 0x3e, 0xc1, 0x86, 0x65, 0xda, 0x64, 0x4c, 0x74, 0x6a, 0x1b,
- 0x9e, 0x88, 0xdd, 0x8a, 0xf6, 0xd5, 0x30, 0x50, 0x6f, 0x8e, 0xcb, 0x21, 0xa8, 0xca, 0x16, 0x3e,
- 0x01, 0x3b, 0x3a, 0xb5, 0x75, 0xdf, 0x75, 0x89, 0xad, 0x9f, 0x8f, 0xa8, 0x65, 0xea, 0xe7, 0x22,
- 0x8c, 0x0d, 0xad, 0x27, 0xfd, 0xde, 0x19, 0x14, 0x01, 0x17, 0x65, 0x42, 0xb4, 0x48, 0x04, 0xdf,
- 0x04, 0xeb, 0x9e, 0xef, 0x39, 0xc4, 0x36, 0xda, 0xab, 0xfb, 0xca, 0x41, 0x5d, 0x6b, 0x86, 0x81,
- 0xba, 0x3e, 0x8e, 0x44, 0x28, 0xd6, 0xc1, 0x4f, 0x40, 0xf3, 0x53, 0x3a, 0x39, 0x21, 0x73, 0xc7,
- 0xc2, 0x8c, 0xb4, 0x6b, 0x22, 0xce, 0x6f, 0x94, 0x06, 0xe3, 0x28, 0xc5, 0x89, 0x7c, 0xbc, 0x26,
- 0x9d, 0x6c, 0x66, 0x14, 0x28, 0xcb, 0x06, 0x7f, 0x06, 0xf6, 0x3c, 0x5f, 0xd7, 0x89, 0xe7, 0x4d,
- 0x7d, 0xeb, 0x88, 0x4e, 0xbc, 0x1f, 0x9a, 0x1e, 0xa3, 0xee, 0xf9, 0xd0, 0x9c, 0x9b, 0xac, 0xbd,
- 0xb6, 0xaf, 0x1c, 0xd4, 0xb4, 0x4e, 0x18, 0xa8, 0x7b, 0xe3, 0x4a, 0x14, 0xba, 0x84, 0x01, 0x22,
- 0x70, 0x63, 0x8a, 0x4d, 0x8b, 0x18, 0x0b, 0xdc, 0xeb, 0x82, 0x7b, 0x2f, 0x0c, 0xd4, 0x1b, 0x0f,
- 0x4a, 0x11, 0xa8, 0xc2, 0xb2, 0xfb, 0xe7, 0x65, 0xb0, 0x99, 0xab, 0x17, 0xf8, 0x21, 0x58, 0xc3,
- 0x3a, 0x33, 0xcf, 0x78, 0x52, 0xf1, 0x54, 0xbd, 0x95, 0xbd, 0x1d, 0xde, 0xe9, 0xd2, 0xaa, 0x47,
- 0x64, 0x4a, 0x78, 0x10, 0x48, 0x5a, 0x64, 0xf7, 0x84, 0x29, 0x92, 0x14, 0xd0, 0x02, 0x2d, 0x0b,
- 0x7b, 0x2c, 0xce, 0x47, 0x9e, 0x6d, 0x22, 0x3e, 0xcd, 0xc3, 0xaf, 0x5d, 0xad, 0xb8, 0xb8, 0x85,
- 0xb6, 0x1b, 0x06, 0x6a, 0x6b, 0x58, 0xe0, 0x41, 0x0b, 0xcc, 0xd0, 0x05, 0x50, 0xc8, 0x92, 0x2b,
- 0x14, 0xe7, 0xd5, 0x5e, 0xfa, 0xbc, 0x1b, 0x61, 0xa0, 0xc2, 0xe1, 0x02, 0x13, 0x2a, 0x61, 0xef,
- 0xfe, 0x4b, 0x01, 0x2b, 0xaf, 0xa6, 0x81, 0x7e, 0x3f, 0xd7, 0x40, 0x5f, 0xab, 0x4a, 0xda, 0xca,
- 0xe6, 0xf9, 0xa0, 0xd0, 0x3c, 0x3b, 0x95, 0x0c, 0x97, 0x37, 0xce, 0xbf, 0xae, 0x80, 0x8d, 0x23,
- 0x3a, 0x19, 0x50, 0xdb, 0x30, 0x99, 0x49, 0x6d, 0x78, 0x17, 0xac, 0xb2, 0x73, 0x27, 0x6e, 0x42,
- 0xfb, 0xf1, 0xd1, 0x27, 0xe7, 0x0e, 0xb9, 0x08, 0xd4, 0x56, 0x16, 0xcb, 0x65, 0x48, 0xa0, 0xe1,
- 0x30, 0x71, 0x67, 0x59, 0xd8, 0xdd, 0xcd, 0x1f, 0x77, 0x11, 0xa8, 0x25, 0x23, 0xb6, 0x97, 0x30,
- 0xe5, 0x9d, 0x82, 0x33, 0xb0, 0xc9, 0x83, 0x33, 0x72, 0xe9, 0x24, 0xca, 0xb2, 0x95, 0x97, 0x8e,
- 0xfa, 0x75, 0xe9, 0xc0, 0xe6, 0x30, 0x4b, 0x84, 0xf2, 0xbc, 0xf0, 0x2c, 0xca, 0xb1, 0x13, 0x17,
- 0xdb, 0x5e, 0xf4, 0x4a, 0x5f, 0x2e, 0xa7, 0xf7, 0xe4, 0x69, 0x22, 0xcf, 0xf2, 0x6c, 0xa8, 0xe4,
- 0x04, 0x78, 0x1b, 0xac, 0xb9, 0x04, 0x7b, 0xd4, 0x16, 0xf9, 0xdc, 0x48, 0xa3, 0x83, 0x84, 0x14,
- 0x49, 0x2d, 0x7c, 0x0b, 0xac, 0xcf, 0x89, 0xe7, 0xe1, 0x19, 0x11, 0x1d, 0xa7, 0xa1, 0x6d, 0x4b,
- 0xe0, 0xfa, 0x71, 0x24, 0x46, 0xb1, 0xbe, 0xfb, 0x7b, 0x05, 0xac, 0xbf, 0x9a, 0xe9, 0xf7, 0xbd,
- 0xfc, 0xf4, 0x6b, 0x57, 0x65, 0x5e, 0xc5, 0xe4, 0xfb, 0x5d, 0x43, 0x38, 0x2a, 0xa6, 0xde, 0x1d,
- 0xd0, 0x74, 0xb0, 0x8b, 0x2d, 0x8b, 0x58, 0xa6, 0x37, 0x17, 0xbe, 0xd6, 0xb4, 0x6d, 0xde, 0x97,
- 0x47, 0xa9, 0x18, 0x65, 0x31, 0xdc, 0x44, 0xa7, 0x73, 0xc7, 0x22, 0xfc, 0x32, 0xa3, 0x74, 0x93,
- 0x26, 0x83, 0x54, 0x8c, 0xb2, 0x18, 0xf8, 0x08, 0x5c, 0x8f, 0x3a, 0x58, 0x71, 0x02, 0xae, 0x88,
- 0x09, 0xf8, 0x95, 0x30, 0x50, 0xaf, 0xdf, 0x2b, 0x03, 0xa0, 0x72, 0x3b, 0x38, 0x03, 0x2d, 0x87,
- 0x1a, 0xbc, 0x39, 0xfb, 0x2e, 0x91, 0xc3, 0xaf, 0x29, 0xee, 0xf9, 0xcd, 0xd2, 0xcb, 0x18, 0x15,
- 0xc0, 0x51, 0x0f, 0x2c, 0x4a, 0xd1, 0x02, 0x29, 0xfc, 0x04, 0x6c, 0xca, 0x11, 0x22, 0x4f, 0x69,
- 0x5d, 0xb2, 0x29, 0x8d, 0xb3, 0x48, 0x6d, 0x87, 0x27, 0x7f, 0x4e, 0x84, 0xf2, 0x5c, 0xf0, 0x2e,
- 0xd8, 0x98, 0x60, 0xfd, 0x29, 0x9d, 0x4e, 0xb3, 0x73, 0xa7, 0x15, 0x06, 0xea, 0x86, 0x96, 0x91,
- 0xa3, 0x1c, 0x0a, 0x0e, 0xc1, 0x6e, 0xf6, 0x79, 0x44, 0xdc, 0x87, 0xb6, 0x41, 0x9e, 0xb5, 0x37,
- 0x84, 0x75, 0x3b, 0x0c, 0xd4, 0x5d, 0xad, 0x44, 0x8f, 0x4a, 0xad, 0xe0, 0xfb, 0xa0, 0x35, 0xc7,
- 0xcf, 0xa2, 0x31, 0x27, 0x24, 0xc4, 0x6b, 0x6f, 0x0a, 0x26, 0x71, 0x45, 0xc7, 0x05, 0x1d, 0x5a,
- 0x40, 0xc3, 0x9f, 0x82, 0xba, 0x47, 0x2c, 0xa2, 0x33, 0xea, 0xca, 0xc2, 0x7d, 0xf7, 0x8a, 0xb9,
- 0x8e, 0x27, 0xc4, 0x1a, 0x4b, 0xd3, 0x68, 0x7f, 0x8a, 0x9f, 0x50, 0x42, 0x09, 0xbf, 0x03, 0xb6,
- 0xe6, 0xd8, 0xf6, 0x71, 0x82, 0x14, 0x15, 0x5b, 0xd7, 0x60, 0x18, 0xa8, 0x5b, 0xc7, 0x39, 0x0d,
- 0x2a, 0x20, 0xe1, 0x8f, 0x40, 0x9d, 0xc5, 0xcb, 0xc9, 0x9a, 0x70, 0xad, 0x74, 0xfc, 0x8e, 0xa8,
- 0x91, 0xdb, 0x4d, 0x92, 0xda, 0x4b, 0x16, 0x93, 0x84, 0x86, 0xaf, 0x73, 0x8c, 0x59, 0x32, 0x0f,
- 0xef, 0x4d, 0x19, 0x71, 0x1f, 0x98, 0xb6, 0xe9, 0x9d, 0x12, 0x43, 0xec, 0x81, 0xb5, 0x68, 0x9d,
- 0x3b, 0x39, 0x19, 0x96, 0x41, 0x50, 0x95, 0x2d, 0x1c, 0x82, 0xad, 0xb4, 0x60, 0x8e, 0xa9, 0x41,
- 0xda, 0x0d, 0xd1, 0x6e, 0xde, 0xe0, 0x6f, 0x39, 0xc8, 0x69, 0x2e, 0x16, 0x24, 0xa8, 0x60, 0x9b,
- 0x5d, 0xdf, 0xc0, 0x25, 0xeb, 0x9b, 0x01, 0x76, 0x1d, 0x6a, 0x20, 0xe2, 0x58, 0x58, 0x27, 0x73,
- 0x62, 0x33, 0x99, 0xe3, 0x5b, 0xe2, 0xe8, 0x77, 0x78, 0x26, 0x8d, 0x4a, 0xf4, 0x17, 0x15, 0x72,
- 0x54, 0xca, 0x06, 0xbf, 0x0e, 0x1a, 0x73, 0x6c, 0xe3, 0x19, 0x31, 0xb4, 0xf3, 0xf6, 0xb6, 0xa0,
- 0xde, 0x0c, 0x03, 0xb5, 0x71, 0x1c, 0x0b, 0x51, 0xaa, 0xef, 0xfe, 0xa7, 0x06, 0x1a, 0xe9, 0xf2,
- 0xf4, 0x18, 0x00, 0x3d, 0x9e, 0x50, 0x9e, 0x5c, 0xa0, 0x5e, 0xaf, 0xea, 0x76, 0xc9, 0x2c, 0x4b,
- 0x07, 0x7f, 0x22, 0xf2, 0x50, 0x86, 0x08, 0xfe, 0x04, 0x34, 0xc4, 0x5a, 0x2d, 0x66, 0xcd, 0xf2,
- 0x4b, 0xcf, 0x1a, 0xe1, 0xfd, 0x38, 0x26, 0x40, 0x29, 0x17, 0x9c, 0x66, 0xa3, 0xf8, 0x25, 0xe7,
- 0x26, 0xcc, 0x47, 0x5c, 0x1c, 0x51, 0x60, 0xe5, 0xd3, 0x4b, 0x2e, 0x95, 0xab, 0x22, 0xe7, 0xaa,
- 0xf6, 0xc5, 0x3e, 0x68, 0x88, 0x8e, 0x43, 0x0c, 0x62, 0x88, 0xb2, 0xa9, 0x69, 0x3b, 0x12, 0xda,
- 0x18, 0xc7, 0x0a, 0x94, 0x62, 0x38, 0x71, 0xb4, 0xd9, 0xca, 0xfd, 0x3a, 0x21, 0x8e, 0x4a, 0x1e,
- 0x49, 0x2d, 0x9f, 0x01, 0x8c, 0xb8, 0x73, 0xd3, 0xc6, 0xfc, 0xdb, 0x44, 0xb4, 0x5e, 0x39, 0x03,
- 0x4e, 0x52, 0x31, 0xca, 0x62, 0xe0, 0x7d, 0xd0, 0x92, 0x6f, 0x91, 0x36, 0x9a, 0x75, 0x91, 0x0d,
- 0x6d, 0x79, 0x48, 0x6b, 0x50, 0xd0, 0xa3, 0x05, 0x0b, 0xf8, 0x1e, 0xd8, 0x9c, 0xe6, 0x7a, 0x15,
- 0x10, 0x14, 0xa2, 0xd7, 0xe6, 0x1b, 0x55, 0x1e, 0x07, 0x7f, 0xad, 0x80, 0x9b, 0xbe, 0xad, 0x53,
- 0xdf, 0x66, 0xc4, 0x88, 0x9d, 0x24, 0xc6, 0x88, 0x1a, 0x9e, 0x28, 0xdc, 0xe6, 0xe1, 0xdb, 0xa5,
- 0x89, 0xf5, 0xb8, 0xdc, 0x26, 0x2a, 0xf3, 0x0a, 0x25, 0xaa, 0x3a, 0x09, 0xaa, 0xa0, 0xe6, 0x12,
- 0x6c, 0x9c, 0x8b, 0xea, 0xae, 0x69, 0x0d, 0x3e, 0x9b, 0x11, 0x17, 0xa0, 0x48, 0xde, 0xfd, 0xa3,
- 0x02, 0xb6, 0x0b, 0x9f, 0x4a, 0xff, 0xff, 0xbb, 0x70, 0x77, 0x02, 0x16, 0x66, 0x29, 0xfc, 0x08,
- 0xd4, 0x5c, 0xdf, 0x22, 0x71, 0xd9, 0xbe, 0x75, 0xa5, 0xb9, 0x8c, 0x7c, 0x8b, 0xa4, 0x5b, 0x0b,
- 0x7f, 0xf2, 0x50, 0x44, 0xd3, 0xfd, 0xbb, 0x02, 0x6e, 0x17, 0xe1, 0x8f, 0xec, 0x1f, 0x3c, 0x33,
- 0xd9, 0x80, 0x1a, 0xc4, 0x43, 0xe4, 0x17, 0xbe, 0xe9, 0x8a, 0xbe, 0xc3, 0x93, 0x44, 0xa7, 0x36,
- 0xc3, 0xfc, 0x5a, 0x3e, 0xc2, 0xf3, 0x78, 0x95, 0x16, 0x49, 0x32, 0xc8, 0x2a, 0x50, 0x1e, 0x07,
- 0xc7, 0xa0, 0x4e, 0x1d, 0xe2, 0x62, 0x3e, 0x65, 0xa2, 0x35, 0xfa, 0xbd, 0x78, 0x14, 0x3c, 0x92,
- 0xf2, 0x8b, 0x40, 0xbd, 0x75, 0x89, 0x1b, 0x31, 0x0c, 0x25, 0x44, 0xb0, 0x0b, 0xd6, 0xce, 0xb0,
- 0xe5, 0x13, 0xbe, 0xed, 0xac, 0x1c, 0xd4, 0x34, 0xc0, 0xeb, 0xe9, 0xc7, 0x42, 0x82, 0xa4, 0xa6,
- 0xfb, 0x97, 0xd2, 0x97, 0x1b, 0x51, 0x23, 0xed, 0x60, 0x23, 0xcc, 0x18, 0x71, 0x6d, 0xf8, 0x41,
- 0xee, 0xf3, 0xe0, 0xdd, 0xc2, 0xe7, 0xc1, 0xad, 0x92, 0x25, 0x3f, 0x4b, 0xf3, 0xbf, 0xfa, 0x62,
- 0xe8, 0x3e, 0x5f, 0x06, 0xbb, 0x65, 0xd1, 0x84, 0xef, 0x47, 0xbd, 0x8a, 0xda, 0xd2, 0xe3, 0x83,
- 0x6c, 0xaf, 0xa2, 0xf6, 0x45, 0xa0, 0xde, 0x28, 0xda, 0x45, 0x1a, 0x24, 0xed, 0xa0, 0x0d, 0x9a,
- 0x34, 0xbd, 0x61, 0x99, 0xa4, 0xdf, 0xbd, 0x52, 0x3e, 0x95, 0x27, 0x48, 0xd4, 0xa9, 0xb2, 0xba,
- 0xec, 0x01, 0xf0, 0x97, 0x60, 0x9b, 0xe6, 0xef, 0x5e, 0x44, 0xee, 0xea, 0x67, 0x96, 0xc5, 0x4d,
- 0xbb, 0x29, 0xdf, 0x7b, 0xbb, 0xa0, 0x47, 0xc5, 0xc3, 0xba, 0x4f, 0x40, 0x7e, 0x6d, 0x84, 0x1f,
- 0xe6, 0x4b, 0xe9, 0xf6, 0x17, 0x2f, 0x9f, 0x97, 0xd4, 0xd1, 0x6f, 0x15, 0xb0, 0xb3, 0x80, 0xe5,
- 0x6b, 0x60, 0x32, 0x05, 0xe2, 0xd6, 0x1a, 0xc5, 0x4b, 0xac, 0x81, 0xe3, 0x82, 0x0e, 0x2d, 0xa0,
- 0xf9, 0x9e, 0x96, 0xc8, 0x06, 0xbc, 0xf9, 0xc9, 0x2f, 0x03, 0x31, 0xcf, 0xc6, 0x39, 0x0d, 0x2a,
- 0x20, 0xbb, 0x7f, 0x52, 0x40, 0x55, 0x2f, 0x85, 0xa3, 0xec, 0x0c, 0xe3, 0x17, 0xd0, 0xd0, 0x0e,
- 0x73, 0xf3, 0xeb, 0x22, 0x50, 0x5f, 0xaf, 0xfa, 0xcb, 0x96, 0x27, 0xba, 0xd7, 0x7b, 0xfc, 0xf0,
- 0x7e, 0x76, 0xc8, 0x7d, 0x90, 0x0c, 0xb9, 0x65, 0x41, 0xd7, 0x4f, 0x07, 0xdc, 0xd5, 0xb8, 0xa4,
- 0xb9, 0xf6, 0xed, 0xe7, 0x2f, 0x3a, 0x4b, 0x9f, 0xbd, 0xe8, 0x2c, 0x7d, 0xfe, 0xa2, 0xb3, 0xf4,
- 0xab, 0xb0, 0xa3, 0x3c, 0x0f, 0x3b, 0xca, 0x67, 0x61, 0x47, 0xf9, 0x3c, 0xec, 0x28, 0xff, 0x08,
- 0x3b, 0xca, 0x6f, 0xfe, 0xd9, 0x59, 0xfa, 0xf8, 0x5a, 0xc9, 0x7f, 0xe8, 0xff, 0x0d, 0x00, 0x00,
- 0xff, 0xff, 0x1e, 0x70, 0x68, 0xe1, 0x59, 0x17, 0x00, 0x00,
-}
+func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} }
func (m *CronJob) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/batch/v1/generated.proto b/operator/vendor/k8s.io/api/batch/v1/generated.proto
index c0ce8cef..ca8248ff 100644
--- a/operator/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/batch/v1/generated.proto
@@ -342,9 +342,6 @@ message JobSpec {
// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
- //
- // This field is beta-level. The job controller accepts setting the field
- // when the feature gate JobManagedBy is enabled (enabled by default).
// +optional
optional string managedBy = 15;
}
@@ -532,6 +529,7 @@ message PodFailurePolicyOnPodConditionsPattern {
// Specifies the required Pod condition status. To match a pod condition
// it is required that the specified status equals the pod condition status.
// Defaults to True.
+ // +optional
optional string status = 2;
}
diff --git a/operator/vendor/k8s.io/api/batch/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/batch/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..82928e07
--- /dev/null
+++ b/operator/vendor/k8s.io/api/batch/v1/generated.protomessage.pb.go
@@ -0,0 +1,56 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*CronJob) ProtoMessage() {}
+
+func (*CronJobList) ProtoMessage() {}
+
+func (*CronJobSpec) ProtoMessage() {}
+
+func (*CronJobStatus) ProtoMessage() {}
+
+func (*Job) ProtoMessage() {}
+
+func (*JobCondition) ProtoMessage() {}
+
+func (*JobList) ProtoMessage() {}
+
+func (*JobSpec) ProtoMessage() {}
+
+func (*JobStatus) ProtoMessage() {}
+
+func (*JobTemplateSpec) ProtoMessage() {}
+
+func (*PodFailurePolicy) ProtoMessage() {}
+
+func (*PodFailurePolicyOnExitCodesRequirement) ProtoMessage() {}
+
+func (*PodFailurePolicyOnPodConditionsPattern) ProtoMessage() {}
+
+func (*PodFailurePolicyRule) ProtoMessage() {}
+
+func (*SuccessPolicy) ProtoMessage() {}
+
+func (*SuccessPolicyRule) ProtoMessage() {}
+
+func (*UncountedTerminatedPods) ProtoMessage() {}
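
The new generated.protomessage.pb.go file above carries only empty ProtoMessage() marker methods, and they are compiled solely when the kubernetes_protomessage_one_more_release build tag is set. A minimal, hedged sketch of the marker-interface pattern these methods exist to satisfy (every identifier below is illustrative and not part of the vendored API):

    package main

    import "fmt"

    // protoMessage mirrors the kind of marker interface the generated, empty
    // ProtoMessage() methods are written against; the name is illustrative only.
    type protoMessage interface {
        ProtoMessage()
    }

    // cronJob stands in for a generated API type such as batch/v1 CronJob.
    type cronJob struct{}

    // ProtoMessage is an empty marker method, matching the shape of the
    // generated methods in the file above.
    func (*cronJob) ProtoMessage() {}

    func main() {
        var m protoMessage = &cronJob{}
        fmt.Printf("%T satisfies the marker interface\n", m)
    }
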
diff --git a/operator/vendor/k8s.io/api/batch/v1/types.go b/operator/vendor/k8s.io/api/batch/v1/types.go
index 9183c073..26d0ee6d 100644
--- a/operator/vendor/k8s.io/api/batch/v1/types.go
+++ b/operator/vendor/k8s.io/api/batch/v1/types.go
@@ -209,6 +209,7 @@ type PodFailurePolicyOnPodConditionsPattern struct {
// Specifies the required Pod condition status. To match a pod condition
// it is required that the specified status equals the pod condition status.
// Defaults to True.
+ // +optional
Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,req,name=status"`
}
@@ -468,9 +469,6 @@ type JobSpec struct {
// by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
// characters as defined by RFC 3986. The value cannot exceed 63 characters.
// This field is immutable.
- //
- // This field is beta-level. The job controller accepts setting the field
- // when the feature gate JobManagedBy is enabled (enabled by default).
// +optional
ManagedBy *string `json:"managedBy,omitempty" protobuf:"bytes,15,opt,name=managedBy"`
}
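
Neither types.go change above alters the Go struct shapes: status on PodFailurePolicyOnPodConditionsPattern is now documented as +optional (defaulting to True), and the managedBy comment drops the beta-level feature-gate note. A hedged sketch of populating both fields, assuming the usual field and constant names from k8s.io/api/batch/v1 and k8s.io/api/core/v1 (PodFailurePolicy, PodFailurePolicyActionIgnore, DisruptionTarget, which are not shown in this hunk); acme.io/foo is the placeholder value from the comment itself:

    package main

    import (
        "fmt"

        batchv1 "k8s.io/api/batch/v1"
        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // ManagedBy must be a domain-prefixed path and is immutable; the reserved
        // default controller value is "kubernetes.io/job-controller".
        managedBy := "acme.io/foo"

        spec := batchv1.JobSpec{
            ManagedBy: &managedBy,
            PodFailurePolicy: &batchv1.PodFailurePolicy{
                Rules: []batchv1.PodFailurePolicyRule{{
                    Action: batchv1.PodFailurePolicyActionIgnore,
                    OnPodConditions: []batchv1.PodFailurePolicyOnPodConditionsPattern{{
                        Type: corev1.DisruptionTarget,
                        // Status is optional and defaults to True; set it explicitly here.
                        Status: corev1.ConditionTrue,
                    }},
                }},
            },
        }
        fmt.Println(*spec.ManagedBy, spec.PodFailurePolicy.Rules[0].Action)
    }
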
diff --git a/operator/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index 451f4609..267df429 100644
--- a/operator/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -127,7 +127,7 @@ var map_JobSpec = map[string]string{
"completionMode": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, The Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.",
"suspend": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"podReplacementPolicy": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.",
- "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.\n\nThis field is beta-level. The job controller accepts setting the field when the feature gate JobManagedBy is enabled (enabled by default).",
+ "managedBy": "ManagedBy field indicates the controller that manages a Job. The k8s Job controller reconciles jobs which don't have this field at all or the field value is the reserved string `kubernetes.io/job-controller`, but skips reconciling Jobs with a custom value for this field. The value must be a valid domain-prefixed path (e.g. acme.io/foo) - all characters before the first \"/\" must be a valid subdomain as defined by RFC 1123. All characters trailing the first \"/\" must be valid HTTP Path characters as defined by RFC 3986. The value cannot exceed 63 characters. This field is immutable.",
}
func (JobSpec) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/batch/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/batch/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..c5b86f29
--- /dev/null
+++ b/operator/vendor/k8s.io/api/batch/v1/zz_generated.model_name.go
@@ -0,0 +1,107 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJob) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.CronJob"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobList) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.CronJobList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobSpec) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.CronJobSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobStatus) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.CronJobStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Job) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.Job"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobCondition) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.JobCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobList) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.JobList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobSpec) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.JobSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobStatus) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.JobStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.JobTemplateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodFailurePolicy) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.PodFailurePolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodFailurePolicyOnExitCodesRequirement) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodFailurePolicyOnPodConditionsPattern) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodFailurePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.PodFailurePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SuccessPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.SuccessPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SuccessPolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.SuccessPolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UncountedTerminatedPods) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1.UncountedTerminatedPods"
+}
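
The new zz_generated.model_name.go gives every batch/v1 type an OpenAPIModelName() method that returns its canonical OpenAPI model name. A quick sketch of calling it, assuming the module resolves to this bumped copy of k8s.io/api:

    package main

    import (
        "fmt"

        batchv1 "k8s.io/api/batch/v1"
    )

    func main() {
        // Prints "io.k8s.api.batch.v1.Job", per the generated method above.
        fmt.Println(batchv1.Job{}.OpenAPIModelName())
    }
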
diff --git a/operator/vendor/k8s.io/api/batch/v1beta1/doc.go b/operator/vendor/k8s.io/api/batch/v1beta1/doc.go
index 3430d693..b7108848 100644
--- a/operator/vendor/k8s.io/api/batch/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/batch/v1beta1/doc.go
@@ -18,5 +18,6 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.batch.v1beta1
package v1beta1
diff --git a/operator/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
index 895d9c91..5e0888a6 100644
--- a/operator/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
@@ -24,231 +24,23 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CronJob) Reset() { *m = CronJob{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CronJobList) Reset() { *m = CronJobList{} }
-func (m *CronJob) Reset() { *m = CronJob{} }
-func (*CronJob) ProtoMessage() {}
-func (*CronJob) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed95843ae7b4086b, []int{0}
-}
-func (m *CronJob) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJob) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJob.Merge(m, src)
-}
-func (m *CronJob) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJob) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJob.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CronJob proto.InternalMessageInfo
-
-func (m *CronJobList) Reset() { *m = CronJobList{} }
-func (*CronJobList) ProtoMessage() {}
-func (*CronJobList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed95843ae7b4086b, []int{1}
-}
-func (m *CronJobList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobList.Merge(m, src)
-}
-func (m *CronJobList) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobList) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CronJobList proto.InternalMessageInfo
-
-func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
-func (*CronJobSpec) ProtoMessage() {}
-func (*CronJobSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed95843ae7b4086b, []int{2}
-}
-func (m *CronJobSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobSpec.Merge(m, src)
-}
-func (m *CronJobSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobSpec.DiscardUnknown(m)
-}
+func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
-var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo
+func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
-func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
-func (*CronJobStatus) ProtoMessage() {}
-func (*CronJobStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed95843ae7b4086b, []int{3}
-}
-func (m *CronJobStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobStatus.Merge(m, src)
-}
-func (m *CronJobStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo
-
-func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
-func (*JobTemplateSpec) ProtoMessage() {}
-func (*JobTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ed95843ae7b4086b, []int{4}
-}
-func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *JobTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_JobTemplateSpec.Merge(m, src)
-}
-func (m *JobTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *JobTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob")
- proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList")
- proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec")
- proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus")
- proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_ed95843ae7b4086b)
-}
-
-var fileDescriptor_ed95843ae7b4086b = []byte{
- // 771 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x8f, 0xdb, 0x44,
- 0x14, 0xc7, 0xe3, 0x6c, 0x7e, 0x75, 0xd2, 0xc2, 0x76, 0x40, 0x5b, 0x2b, 0x20, 0x3b, 0xa4, 0xaa,
- 0x08, 0x08, 0xc6, 0xec, 0x0a, 0x21, 0x4e, 0x95, 0x70, 0x51, 0x81, 0x25, 0xa8, 0x68, 0x52, 0x2e,
- 0x55, 0x85, 0x3a, 0x9e, 0x4c, 0x92, 0xe9, 0xda, 0x1e, 0xcb, 0x33, 0x5e, 0x29, 0x37, 0x2e, 0xdc,
- 0xf9, 0x5f, 0xb8, 0x73, 0xde, 0x63, 0x6f, 0xf4, 0x64, 0xb1, 0xe6, 0xbf, 0xe0, 0x84, 0x66, 0xe2,
- 0x4d, 0xd2, 0xc4, 0xe9, 0x96, 0x0b, 0xb7, 0xcc, 0xf3, 0xf7, 0xfb, 0x99, 0xa7, 0xf7, 0xde, 0xbc,
- 0x80, 0xe1, 0xd9, 0x97, 0x12, 0x71, 0xe1, 0x91, 0x84, 0x7b, 0x01, 0x51, 0x74, 0xee, 0x9d, 0x1f,
- 0x07, 0x4c, 0x91, 0x63, 0x6f, 0xc6, 0x62, 0x96, 0x12, 0xc5, 0x26, 0x28, 0x49, 0x85, 0x12, 0xd0,
- 0x5e, 0x2a, 0x11, 0x49, 0x38, 0x32, 0x4a, 0x54, 0x2a, 0x7b, 0x9f, 0xce, 0xb8, 0x9a, 0x67, 0x01,
- 0xa2, 0x22, 0xf2, 0x66, 0x62, 0x26, 0x3c, 0x63, 0x08, 0xb2, 0xa9, 0x39, 0x99, 0x83, 0xf9, 0xb5,
- 0x04, 0xf5, 0xee, 0x56, 0x5c, 0xb9, 0x7d, 0x5b, 0x6f, 0xb0, 0x21, 0xa2, 0x22, 0x65, 0x55, 0x9a,
- 0xcf, 0xd7, 0x9a, 0x88, 0xd0, 0x39, 0x8f, 0x59, 0xba, 0xf0, 0x92, 0xb3, 0x99, 0x0e, 0x48, 0x2f,
- 0x62, 0x8a, 0x54, 0xb9, 0xbc, 0x7d, 0xae, 0x34, 0x8b, 0x15, 0x8f, 0xd8, 0x8e, 0xe1, 0x8b, 0xeb,
- 0x0c, 0x92, 0xce, 0x59, 0x44, 0xb6, 0x7d, 0x83, 0x5f, 0xeb, 0xa0, 0xfd, 0x20, 0x15, 0xf1, 0xa9,
- 0x08, 0xe0, 0x33, 0xd0, 0xd1, 0xf9, 0x4c, 0x88, 0x22, 0xb6, 0xd5, 0xb7, 0x86, 0xdd, 0x93, 0xcf,
- 0xd0, 0xba, 0x9e, 0x2b, 0x2c, 0x4a, 0xce, 0x66, 0x3a, 0x20, 0x91, 0x56, 0xa3, 0xf3, 0x63, 0xf4,
- 0x28, 0x78, 0xce, 0xa8, 0xfa, 0x81, 0x29, 0xe2, 0xc3, 0x8b, 0xdc, 0xad, 0x15, 0xb9, 0x0b, 0xd6,
- 0x31, 0xbc, 0xa2, 0xc2, 0x6f, 0x40, 0x43, 0x26, 0x8c, 0xda, 0x75, 0x43, 0xbf, 0x87, 0xf6, 0x75,
- 0x0b, 0x95, 0x29, 0x8d, 0x13, 0x46, 0xfd, 0x9b, 0x25, 0xb2, 0xa1, 0x4f, 0xd8, 0x00, 0xe0, 0x23,
- 0xd0, 0x92, 0x8a, 0xa8, 0x4c, 0xda, 0x07, 0x06, 0xf5, 0xe1, 0xf5, 0x28, 0x23, 0xf7, 0xdf, 0x2a,
- 0x61, 0xad, 0xe5, 0x19, 0x97, 0x98, 0xc1, 0xef, 0x16, 0xe8, 0x96, 0xca, 0x11, 0x97, 0x0a, 0x3e,
- 0xdd, 0xa9, 0x05, 0x7a, 0xb3, 0x5a, 0x68, 0xb7, 0xa9, 0xc4, 0x61, 0x79, 0x53, 0xe7, 0x2a, 0xb2,
- 0x51, 0x87, 0x87, 0xa0, 0xc9, 0x15, 0x8b, 0xa4, 0x5d, 0xef, 0x1f, 0x0c, 0xbb, 0x27, 0x1f, 0x5c,
- 0x9b, 0xbd, 0x7f, 0xab, 0xa4, 0x35, 0xbf, 0xd3, 0x3e, 0xbc, 0xb4, 0x0f, 0xfe, 0x6c, 0xac, 0xb2,
- 0xd6, 0xc5, 0x81, 0x9f, 0x80, 0x8e, 0xee, 0xf3, 0x24, 0x0b, 0x99, 0xc9, 0xfa, 0xc6, 0x3a, 0x8b,
- 0x71, 0x19, 0xc7, 0x2b, 0x05, 0x1c, 0x82, 0x8e, 0x1e, 0x8d, 0x27, 0x22, 0x66, 0x76, 0xc7, 0xa8,
- 0x6f, 0x6a, 0xe5, 0xe3, 0x32, 0x86, 0x57, 0x5f, 0xe1, 0x4f, 0xe0, 0x8e, 0x54, 0x24, 0x55, 0x3c,
- 0x9e, 0x7d, 0xcd, 0xc8, 0x24, 0xe4, 0x31, 0x1b, 0x33, 0x2a, 0xe2, 0x89, 0x34, 0xad, 0x3c, 0xf0,
- 0xdf, 0x2b, 0x72, 0xf7, 0xce, 0xb8, 0x5a, 0x82, 0xf7, 0x79, 0xe1, 0x53, 0x70, 0x9b, 0x8a, 0x98,
- 0x66, 0x69, 0xca, 0x62, 0xba, 0xf8, 0x51, 0x84, 0x9c, 0x2e, 0x4c, 0x43, 0x6f, 0xf8, 0xa8, 0xcc,
- 0xfb, 0xf6, 0x83, 0x6d, 0xc1, 0x3f, 0x55, 0x41, 0xbc, 0x0b, 0x82, 0xf7, 0x40, 0x5b, 0x66, 0x32,
- 0x61, 0xf1, 0xc4, 0x6e, 0xf4, 0xad, 0x61, 0xc7, 0xef, 0x16, 0xb9, 0xdb, 0x1e, 0x2f, 0x43, 0xf8,
- 0xea, 0x1b, 0x7c, 0x06, 0xba, 0xcf, 0x45, 0xf0, 0x98, 0x45, 0x49, 0x48, 0x14, 0xb3, 0x9b, 0xa6,
- 0xd9, 0x1f, 0xed, 0xef, 0xc8, 0xe9, 0x5a, 0x6c, 0xc6, 0xf3, 0x9d, 0x32, 0xd3, 0xee, 0xc6, 0x07,
- 0xbc, 0x89, 0x84, 0x3f, 0x83, 0x9e, 0xcc, 0x28, 0x65, 0x52, 0x4e, 0xb3, 0xf0, 0x54, 0x04, 0xf2,
- 0x5b, 0x2e, 0x95, 0x48, 0x17, 0x23, 0x1e, 0x71, 0x65, 0xb7, 0xfa, 0xd6, 0xb0, 0xe9, 0x3b, 0x45,
- 0xee, 0xf6, 0xc6, 0x7b, 0x55, 0xf8, 0x35, 0x04, 0x88, 0xc1, 0xd1, 0x94, 0xf0, 0x90, 0x4d, 0x76,
- 0xd8, 0x6d, 0xc3, 0xee, 0x15, 0xb9, 0x7b, 0xf4, 0xb0, 0x52, 0x81, 0xf7, 0x38, 0x07, 0x7f, 0xd4,
- 0xc1, 0xad, 0x57, 0x5e, 0x0e, 0xfc, 0x1e, 0xb4, 0x08, 0x55, 0xfc, 0x5c, 0x4f, 0x96, 0x1e, 0xda,
- 0xbb, 0x9b, 0x25, 0xd2, 0xdb, 0x6f, 0xbd, 0x09, 0x30, 0x9b, 0x32, 0xdd, 0x09, 0xb6, 0x7e, 0x6e,
- 0x5f, 0x19, 0x2b, 0x2e, 0x11, 0x30, 0x04, 0x87, 0x21, 0x91, 0xea, 0x6a, 0x28, 0xf5, 0xc8, 0x99,
- 0x26, 0x75, 0x4f, 0x3e, 0x7e, 0xb3, 0x67, 0xa6, 0x1d, 0xfe, 0xbb, 0x45, 0xee, 0x1e, 0x8e, 0xb6,
- 0x38, 0x78, 0x87, 0x0c, 0x53, 0x00, 0x4d, 0x6c, 0x55, 0x42, 0x73, 0x5f, 0xf3, 0x3f, 0xdf, 0x77,
- 0x54, 0xe4, 0x2e, 0x1c, 0xed, 0x90, 0x70, 0x05, 0x5d, 0x2f, 0x94, 0xb7, 0xb7, 0x46, 0xe5, 0x7f,
- 0x58, 0xb0, 0xf7, 0x5f, 0x59, 0xb0, 0xef, 0x57, 0x4d, 0x31, 0x7a, 0xcd, 0x5e, 0xf5, 0xef, 0x5f,
- 0x5c, 0x3a, 0xb5, 0x17, 0x97, 0x4e, 0xed, 0xe5, 0xa5, 0x53, 0xfb, 0xa5, 0x70, 0xac, 0x8b, 0xc2,
- 0xb1, 0x5e, 0x14, 0x8e, 0xf5, 0xb2, 0x70, 0xac, 0xbf, 0x0a, 0xc7, 0xfa, 0xed, 0x6f, 0xa7, 0xf6,
- 0xc4, 0xde, 0xf7, 0x7f, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xaa, 0x2c, 0x86, 0xaa,
- 0x07, 0x00, 0x00,
-}
+func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} }
func (m *CronJob) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/batch/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/batch/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..57520d7a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/batch/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*CronJob) ProtoMessage() {}
+
+func (*CronJobList) ProtoMessage() {}
+
+func (*CronJobSpec) ProtoMessage() {}
+
+func (*CronJobStatus) ProtoMessage() {}
+
+func (*JobTemplateSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/batch/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/batch/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..77fe2f66
--- /dev/null
+++ b/operator/vendor/k8s.io/api/batch/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJob) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1beta1.CronJob"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobList) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1beta1.CronJobList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobSpec) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1beta1.CronJobSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CronJobStatus) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1beta1.CronJobStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in JobTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.batch.v1beta1.JobTemplateSpec"
+}
diff --git a/operator/vendor/k8s.io/api/certificates/v1/doc.go b/operator/vendor/k8s.io/api/certificates/v1/doc.go
index 6c16fc29..3ed26758 100644
--- a/operator/vendor/k8s.io/api/certificates/v1/doc.go
+++ b/operator/vendor/k8s.io/api/certificates/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.certificates.v1
+
// +groupName=certificates.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/certificates/v1/generated.pb.go b/operator/vendor/k8s.io/api/certificates/v1/generated.pb.go
index cba4a8ea..e47a42e6 100644
--- a/operator/vendor/k8s.io/api/certificates/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/certificates/v1/generated.pb.go
@@ -23,270 +23,26 @@ import (
fmt "fmt"
io "io"
-
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ "sort"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} }
-func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} }
-func (*CertificateSigningRequest) ProtoMessage() {}
-func (*CertificateSigningRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{0}
-}
-func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequest.Merge(m, src)
-}
-func (m *CertificateSigningRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m)
-}
+func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} }
-var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo
+func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} }
-func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} }
-func (*CertificateSigningRequestCondition) ProtoMessage() {}
-func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{1}
-}
-func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src)
-}
-func (m *CertificateSigningRequestCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo
+func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} }
-func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} }
-func (*CertificateSigningRequestList) ProtoMessage() {}
-func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{2}
-}
-func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestList.Merge(m, src)
-}
-func (m *CertificateSigningRequestList) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestList) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo
-
-func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} }
-func (*CertificateSigningRequestSpec) ProtoMessage() {}
-func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{3}
-}
-func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src)
-}
-func (m *CertificateSigningRequestSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo
-
-func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} }
-func (*CertificateSigningRequestStatus) ProtoMessage() {}
-func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{4}
-}
-func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src)
-}
-func (m *CertificateSigningRequestStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo
-
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_5f7d41da689f96f7, []int{5}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequest")
- proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequestCondition")
- proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequestList")
- proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequestSpec")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequestSpec.ExtraEntry")
- proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1.CertificateSigningRequestStatus")
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1.ExtraValue")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/certificates/v1/generated.proto", fileDescriptor_5f7d41da689f96f7)
-}
-
-var fileDescriptor_5f7d41da689f96f7 = []byte{
- // 896 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
- 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0xad, 0xa0,
- 0x0a, 0x15, 0xcc, 0x92, 0xa8, 0x40, 0x28, 0x08, 0xa1, 0x4d, 0x23, 0x54, 0x91, 0x82, 0x34, 0x49,
- 0x38, 0x14, 0x0e, 0x9d, 0xac, 0x5f, 0x37, 0xd3, 0x74, 0xff, 0xb0, 0x33, 0x6b, 0xd5, 0xb7, 0x7e,
- 0x04, 0x8e, 0x1c, 0xf9, 0x02, 0x7c, 0x06, 0xae, 0x39, 0xf6, 0x58, 0x24, 0x64, 0x11, 0xf7, 0x5b,
- 0xe4, 0x84, 0x66, 0x76, 0xbc, 0x76, 0x9c, 0xb8, 0x0d, 0xb9, 0x79, 0x7e, 0xf3, 0x7b, 0xbf, 0xdf,
- 0x7b, 0x6f, 0xdf, 0x1b, 0x19, 0xdd, 0x39, 0xda, 0x14, 0x84, 0x27, 0x1e, 0x4b, 0xb9, 0x17, 0x40,
- 0x26, 0xf9, 0x13, 0x1e, 0x30, 0x09, 0xc2, 0x1b, 0xac, 0x7b, 0x21, 0xc4, 0x90, 0x31, 0x09, 0x7d,
- 0x92, 0x66, 0x89, 0x4c, 0x70, 0xa7, 0xe0, 0x12, 0x96, 0x72, 0x32, 0xcb, 0x25, 0x83, 0xf5, 0xce,
- 0x27, 0x21, 0x97, 0x87, 0xf9, 0x01, 0x09, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x41,
- 0xfe, 0x44, 0x9f, 0xf4, 0x41, 0xff, 0x2a, 0xa4, 0x3a, 0xee, 0xac, 0x6d, 0x92, 0xc1, 0x05, 0x76,
- 0x9d, 0xbb, 0x53, 0x4e, 0xc4, 0x82, 0x43, 0x1e, 0x43, 0x36, 0xf4, 0xd2, 0xa3, 0x50, 0x01, 0xc2,
- 0x8b, 0x40, 0xb2, 0x8b, 0xa2, 0xbc, 0x45, 0x51, 0x59, 0x1e, 0x4b, 0x1e, 0xc1, 0xb9, 0x80, 0xcf,
- 0xdf, 0x16, 0x20, 0x82, 0x43, 0x88, 0xd8, 0x7c, 0x9c, 0xfb, 0xd7, 0x12, 0x7a, 0x7f, 0x6b, 0xda,
- 0x85, 0x5d, 0x1e, 0xc6, 0x3c, 0x0e, 0x29, 0xfc, 0x9a, 0x83, 0x90, 0xf8, 0x31, 0x6a, 0xaa, 0x0c,
- 0xfb, 0x4c, 0x32, 0xdb, 0xea, 0x59, 0x6b, 0xed, 0x8d, 0x4f, 0xc9, 0xb4, 0x7d, 0xa5, 0x11, 0x49,
- 0x8f, 0x42, 0x05, 0x08, 0xa2, 0xd8, 0x64, 0xb0, 0x4e, 0x7e, 0x3c, 0x78, 0x0a, 0x81, 0x7c, 0x08,
- 0x92, 0xf9, 0xf8, 0x78, 0xe4, 0x54, 0xc6, 0x23, 0x07, 0x4d, 0x31, 0x5a, 0xaa, 0xe2, 0x9f, 0x51,
- 0x4d, 0xa4, 0x10, 0xd8, 0x4b, 0x5a, 0xfd, 0x4b, 0xb2, 0xf8, 0xe3, 0x90, 0x85, 0x69, 0xee, 0xa6,
- 0x10, 0xf8, 0x2b, 0xc6, 0xa6, 0xa6, 0x4e, 0x54, 0x8b, 0xe2, 0x00, 0x35, 0x84, 0x64, 0x32, 0x17,
- 0x76, 0x55, 0xcb, 0x7f, 0x75, 0x35, 0x79, 0x2d, 0xe1, 0xaf, 0x1a, 0x83, 0x46, 0x71, 0xa6, 0x46,
- 0xda, 0x7d, 0x5d, 0x45, 0xee, 0xc2, 0xd8, 0xad, 0x24, 0xee, 0x73, 0xc9, 0x93, 0x18, 0x6f, 0xa2,
- 0x9a, 0x1c, 0xa6, 0xa0, 0xdb, 0xd8, 0xf2, 0x3f, 0x98, 0x64, 0xbb, 0x37, 0x4c, 0xe1, 0x74, 0xe4,
- 0xbc, 0x3b, 0xcf, 0x57, 0x38, 0xd5, 0x11, 0x78, 0xa7, 0xac, 0xa2, 0xa1, 0x63, 0xef, 0x9e, 0x4d,
- 0xe4, 0x74, 0xe4, 0x5c, 0x30, 0x87, 0xa4, 0x54, 0x3a, 0x9b, 0x2e, 0xbe, 0x8d, 0x1a, 0x19, 0x30,
- 0x91, 0xc4, 0xba, 0xe5, 0xad, 0x69, 0x59, 0x54, 0xa3, 0xd4, 0xdc, 0xe2, 0x8f, 0xd0, 0x72, 0x04,
- 0x42, 0xb0, 0x10, 0x74, 0xf3, 0x5a, 0xfe, 0x35, 0x43, 0x5c, 0x7e, 0x58, 0xc0, 0x74, 0x72, 0x8f,
- 0x9f, 0xa2, 0xd5, 0x67, 0x4c, 0xc8, 0xfd, 0xb4, 0xcf, 0x24, 0xec, 0xf1, 0x08, 0xec, 0x9a, 0x6e,
- 0xf7, 0x9d, 0xcb, 0xcd, 0x8a, 0x8a, 0xf0, 0x6f, 0x1a, 0xf5, 0xd5, 0x9d, 0x33, 0x4a, 0x74, 0x4e,
- 0x19, 0x0f, 0x10, 0x56, 0xc8, 0x5e, 0xc6, 0x62, 0x51, 0x34, 0x4a, 0xf9, 0xd5, 0xff, 0xb7, 0x5f,
- 0xc7, 0xf8, 0xe1, 0x9d, 0x73, 0x6a, 0xf4, 0x02, 0x07, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf8, 0x95,
- 0x77, 0xb8, 0x90, 0xf8, 0x97, 0x73, 0xbb, 0x42, 0x2e, 0x97, 0x8f, 0x8a, 0xd6, 0x9b, 0x72, 0xdd,
- 0xe4, 0xd4, 0x9c, 0x20, 0x33, 0x7b, 0xf2, 0x08, 0xd5, 0xb9, 0x84, 0x48, 0xd8, 0x4b, 0xbd, 0xea,
- 0x5a, 0x7b, 0xe3, 0xb3, 0x2b, 0x4d, 0xb2, 0xff, 0x8e, 0x71, 0xa8, 0x3f, 0x50, 0x5a, 0xb4, 0x90,
- 0x74, 0xff, 0xac, 0xbd, 0xa1, 0x36, 0xb5, 0x4e, 0xf8, 0x43, 0xb4, 0x9c, 0x15, 0x47, 0x5d, 0xda,
- 0x8a, 0xdf, 0x56, 0x83, 0x60, 0x18, 0x74, 0x72, 0x87, 0x37, 0x10, 0x12, 0x3c, 0x8c, 0x21, 0xfb,
- 0x81, 0x45, 0x60, 0x2f, 0xeb, 0xb1, 0x29, 0xd7, 0x7f, 0xb7, 0xbc, 0xa1, 0x33, 0x2c, 0xbc, 0x85,
- 0x6e, 0xc0, 0xf3, 0x94, 0x67, 0x4c, 0xcf, 0x2a, 0x04, 0x49, 0xdc, 0x17, 0x76, 0xb3, 0x67, 0xad,
- 0xd5, 0xfd, 0xf7, 0xc6, 0x23, 0xe7, 0xc6, 0xf6, 0xfc, 0x25, 0x3d, 0xcf, 0xc7, 0x04, 0x35, 0x72,
- 0x35, 0x8a, 0xc2, 0xae, 0xf7, 0xaa, 0x6b, 0x2d, 0xff, 0xa6, 0x1a, 0xe8, 0x7d, 0x8d, 0x9c, 0x8e,
- 0x9c, 0xe6, 0xf7, 0x30, 0xd4, 0x07, 0x6a, 0x58, 0xf8, 0x63, 0xd4, 0xcc, 0x05, 0x64, 0xb1, 0x4a,
- 0xb3, 0x58, 0x83, 0xb2, 0xf7, 0xfb, 0x06, 0xa7, 0x25, 0x03, 0xdf, 0x42, 0xd5, 0x9c, 0xf7, 0xcd,
- 0x1a, 0xb4, 0x0d, 0xb1, 0xba, 0xff, 0xe0, 0x3e, 0x55, 0x38, 0x76, 0x51, 0x23, 0xcc, 0x92, 0x3c,
- 0x15, 0x76, 0x4d, 0x9b, 0x23, 0x65, 0xfe, 0x9d, 0x46, 0xa8, 0xb9, 0xc1, 0x1c, 0xd5, 0xe1, 0xb9,
- 0xcc, 0x98, 0xdd, 0xd0, 0x9f, 0xef, 0xfe, 0x95, 0xdf, 0x39, 0xb2, 0xad, 0x64, 0xb6, 0x63, 0x99,
- 0x0d, 0xa7, 0x5f, 0x53, 0x63, 0xb4, 0x70, 0xe8, 0x3c, 0x46, 0x68, 0xca, 0xc1, 0xd7, 0x51, 0xf5,
- 0x08, 0x86, 0xc5, 0xab, 0x43, 0xd5, 0x4f, 0xfc, 0x35, 0xaa, 0x0f, 0xd8, 0xb3, 0x1c, 0xcc, 0x93,
- 0x7b, 0xfb, 0x4d, 0xa9, 0x68, 0xa1, 0x9f, 0x14, 0x9b, 0x16, 0x41, 0xf7, 0x96, 0x36, 0x2d, 0xf7,
- 0xd8, 0x42, 0xce, 0x5b, 0x5e, 0x4b, 0x9c, 0x21, 0x14, 0x4c, 0x5e, 0x20, 0x61, 0x5b, 0xba, 0xea,
- 0x6f, 0xae, 0x54, 0x75, 0xf9, 0x90, 0x4d, 0x47, 0xa9, 0x84, 0x04, 0x9d, 0x71, 0xc1, 0xeb, 0xa8,
- 0x3d, 0xa3, 0xaa, 0xeb, 0x5b, 0xf1, 0xaf, 0x8d, 0x47, 0x4e, 0x7b, 0x46, 0x9c, 0xce, 0x72, 0xdc,
- 0x2f, 0x4c, 0xb3, 0x74, 0x8d, 0xd8, 0x99, 0x2c, 0x99, 0xa5, 0x3f, 0x64, 0x6b, 0x7e, 0x53, 0xee,
- 0x35, 0x7f, 0xff, 0xc3, 0xa9, 0xbc, 0xf8, 0xa7, 0x57, 0xf1, 0xbf, 0x3d, 0x3e, 0xe9, 0x56, 0x5e,
- 0x9e, 0x74, 0x2b, 0xaf, 0x4e, 0xba, 0x95, 0x17, 0xe3, 0xae, 0x75, 0x3c, 0xee, 0x5a, 0x2f, 0xc7,
- 0x5d, 0xeb, 0xd5, 0xb8, 0x6b, 0xfd, 0x3b, 0xee, 0x5a, 0xbf, 0xbd, 0xee, 0x56, 0x1e, 0x75, 0x16,
- 0xff, 0x2f, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x4a, 0x4f, 0xbc, 0xb4, 0x08, 0x00, 0x00,
-}
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -486,7 +242,7 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int,
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
@@ -822,7 +578,7 @@ func (this *CertificateSigningRequestSpec) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
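
Beyond dropping the gogo registration and descriptor boilerplate, the hunks above replace github.com/gogo/protobuf/sortkeys.Strings with the standard library's sort.Strings when ordering map keys for deterministic marshalling and String() output. The same pattern in isolation:

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        extra := map[string][]string{
            "uid":    {"1234"},
            "groups": {"system:authenticated"},
        }

        // Collect and sort the keys so iteration order is deterministic,
        // mirroring what the generated marshalling code does with Extra.
        keys := make([]string, 0, len(extra))
        for k := range extra {
            keys = append(keys, k)
        }
        sort.Strings(keys)

        for _, k := range keys {
            fmt.Println(k, extra[k])
        }
    }
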
diff --git a/operator/vendor/k8s.io/api/certificates/v1/generated.proto b/operator/vendor/k8s.io/api/certificates/v1/generated.proto
index 24528fc8..a689f3e8 100644
--- a/operator/vendor/k8s.io/api/certificates/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/certificates/v1/generated.proto
@@ -111,7 +111,6 @@ message CertificateSigningRequestList {
message CertificateSigningRequestSpec {
// request contains an x509 certificate signing request encoded in a "CERTIFICATE REQUEST" PEM block.
// When serialized as JSON or YAML, the data is additionally base64-encoded.
- // +listType=atomic
optional bytes request = 1;
// signerName indicates the requested signer, and is a qualified name.
@@ -207,6 +206,7 @@ message CertificateSigningRequestStatus {
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
+ // +k8s:customUnique
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
@@ -239,7 +239,6 @@ message CertificateSigningRequestStatus {
// -----END CERTIFICATE-----
// )
//
- // +listType=atomic
// +optional
optional bytes certificate = 2;
}
diff --git a/operator/vendor/k8s.io/api/certificates/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/certificates/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..52763058
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1/generated.protomessage.pb.go
@@ -0,0 +1,34 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*CertificateSigningRequest) ProtoMessage() {}
+
+func (*CertificateSigningRequestCondition) ProtoMessage() {}
+
+func (*CertificateSigningRequestList) ProtoMessage() {}
+
+func (*CertificateSigningRequestSpec) ProtoMessage() {}
+
+func (*CertificateSigningRequestStatus) ProtoMessage() {}
+
+func (*ExtraValue) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/certificates/v1/types.go b/operator/vendor/k8s.io/api/certificates/v1/types.go
index 71203e80..8cd56e6d 100644
--- a/operator/vendor/k8s.io/api/certificates/v1/types.go
+++ b/operator/vendor/k8s.io/api/certificates/v1/types.go
@@ -61,7 +61,6 @@ type CertificateSigningRequest struct {
type CertificateSigningRequestSpec struct {
// request contains an x509 certificate signing request encoded in a "CERTIFICATE REQUEST" PEM block.
// When serialized as JSON or YAML, the data is additionally base64-encoded.
- // +listType=atomic
Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"`
// signerName indicates the requested signer, and is a qualified name.
@@ -182,6 +181,7 @@ type CertificateSigningRequestStatus struct {
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
+ // +k8s:customUnique
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
@@ -214,7 +214,6 @@ type CertificateSigningRequestStatus struct {
// -----END CERTIFICATE-----
// )
//
- // +listType=atomic
// +optional
Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"`
}
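
In certificates/v1 the listType=atomic markers come off spec.request and status.certificate, and status.conditions gains +k8s:customUnique on top of its map semantics keyed by type, with the Approved and Denied entries constrained to zero-or-one-of. A hedged sketch of a client reading that status; the condition-type constant and field names below are the usual ones from k8s.io/api/certificates/v1 and are assumed, not shown in this hunk:

    package main

    import (
        "fmt"

        certificatesv1 "k8s.io/api/certificates/v1"
        corev1 "k8s.io/api/core/v1"
    )

    // isApproved reports whether the CSR carries an Approved condition with
    // status True; per the markers above, at most one of Approved/Denied exists.
    func isApproved(csr *certificatesv1.CertificateSigningRequest) bool {
        for _, c := range csr.Status.Conditions {
            if c.Type == certificatesv1.CertificateApproved && c.Status == corev1.ConditionTrue {
                return true
            }
        }
        return false
    }

    func main() {
        csr := &certificatesv1.CertificateSigningRequest{
            Status: certificatesv1.CertificateSigningRequestStatus{
                Conditions: []certificatesv1.CertificateSigningRequestCondition{{
                    Type:   certificatesv1.CertificateApproved,
                    Status: corev1.ConditionTrue,
                }},
            },
        }
        fmt.Println("approved:", isApproved(csr), "certificate bytes:", len(csr.Status.Certificate))
    }
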
diff --git a/operator/vendor/k8s.io/api/certificates/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/certificates/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..9c7ad07a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequest) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1.CertificateSigningRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestCondition) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1.CertificateSigningRequestCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestList) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1.CertificateSigningRequestList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestSpec) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1.CertificateSigningRequestSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestStatus) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1.CertificateSigningRequestStatus"
+}
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/doc.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/doc.go
index 01481df8..b8b1b1a6 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.certificates.v1alpha1
// +groupName=certificates.k8s.io
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
index c260f043..29b8843a 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.pb.go
@@ -24,299 +24,16 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
-
- k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} }
-func (*ClusterTrustBundle) ProtoMessage() {}
-func (*ClusterTrustBundle) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{0}
-}
-func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundle.Merge(m, src)
-}
-func (m *ClusterTrustBundle) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundle) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo
-
-func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} }
-func (*ClusterTrustBundleList) ProtoMessage() {}
-func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{1}
-}
-func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundleList.Merge(m, src)
-}
-func (m *ClusterTrustBundleList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundleList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo
-
-func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} }
-func (*ClusterTrustBundleSpec) ProtoMessage() {}
-func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{2}
-}
-func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src)
-}
-func (m *ClusterTrustBundleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
-
-func (m *PodCertificateRequest) Reset() { *m = PodCertificateRequest{} }
-func (*PodCertificateRequest) ProtoMessage() {}
-func (*PodCertificateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{3}
-}
-func (m *PodCertificateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCertificateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCertificateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCertificateRequest.Merge(m, src)
-}
-func (m *PodCertificateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCertificateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCertificateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodCertificateRequest proto.InternalMessageInfo
-
-func (m *PodCertificateRequestList) Reset() { *m = PodCertificateRequestList{} }
-func (*PodCertificateRequestList) ProtoMessage() {}
-func (*PodCertificateRequestList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{4}
-}
-func (m *PodCertificateRequestList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCertificateRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCertificateRequestList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCertificateRequestList.Merge(m, src)
-}
-func (m *PodCertificateRequestList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCertificateRequestList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCertificateRequestList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodCertificateRequestList proto.InternalMessageInfo
-
-func (m *PodCertificateRequestSpec) Reset() { *m = PodCertificateRequestSpec{} }
-func (*PodCertificateRequestSpec) ProtoMessage() {}
-func (*PodCertificateRequestSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{5}
-}
-func (m *PodCertificateRequestSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCertificateRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCertificateRequestSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCertificateRequestSpec.Merge(m, src)
-}
-func (m *PodCertificateRequestSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCertificateRequestSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCertificateRequestSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodCertificateRequestSpec proto.InternalMessageInfo
-
-func (m *PodCertificateRequestStatus) Reset() { *m = PodCertificateRequestStatus{} }
-func (*PodCertificateRequestStatus) ProtoMessage() {}
-func (*PodCertificateRequestStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_f73d5fe56c015bb8, []int{6}
-}
-func (m *PodCertificateRequestStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCertificateRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCertificateRequestStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCertificateRequestStatus.Merge(m, src)
-}
-func (m *PodCertificateRequestStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCertificateRequestStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCertificateRequestStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodCertificateRequestStatus proto.InternalMessageInfo
+func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} }
-func init() {
- proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundle")
- proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleList")
- proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1alpha1.ClusterTrustBundleSpec")
- proto.RegisterType((*PodCertificateRequest)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequest")
- proto.RegisterType((*PodCertificateRequestList)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestList")
- proto.RegisterType((*PodCertificateRequestSpec)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestSpec")
- proto.RegisterType((*PodCertificateRequestStatus)(nil), "k8s.io.api.certificates.v1alpha1.PodCertificateRequestStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/certificates/v1alpha1/generated.proto", fileDescriptor_f73d5fe56c015bb8)
-}
+func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} }
-var fileDescriptor_f73d5fe56c015bb8 = []byte{
- // 918 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0xc7, 0xe3, 0xb6, 0x69, 0x9b, 0x49, 0x5b, 0xda, 0x61, 0x17, 0x99, 0x22, 0x39, 0x21, 0x07,
- 0x54, 0x90, 0xb0, 0xb7, 0xa5, 0xb0, 0x2b, 0x10, 0x48, 0x75, 0x0a, 0x52, 0xe9, 0x6e, 0x36, 0x9a,
- 0x74, 0xf9, 0xb1, 0x5a, 0x24, 0x1c, 0xe7, 0x25, 0x19, 0x1a, 0x7b, 0x8c, 0x67, 0x5c, 0xb5, 0x37,
- 0x24, 0xfe, 0x01, 0xfe, 0x23, 0xae, 0x3d, 0x2e, 0x5c, 0xd8, 0x53, 0xa0, 0xe6, 0x6f, 0xe0, 0xb2,
- 0x27, 0xe4, 0xb1, 0x9d, 0x5f, 0x4e, 0xb6, 0xd9, 0x1e, 0x7a, 0xcb, 0xbc, 0x79, 0xdf, 0xcf, 0xfb,
- 0xbe, 0x99, 0x37, 0x56, 0xd0, 0xbd, 0xd3, 0x07, 0x5c, 0xa7, 0xcc, 0xb0, 0x3c, 0x6a, 0xd8, 0xe0,
- 0x0b, 0xda, 0xa6, 0xb6, 0x25, 0x80, 0x1b, 0x67, 0xbb, 0x56, 0xcf, 0xeb, 0x5a, 0xbb, 0x46, 0x07,
- 0x5c, 0xf0, 0x2d, 0x01, 0x2d, 0xdd, 0xf3, 0x99, 0x60, 0xb8, 0x1c, 0x2b, 0x74, 0xcb, 0xa3, 0xfa,
- 0xa8, 0x42, 0x4f, 0x15, 0xdb, 0x1f, 0x76, 0xa8, 0xe8, 0x06, 0x4d, 0xdd, 0x66, 0x8e, 0xd1, 0x61,
- 0x1d, 0x66, 0x48, 0x61, 0x33, 0x68, 0xcb, 0x95, 0x5c, 0xc8, 0x5f, 0x31, 0x70, 0x7b, 0x7f, 0x68,
- 0xc1, 0xb1, 0xec, 0x2e, 0x75, 0xc1, 0xbf, 0x30, 0xbc, 0xd3, 0x4e, 0x14, 0xe0, 0x86, 0x03, 0xc2,
- 0x32, 0xce, 0x32, 0x36, 0xb6, 0x8d, 0x59, 0x2a, 0x3f, 0x70, 0x05, 0x75, 0x20, 0x23, 0xf8, 0xe4,
- 0x3a, 0x01, 0xb7, 0xbb, 0xe0, 0x58, 0x93, 0xba, 0xca, 0x9f, 0x0a, 0xc2, 0xd5, 0x5e, 0xc0, 0x05,
- 0xf8, 0x27, 0x7e, 0xc0, 0x85, 0x19, 0xb8, 0xad, 0x1e, 0xe0, 0x1f, 0xd1, 0x6a, 0x64, 0xad, 0x65,
- 0x09, 0x4b, 0x55, 0xca, 0xca, 0x4e, 0x71, 0xef, 0x9e, 0x3e, 0x3c, 0x99, 0x41, 0x05, 0xdd, 0x3b,
- 0xed, 0x44, 0x01, 0xae, 0x47, 0xd9, 0xfa, 0xd9, 0xae, 0xfe, 0xb8, 0xf9, 0x13, 0xd8, 0xe2, 0x11,
- 0x08, 0xcb, 0xc4, 0x97, 0xfd, 0x52, 0x2e, 0xec, 0x97, 0xd0, 0x30, 0x46, 0x06, 0x54, 0xfc, 0x14,
- 0x2d, 0x71, 0x0f, 0x6c, 0x75, 0x41, 0xd2, 0x1f, 0xe8, 0xd7, 0x9d, 0xbb, 0x9e, 0x75, 0xd9, 0xf0,
- 0xc0, 0x36, 0xd7, 0x92, 0x2a, 0x4b, 0xd1, 0x8a, 0x48, 0x66, 0xe5, 0x0f, 0x05, 0xbd, 0x95, 0x4d,
- 0x7f, 0x48, 0xb9, 0xc0, 0xcf, 0x32, 0x8d, 0xe9, 0xf3, 0x35, 0x16, 0xa9, 0x65, 0x5b, 0x9b, 0x49,
- 0xc1, 0xd5, 0x34, 0x32, 0xd2, 0xd4, 0xf7, 0x28, 0x4f, 0x05, 0x38, 0x5c, 0x5d, 0x28, 0x2f, 0xee,
- 0x14, 0xf7, 0xf6, 0x6f, 0xd2, 0x95, 0xb9, 0x9e, 0x14, 0xc8, 0x1f, 0x45, 0x28, 0x12, 0x13, 0x2b,
- 0xbf, 0x4e, 0xed, 0x29, 0x6a, 0x1a, 0xef, 0x21, 0xc4, 0x69, 0xc7, 0x05, 0xbf, 0x66, 0x39, 0x20,
- 0xbb, 0x2a, 0x0c, 0x0f, 0xbf, 0x31, 0xd8, 0x21, 0x23, 0x59, 0xf8, 0x63, 0x54, 0x14, 0x43, 0x8c,
- 0xbc, 0x85, 0x82, 0xf9, 0x66, 0x22, 0x2a, 0x8e, 0x54, 0x20, 0xa3, 0x79, 0x95, 0xdf, 0x17, 0xd0,
- 0xdd, 0x3a, 0x6b, 0x55, 0x87, 0xbd, 0x10, 0xf8, 0x39, 0x00, 0x2e, 0x6e, 0x61, 0x62, 0x7e, 0x18,
- 0x9b, 0x98, 0xcf, 0xae, 0x3f, 0xdb, 0xa9, 0x46, 0x67, 0x0d, 0x0d, 0x06, 0xb4, 0xcc, 0x85, 0x25,
- 0x02, 0xae, 0x2e, 0xca, 0x02, 0x9f, 0xdf, 0xb4, 0x80, 0x84, 0x98, 0x1b, 0x49, 0x89, 0xe5, 0x78,
- 0x4d, 0x12, 0x78, 0xe5, 0x2f, 0x05, 0xbd, 0x3d, 0x55, 0x77, 0x0b, 0xe3, 0xf9, 0x6c, 0x7c, 0x3c,
- 0xef, 0xdf, 0xb0, 0xc3, 0x19, 0x13, 0xfa, 0x5f, 0x7e, 0x46, 0x67, 0x37, 0x1e, 0xd2, 0xf7, 0xd1,
- 0x8a, 0xc7, 0x5a, 0x52, 0x10, 0x0f, 0xe8, 0x1b, 0x89, 0x60, 0xa5, 0x1e, 0x87, 0x49, 0xba, 0x8f,
- 0x8f, 0xd1, 0xb2, 0xc7, 0x5a, 0x4f, 0x8e, 0x0e, 0xe5, 0xed, 0x15, 0xcc, 0x8f, 0xd2, 0xe3, 0xaf,
- 0xcb, 0xe8, 0xcb, 0x7e, 0xe9, 0xdd, 0x59, 0x5f, 0x48, 0x71, 0xe1, 0x01, 0xd7, 0x9f, 0x1c, 0x1d,
- 0x92, 0x04, 0x81, 0xbf, 0x46, 0x98, 0x83, 0x7f, 0x46, 0x6d, 0x38, 0xb0, 0x6d, 0x16, 0xb8, 0x42,
- 0x5a, 0x58, 0x92, 0xe0, 0xed, 0x04, 0x8c, 0x1b, 0x99, 0x0c, 0x32, 0x45, 0x85, 0x7b, 0x68, 0x6b,
- 0x3c, 0x1a, 0x79, 0xcc, 0x4b, 0xd4, 0x17, 0x09, 0x6a, 0xab, 0x31, 0x99, 0x30, 0x9f, 0xdd, 0x2c,
- 0x18, 0x7f, 0x83, 0x56, 0x5d, 0xd6, 0x02, 0xe9, 0x77, 0x59, 0x16, 0xf9, 0x34, 0x9d, 0x87, 0x5a,
- 0x12, 0x7f, 0xd9, 0x2f, 0xbd, 0xf7, 0x6a, 0x76, 0x9a, 0x49, 0x06, 0x2c, 0x5c, 0x43, 0x2b, 0xd1,
- 0xef, 0xc8, 0xfb, 0x8a, 0xc4, 0xee, 0xa7, 0x37, 0x51, 0x8b, 0xc3, 0xf3, 0x39, 0x4e, 0x21, 0xf8,
- 0x21, 0xba, 0xe3, 0x58, 0xe7, 0x5f, 0x9e, 0x7b, 0xd4, 0xb7, 0x04, 0x65, 0x6e, 0x03, 0x6c, 0xe6,
- 0xb6, 0xb8, 0xba, 0x5a, 0x56, 0x76, 0xf2, 0xa6, 0x1a, 0xf6, 0x4b, 0x77, 0x1e, 0x4d, 0xd9, 0x27,
- 0x53, 0x55, 0xf8, 0x3e, 0x5a, 0xf7, 0x4e, 0xe9, 0x79, 0x3d, 0x68, 0xf6, 0xa8, 0x7d, 0x0c, 0x17,
- 0x6a, 0xa1, 0xac, 0xec, 0xac, 0x99, 0x5b, 0x61, 0xbf, 0xb4, 0x5e, 0x3f, 0x3e, 0xfa, 0x6e, 0xb0,
- 0x41, 0xc6, 0xf3, 0x70, 0x15, 0x6d, 0x79, 0x3e, 0x63, 0xed, 0xc7, 0xed, 0x3a, 0xe3, 0x1c, 0x38,
- 0xa7, 0xcc, 0x55, 0x91, 0x14, 0xdf, 0x8d, 0x2e, 0xa6, 0x3e, 0xb9, 0x49, 0xb2, 0xf9, 0x95, 0xbf,
- 0x17, 0xd1, 0x3b, 0xaf, 0xf8, 0x12, 0x60, 0x1b, 0xa1, 0xc8, 0x26, 0x8d, 0x1c, 0x73, 0x55, 0x91,
- 0x4f, 0xcf, 0x98, 0xef, 0x55, 0x57, 0x53, 0xdd, 0xf0, 0xa9, 0x0c, 0x42, 0x9c, 0x8c, 0x60, 0xf1,
- 0x21, 0xda, 0x1c, 0x79, 0xc1, 0xd5, 0xae, 0x45, 0xdd, 0xe4, 0xcd, 0xa8, 0x89, 0x72, 0xb3, 0x3a,
- 0xb1, 0x4f, 0x32, 0x0a, 0xfc, 0x2d, 0x2a, 0xb8, 0x4c, 0x98, 0xd0, 0x66, 0x7e, 0x3c, 0xef, 0xc5,
- 0xbd, 0x0f, 0xe6, 0x73, 0x7a, 0x42, 0x1d, 0x30, 0xd7, 0xc3, 0x7e, 0xa9, 0x50, 0x4b, 0x01, 0x64,
- 0xc8, 0xc2, 0x6d, 0xb4, 0xd1, 0x84, 0x0e, 0x75, 0x09, 0xb4, 0x7d, 0xe0, 0xdd, 0x03, 0x21, 0x9f,
- 0xc0, 0xeb, 0xd1, 0x71, 0xd8, 0x2f, 0x6d, 0x98, 0x63, 0x14, 0x32, 0x41, 0xc5, 0x27, 0xd1, 0xfc,
- 0x8b, 0x83, 0xb6, 0x00, 0x5f, 0xce, 0xff, 0xeb, 0x55, 0x58, 0x8b, 0xdf, 0x49, 0xac, 0x27, 0x03,
- 0x92, 0xf9, 0xd5, 0xe5, 0x95, 0x96, 0x7b, 0x7e, 0xa5, 0xe5, 0x5e, 0x5c, 0x69, 0xb9, 0x5f, 0x42,
- 0x4d, 0xb9, 0x0c, 0x35, 0xe5, 0x79, 0xa8, 0x29, 0x2f, 0x42, 0x4d, 0xf9, 0x27, 0xd4, 0x94, 0xdf,
- 0xfe, 0xd5, 0x72, 0x4f, 0xcb, 0xd7, 0xfd, 0xd9, 0xfc, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x6c,
- 0x5a, 0xc4, 0x8f, 0x0a, 0x00, 0x00,
-}
+func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} }
func (m *ClusterTrustBundle) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -441,261 +158,6 @@ func (m *ClusterTrustBundleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error)
return len(dAtA) - i, nil
}
-func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.ProofOfPossession != nil {
- i -= len(m.ProofOfPossession)
- copy(dAtA[i:], m.ProofOfPossession)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession)))
- i--
- dAtA[i] = 0x52
- }
- if m.PKIXPublicKey != nil {
- i -= len(m.PKIXPublicKey)
- copy(dAtA[i:], m.PKIXPublicKey)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey)))
- i--
- dAtA[i] = 0x4a
- }
- if m.MaxExpirationSeconds != nil {
- i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
- i--
- dAtA[i] = 0x40
- }
- i -= len(m.NodeUID)
- copy(dAtA[i:], m.NodeUID)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID)))
- i--
- dAtA[i] = 0x3a
- i -= len(m.NodeName)
- copy(dAtA[i:], m.NodeName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
- i--
- dAtA[i] = 0x32
- i -= len(m.ServiceAccountUID)
- copy(dAtA[i:], m.ServiceAccountUID)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID)))
- i--
- dAtA[i] = 0x2a
- i -= len(m.ServiceAccountName)
- copy(dAtA[i:], m.ServiceAccountName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
- i--
- dAtA[i] = 0x22
- i -= len(m.PodUID)
- copy(dAtA[i:], m.PodUID)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.PodName)
- copy(dAtA[i:], m.PodName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName)))
- i--
- dAtA[i] = 0x12
- i -= len(m.SignerName)
- copy(dAtA[i:], m.SignerName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.NotAfter != nil {
- {
- size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.BeginRefreshAt != nil {
- {
- size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.NotBefore != nil {
- {
- size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- i -= len(m.CertificateChain)
- copy(dAtA[i:], m.CertificateChain)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain)))
- i--
- dAtA[i] = 0x12
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
@@ -720,1140 +182,122 @@ func (m *ClusterTrustBundle) Size() (n int) {
return n
}
-func (m *ClusterTrustBundleList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *ClusterTrustBundleSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SignerName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.TrustBundle)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *PodCertificateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *PodCertificateRequestList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *PodCertificateRequestSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.SignerName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.PodName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.PodUID)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ServiceAccountName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ServiceAccountUID)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.NodeName)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.NodeUID)
- n += 1 + l + sovGenerated(uint64(l))
- if m.MaxExpirationSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
- }
- if m.PKIXPublicKey != nil {
- l = len(m.PKIXPublicKey)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ProofOfPossession != nil {
- l = len(m.ProofOfPossession)
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func (m *PodCertificateRequestStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.CertificateChain)
- n += 1 + l + sovGenerated(uint64(l))
- if m.NotBefore != nil {
- l = m.NotBefore.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.BeginRefreshAt != nil {
- l = m.BeginRefreshAt.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.NotAfter != nil {
- l = m.NotAfter.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- return n
-}
-
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *ClusterTrustBundle) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ClusterTrustBundle{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ClusterTrustBundleList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]ClusterTrustBundle{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&ClusterTrustBundleList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *ClusterTrustBundleSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&ClusterTrustBundleSpec{`,
- `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
- `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodCertificateRequest) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PodCertificateRequest{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodCertificateRequestList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]PodCertificateRequest{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&PodCertificateRequestList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodCertificateRequestSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&PodCertificateRequestSpec{`,
- `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
- `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`,
- `PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`,
- `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
- `ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`,
- `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
- `NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`,
- `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
- `PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`,
- `ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *PodCertificateRequestStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]Condition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&PodCertificateRequestStatus{`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`,
- `NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`,
- `BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`,
- `NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, ClusterTrustBundle{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SignerName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TrustBundle = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, PodCertificateRequest{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SignerName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PodName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountUID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
+func (m *ClusterTrustBundleList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterTrustBundleSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.TrustBundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ClusterTrustBundle) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterTrustBundle{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterTrustBundleSpec", "ClusterTrustBundleSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterTrustBundleList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]ClusterTrustBundle{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterTrustBundle", "ClusterTrustBundle", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&ClusterTrustBundleList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ClusterTrustBundleSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ClusterTrustBundleSpec{`,
+ `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `TrustBundle:` + fmt.Sprintf("%v", this.TrustBundle) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
}
- if postIndex > l {
+ if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
- m.MaxExpirationSeconds = &v
- case 9:
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var byteLen int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1863,31 +307,30 @@ func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...)
- if m.PKIXPublicKey == nil {
- m.PKIXPublicKey = []byte{}
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
iNdEx = postIndex
- case 10:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
- var byteLen int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1897,24 +340,23 @@ func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...)
- if m.ProofOfPossession == nil {
- m.ProofOfPossession = []byte{}
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
iNdEx = postIndex
default:
@@ -1938,7 +380,7 @@ func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
+func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1961,15 +403,15 @@ func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group")
+ return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1996,16 +438,15 @@ func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Conditions = append(m.Conditions, v1.Condition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2015,65 +456,81 @@ func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.CertificateChain = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ m.Items = append(m.Items, ClusterTrustBundle{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- if msglen < 0 {
- return ErrInvalidLengthGenerated
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
}
- postIndex := iNdEx + msglen
- if postIndex < 0 {
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
- if postIndex > l {
+ if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- if m.NotBefore == nil {
- m.NotBefore = &v1.Time{}
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
}
- if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
- iNdEx = postIndex
- case 5:
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2083,33 +540,29 @@ func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.BeginRefreshAt == nil {
- m.BeginRefreshAt = &v1.Time{}
- }
- if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 6:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2119,27 +572,23 @@ func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.NotAfter == nil {
- m.NotAfter = &v1.Time{}
- }
- if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.TrustBundle = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.proto b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
index 194bdbc1..7155f778 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.proto
@@ -101,208 +101,3 @@ message ClusterTrustBundleSpec {
optional string trustBundle = 2;
}
-// PodCertificateRequest encodes a pod requesting a certificate from a given
-// signer.
-//
-// Kubelets use this API to implement podCertificate projected volumes
-message PodCertificateRequest {
- // metadata contains the object metadata.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // spec contains the details about the certificate being requested.
- optional PodCertificateRequestSpec spec = 2;
-
- // status contains the issued certificate, and a standard set of conditions.
- // +optional
- optional PodCertificateRequestStatus status = 3;
-}
-
-// PodCertificateRequestList is a collection of PodCertificateRequest objects
-message PodCertificateRequestList {
- // metadata contains the list metadata.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
- // items is a collection of PodCertificateRequest objects
- repeated PodCertificateRequest items = 2;
-}
-
-// PodCertificateRequestSpec describes the certificate request. All fields are
-// immutable after creation.
-message PodCertificateRequestSpec {
- // signerName indicates the requested signer.
- //
- // All signer names beginning with `kubernetes.io` are reserved for use by
- // the Kubernetes project. There is currently one well-known signer
- // documented by the Kubernetes project,
- // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
- // certificates understood by kube-apiserver. It is currently
- // unimplemented.
- //
- // +required
- optional string signerName = 1;
-
- // podName is the name of the pod into which the certificate will be mounted.
- //
- // +required
- optional string podName = 2;
-
- // podUID is the UID of the pod into which the certificate will be mounted.
- //
- // +required
- optional string podUID = 3;
-
- // serviceAccountName is the name of the service account the pod is running as.
- //
- // +required
- optional string serviceAccountName = 4;
-
- // serviceAccountUID is the UID of the service account the pod is running as.
- //
- // +required
- optional string serviceAccountUID = 5;
-
- // nodeName is the name of the node the pod is assigned to.
- //
- // +required
- optional string nodeName = 6;
-
- // nodeUID is the UID of the node the pod is assigned to.
- //
- // +required
- optional string nodeUID = 7;
-
- // maxExpirationSeconds is the maximum lifetime permitted for the
- // certificate.
- //
- // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
- // will reject values shorter than 3600 (1 hour). The maximum allowable
- // value is 7862400 (91 days).
- //
- // The signer implementation is then free to issue a certificate with any
- // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
- // seconds (1 hour). This constraint is enforced by kube-apiserver.
- // `kubernetes.io` signers will never issue certificates with a lifetime
- // longer than 24 hours.
- //
- // +optional
- // +default=86400
- optional int32 maxExpirationSeconds = 8;
-
- // pkixPublicKey is the PKIX-serialized public key the signer will issue the
- // certificate to.
- //
- // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
- // or ED25519. Note that this list may be expanded in the future.
- //
- // Signer implementations do not need to support all key types supported by
- // kube-apiserver and kubelet. If a signer does not support the key type
- // used for a given PodCertificateRequest, it must deny the request by
- // setting a status.conditions entry with a type of "Denied" and a reason of
- // "UnsupportedKeyType". It may also suggest a key type that it does support
- // in the message field.
- //
- // +required
- optional bytes pkixPublicKey = 9;
-
- // proofOfPossession proves that the requesting kubelet holds the private
- // key corresponding to pkixPublicKey.
- //
- // It is contructed by signing the ASCII bytes of the pod's UID using
- // `pkixPublicKey`.
- //
- // kube-apiserver validates the proof of possession during creation of the
- // PodCertificateRequest.
- //
- // If the key is an RSA key, then the signature is over the ASCII bytes of
- // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
- // function crypto/rsa.SignPSS with nil options).
- //
- // If the key is an ECDSA key, then the signature is as described by [SEC 1,
- // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
- // golang library function crypto/ecdsa.SignASN1)
- //
- // If the key is an ED25519 key, the the signature is as described by the
- // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
- // the golang library crypto/ed25519.Sign).
- //
- // +required
- optional bytes proofOfPossession = 10;
-}
-
-// PodCertificateRequestStatus describes the status of the request, and holds
-// the certificate data if the request is issued.
-message PodCertificateRequestStatus {
- // conditions applied to the request.
- //
- // The types "Issued", "Denied", and "Failed" have special handling. At
- // most one of these conditions may be present, and they must have status
- // "True".
- //
- // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
- // suggest a key type that will work in the message field.
- //
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- // +optional
- repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
-
- // certificateChain is populated with an issued certificate by the signer.
- // This field is set via the /status subresource. Once populated, this field
- // is immutable.
- //
- // If the certificate signing request is denied, a condition of type
- // "Denied" is added and this field remains empty. If the signer cannot
- // issue the certificate, a condition of type "Failed" is added and this
- // field remains empty.
- //
- // Validation requirements:
- // 1. certificateChain must consist of one or more PEM-formatted certificates.
- // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
- // described in section 4 of RFC5280.
- //
- // If more than one block is present, and the definition of the requested
- // spec.signerName does not indicate otherwise, the first block is the
- // issued certificate, and subsequent blocks should be treated as
- // intermediate certificates and presented in TLS handshakes. When
- // projecting the chain into a pod volume, kubelet will drop any data
- // in-between the PEM blocks, as well as any PEM block headers.
- //
- // +optional
- optional string certificateChain = 2;
-
- // notBefore is the time at which the certificate becomes valid. The value
- // must be the same as the notBefore value in the leaf certificate in
- // certificateChain. This field is set via the /status subresource. Once
- // populated, it is immutable. The signer must set this field at the same
- // time it sets certificateChain.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
-
- // beginRefreshAt is the time at which the kubelet should begin trying to
- // refresh the certificate. This field is set via the /status subresource,
- // and must be set at the same time as certificateChain. Once populated,
- // this field is immutable.
- //
- // This field is only a hint. Kubelet may start refreshing before or after
- // this time if necessary.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
-
- // notAfter is the time at which the certificate expires. The value must be
- // the same as the notAfter value in the leaf certificate in
- // certificateChain. This field is set via the /status subresource. Once
- // populated, it is immutable. The signer must set this field at the same
- // time it sets certificateChain.
- //
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
-}
-
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..81dd80e2
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*ClusterTrustBundle) ProtoMessage() {}
+
+func (*ClusterTrustBundleList) ProtoMessage() {}
+
+func (*ClusterTrustBundleSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/register.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/register.go
index ae541e15..7288ed9a 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/register.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/register.go
@@ -53,8 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&ClusterTrustBundle{},
&ClusterTrustBundleList{},
- &PodCertificateRequest{},
- &PodCertificateRequestList{},
)
// Add the watch version that applies
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/types.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/types.go
index a5cb3809..beef0259 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/types.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/types.go
@@ -18,7 +18,6 @@ package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
)
// +genclient
@@ -107,233 +106,3 @@ type ClusterTrustBundleList struct {
// items is a collection of ClusterTrustBundle objects
Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
}
-
-// +genclient
-// +k8s:prerelease-lifecycle-gen:introduced=1.34
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PodCertificateRequest encodes a pod requesting a certificate from a given
-// signer.
-//
-// Kubelets use this API to implement podCertificate projected volumes
-type PodCertificateRequest struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata contains the object metadata.
- //
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // spec contains the details about the certificate being requested.
- Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
-
- // status contains the issued certificate, and a standard set of conditions.
- // +optional
- Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
-}
-
-// PodCertificateRequestSpec describes the certificate request. All fields are
-// immutable after creation.
-type PodCertificateRequestSpec struct {
- // signerName indicates the requested signer.
- //
- // All signer names beginning with `kubernetes.io` are reserved for use by
- // the Kubernetes project. There is currently one well-known signer
- // documented by the Kubernetes project,
- // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
- // certificates understood by kube-apiserver. It is currently
- // unimplemented.
- //
- // +required
- SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
-
- // podName is the name of the pod into which the certificate will be mounted.
- //
- // +required
- PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
- // podUID is the UID of the pod into which the certificate will be mounted.
- //
- // +required
- PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
-
- // serviceAccountName is the name of the service account the pod is running as.
- //
- // +required
- ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
- // serviceAccountUID is the UID of the service account the pod is running as.
- //
- // +required
- ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
-
- // nodeName is the name of the node the pod is assigned to.
- //
- // +required
- NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
- // nodeUID is the UID of the node the pod is assigned to.
- //
- // +required
- NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
-
- // maxExpirationSeconds is the maximum lifetime permitted for the
- // certificate.
- //
- // If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver
- // will reject values shorter than 3600 (1 hour). The maximum allowable
- // value is 7862400 (91 days).
- //
- // The signer implementation is then free to issue a certificate with any
- // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
- // seconds (1 hour). This constraint is enforced by kube-apiserver.
- // `kubernetes.io` signers will never issue certificates with a lifetime
- // longer than 24 hours.
- //
- // +optional
- // +default=86400
- MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
-
- // pkixPublicKey is the PKIX-serialized public key the signer will issue the
- // certificate to.
- //
- // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
- // or ED25519. Note that this list may be expanded in the future.
- //
- // Signer implementations do not need to support all key types supported by
- // kube-apiserver and kubelet. If a signer does not support the key type
- // used for a given PodCertificateRequest, it must deny the request by
- // setting a status.conditions entry with a type of "Denied" and a reason of
- // "UnsupportedKeyType". It may also suggest a key type that it does support
- // in the message field.
- //
- // +required
- PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
-
- // proofOfPossession proves that the requesting kubelet holds the private
- // key corresponding to pkixPublicKey.
- //
- // It is constructed by signing the ASCII bytes of the pod's UID using
- // `pkixPublicKey`.
- //
- // kube-apiserver validates the proof of possession during creation of the
- // PodCertificateRequest.
- //
- // If the key is an RSA key, then the signature is over the ASCII bytes of
- // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
- // function crypto/rsa.SignPSS with nil options).
- //
- // If the key is an ECDSA key, then the signature is as described by [SEC 1,
- // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
- // golang library function crypto/ecdsa.SignASN1)
- //
- // If the key is an ED25519 key, then the signature is as described by the
- // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
- // the golang library crypto/ed25519.Sign).
- //
- // +required
- ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
-}
-
-// PodCertificateRequestStatus describes the status of the request, and holds
-// the certificate data if the request is issued.
-type PodCertificateRequestStatus struct {
- // conditions applied to the request.
- //
- // The types "Issued", "Denied", and "Failed" have special handling. At
- // most one of these conditions may be present, and they must have status
- // "True".
- //
- // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
- // suggest a key type that will work in the message field.
- //
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- // +optional
- Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
-
- // certificateChain is populated with an issued certificate by the signer.
- // This field is set via the /status subresource. Once populated, this field
- // is immutable.
- //
- // If the certificate signing request is denied, a condition of type
- // "Denied" is added and this field remains empty. If the signer cannot
- // issue the certificate, a condition of type "Failed" is added and this
- // field remains empty.
- //
- // Validation requirements:
- // 1. certificateChain must consist of one or more PEM-formatted certificates.
- // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
- // described in section 4 of RFC5280.
- //
- // If more than one block is present, and the definition of the requested
- // spec.signerName does not indicate otherwise, the first block is the
- // issued certificate, and subsequent blocks should be treated as
- // intermediate certificates and presented in TLS handshakes. When
- // projecting the chain into a pod volume, kubelet will drop any data
- // in-between the PEM blocks, as well as any PEM block headers.
- //
- // +optional
- CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
-
- // notBefore is the time at which the certificate becomes valid. The value
- // must be the same as the notBefore value in the leaf certificate in
- // certificateChain. This field is set via the /status subresource. Once
- // populated, it is immutable. The signer must set this field at the same
- // time it sets certificateChain.
- //
- // +optional
- NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
-
- // beginRefreshAt is the time at which the kubelet should begin trying to
- // refresh the certificate. This field is set via the /status subresource,
- // and must be set at the same time as certificateChain. Once populated,
- // this field is immutable.
- //
- // This field is only a hint. Kubelet may start refreshing before or after
- // this time if necessary.
- //
- // +optional
- BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
-
- // notAfter is the time at which the certificate expires. The value must be
- // the same as the notAfter value in the leaf certificate in
- // certificateChain. This field is set via the /status subresource. Once
- // populated, it is immutable. The signer must set this field at the same
- // time it sets certificateChain.
- //
- // +optional
- NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
-}
-
-// Well-known condition types for PodCertificateRequests
-const (
- // Denied indicates the request was denied by the signer.
- PodCertificateRequestConditionTypeDenied string = "Denied"
- // Failed indicates the signer failed to issue the certificate.
- PodCertificateRequestConditionTypeFailed string = "Failed"
- // Issued indicates the certificate has been issued.
- PodCertificateRequestConditionTypeIssued string = "Issued"
-)
-
-// Well-known condition reasons for PodCertificateRequests
-const (
- // UnsupportedKeyType should be set on "Denied" conditions when the signer
- // doesn't support the key type of publicKey.
- PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.34
-
-// PodCertificateRequestList is a collection of PodCertificateRequest objects
-type PodCertificateRequestList struct {
- metav1.TypeMeta `json:",inline"`
-
- // metadata contains the list metadata.
- //
- // +optional
- metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // items is a collection of PodCertificateRequest objects
- Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
-}
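The removed PodCertificateRequestSpec documentation describes how proofOfPossession is built: a signature over the ASCII bytes of the pod UID, produced with crypto/ed25519.Sign for ED25519 keys, while pkixPublicKey carries the PKIX-serialized public key. Below is a minimal sketch under those assumptions; it is illustrative only, not code from this change, and the pod UID value is a placeholder.

// Minimal sketch (illustrative only): populate pkixPublicKey and proofOfPossession
// for an ED25519 key, following the removed field documentation.
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// PKIX (SubjectPublicKeyInfo) serialization of the public key.
	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		panic(err)
	}

	podUID := "5f2b3e0e-0000-0000-0000-000000000000" // placeholder pod UID
	proofOfPossession := ed25519.Sign(priv, []byte(podUID))

	// The API server would verify the proof against the submitted public key.
	fmt.Println(len(pkixPublicKey) > 0, ed25519.Verify(pub, []byte(podUID), proofOfPossession))
}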
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
index d29f2d85..bff649e3 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/types_swagger_doc_generated.go
@@ -57,56 +57,4 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
return map_ClusterTrustBundleSpec
}
-var map_PodCertificateRequest = map[string]string{
- "": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
- "metadata": "metadata contains the object metadata.",
- "spec": "spec contains the details about the certificate being requested.",
- "status": "status contains the issued certificate, and a standard set of conditions.",
-}
-
-func (PodCertificateRequest) SwaggerDoc() map[string]string {
- return map_PodCertificateRequest
-}
-
-var map_PodCertificateRequestList = map[string]string{
- "": "PodCertificateRequestList is a collection of PodCertificateRequest objects",
- "metadata": "metadata contains the list metadata.",
- "items": "items is a collection of PodCertificateRequest objects",
-}
-
-func (PodCertificateRequestList) SwaggerDoc() map[string]string {
- return map_PodCertificateRequestList
-}
-
-var map_PodCertificateRequestSpec = map[string]string{
- "": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.",
- "signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented.",
- "podName": "podName is the name of the pod into which the certificate will be mounted.",
- "podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
- "serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
- "serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
- "nodeName": "nodeName is the name of the node the pod is assigned to.",
- "nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
- "maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
- "pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
- "proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
-}
-
-func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
- return map_PodCertificateRequestSpec
-}
-
-var map_PodCertificateRequestStatus = map[string]string{
- "": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
- "conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
- "certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
- "notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
- "beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.",
- "notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
-}
-
-func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
- return map_PodCertificateRequestStatus
-}
-
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
index 25bc0ed6..30a4dc1e 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.deepcopy.go
@@ -22,7 +22,6 @@ limitations under the License.
package v1alpha1
import (
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -101,130 +100,3 @@ func (in *ClusterTrustBundleSpec) DeepCopy() *ClusterTrustBundleSpec {
in.DeepCopyInto(out)
return out
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- in.Spec.DeepCopyInto(&out.Spec)
- in.Status.DeepCopyInto(&out.Status)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
-func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
- if in == nil {
- return nil
- }
- out := new(PodCertificateRequest)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
- if in.Items != nil {
- in, out := &in.Items, &out.Items
- *out = make([]PodCertificateRequest, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
-func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
- if in == nil {
- return nil
- }
- out := new(PodCertificateRequestList)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
- *out = *in
- if in.MaxExpirationSeconds != nil {
- in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
- *out = new(int32)
- **out = **in
- }
- if in.PKIXPublicKey != nil {
- in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- if in.ProofOfPossession != nil {
- in, out := &in.ProofOfPossession, &out.ProofOfPossession
- *out = make([]byte, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
-func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
- if in == nil {
- return nil
- }
- out := new(PodCertificateRequestSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
- *out = *in
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make([]v1.Condition, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- if in.NotBefore != nil {
- in, out := &in.NotBefore, &out.NotBefore
- *out = (*in).DeepCopy()
- }
- if in.BeginRefreshAt != nil {
- in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
- *out = (*in).DeepCopy()
- }
- if in.NotAfter != nil {
- in, out := &in.NotAfter, &out.NotAfter
- *out = (*in).DeepCopy()
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
-func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
- if in == nil {
- return nil
- }
- out := new(PodCertificateRequestStatus)
- in.DeepCopyInto(out)
- return out
-}
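The deleted deepcopy helpers above follow the usual generated pattern: pointer, slice, and map fields are re-allocated so a copy never aliases the original object. A small usage sketch with the ClusterTrustBundle type that remains in the package (illustrative only; the object contents are made up):

// Minimal usage sketch (not part of this change): generated DeepCopy helpers
// duplicate reference fields, so mutating a copy leaves the original intact.
package main

import (
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &certsv1alpha1.ClusterTrustBundle{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "example",
			Labels: map[string]string{"tier": "prod"},
		},
		Spec: certsv1alpha1.ClusterTrustBundleSpec{TrustBundle: "-----BEGIN CERTIFICATE-----..."},
	}

	clone := orig.DeepCopy()
	clone.Labels["tier"] = "staging" // mutate the copy's label map...

	fmt.Println(orig.Labels["tier"]) // ...the original still prints "prod"
}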
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..cec3b649
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundle) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundleList) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec"
+}
diff --git a/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
index edbfce79..3121a87d 100644
--- a/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/operator/vendor/k8s.io/api/certificates/v1alpha1/zz_generated.prerelease-lifecycle.go
@@ -56,39 +56,3 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
return 1, 37
}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
- return 1, 34
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
- return 1, 37
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
- return 1, 40
-}
-
-// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
-func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
- return 1, 34
-}
-
-// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
-func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
- return 1, 37
-}
-
-// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
-// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
-func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
- return 1, 40
-}
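The remaining lifecycle accessors report the release in which an API version was introduced, deprecated, and removed, so callers can compare them against a cluster version before relying on the type. A minimal sketch follows; it is illustrative only, and the cluster version numbers are assumptions.

// Minimal sketch (illustrative only): compare the prerelease-lifecycle accessors
// against an assumed cluster version to warn before an alpha API stops being served.
package main

import (
	"fmt"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
)

func main() {
	var list certsv1alpha1.ClusterTrustBundleList

	clusterMajor, clusterMinor := 1, 35 // assumed cluster version for the example

	if major, minor := list.APILifecycleRemoved(); clusterMajor > major ||
		(clusterMajor == major && clusterMinor >= minor) {
		fmt.Printf("certificates.k8s.io/v1alpha1 ClusterTrustBundleList is no longer served as of %d.%d\n", major, minor)
	} else {
		fmt.Println("still served on this cluster version")
	}
}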
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/doc.go b/operator/vendor/k8s.io/api/certificates/v1beta1/doc.go
index 81608a55..dddb7cb9 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.certificates.v1beta1
// +groupName=certificates.k8s.io
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
index 199a5449..5a8b8f2d 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
@@ -23,363 +23,43 @@ import (
fmt "fmt"
io "io"
-
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+ "sort"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} }
-func (*CertificateSigningRequest) ProtoMessage() {}
-func (*CertificateSigningRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{0}
-}
-func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequest.Merge(m, src)
-}
-func (m *CertificateSigningRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo
-
-func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} }
-func (*CertificateSigningRequestCondition) ProtoMessage() {}
-func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{1}
-}
-func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src)
-}
-func (m *CertificateSigningRequestCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m)
-}
-var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
-func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} }
-func (*CertificateSigningRequestList) ProtoMessage() {}
-func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{2}
-}
-func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestList.Merge(m, src)
-}
-func (m *CertificateSigningRequestList) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestList) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m)
-}
+func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} }
-var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo
+func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} }
-func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} }
-func (*CertificateSigningRequestSpec) ProtoMessage() {}
-func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{3}
-}
-func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src)
-}
-func (m *CertificateSigningRequestSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m)
-}
+func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} }
-var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo
+func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} }
-func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} }
-func (*CertificateSigningRequestStatus) ProtoMessage() {}
-func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{4}
-}
-func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src)
-}
-func (m *CertificateSigningRequestStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m)
-}
+func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} }
-var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo
+func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} }
-func (m *ClusterTrustBundle) Reset() { *m = ClusterTrustBundle{} }
-func (*ClusterTrustBundle) ProtoMessage() {}
-func (*ClusterTrustBundle) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{5}
-}
-func (m *ClusterTrustBundle) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundle) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundle.Merge(m, src)
-}
-func (m *ClusterTrustBundle) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundle) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundle.DiscardUnknown(m)
-}
+func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} }
-var xxx_messageInfo_ClusterTrustBundle proto.InternalMessageInfo
+func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} }
-func (m *ClusterTrustBundleList) Reset() { *m = ClusterTrustBundleList{} }
-func (*ClusterTrustBundleList) ProtoMessage() {}
-func (*ClusterTrustBundleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{6}
-}
-func (m *ClusterTrustBundleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundleList.Merge(m, src)
-}
-func (m *ClusterTrustBundleList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundleList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundleList.DiscardUnknown(m)
-}
+func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-var xxx_messageInfo_ClusterTrustBundleList proto.InternalMessageInfo
+func (m *PodCertificateRequest) Reset() { *m = PodCertificateRequest{} }
-func (m *ClusterTrustBundleSpec) Reset() { *m = ClusterTrustBundleSpec{} }
-func (*ClusterTrustBundleSpec) ProtoMessage() {}
-func (*ClusterTrustBundleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{7}
-}
-func (m *ClusterTrustBundleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundleSpec.Merge(m, src)
-}
-func (m *ClusterTrustBundleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundleSpec.DiscardUnknown(m)
-}
+func (m *PodCertificateRequestList) Reset() { *m = PodCertificateRequestList{} }
-var xxx_messageInfo_ClusterTrustBundleSpec proto.InternalMessageInfo
+func (m *PodCertificateRequestSpec) Reset() { *m = PodCertificateRequestSpec{} }
-func (m *ExtraValue) Reset() { *m = ExtraValue{} }
-func (*ExtraValue) ProtoMessage() {}
-func (*ExtraValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_6529c11a462c48a5, []int{8}
-}
-func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExtraValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtraValue.Merge(m, src)
-}
-func (m *ExtraValue) XXX_Size() int {
- return m.Size()
-}
-func (m *ExtraValue) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtraValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest")
- proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition")
- proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList")
- proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec")
- proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry")
- proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus")
- proto.RegisterType((*ClusterTrustBundle)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundle")
- proto.RegisterType((*ClusterTrustBundleList)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleList")
- proto.RegisterType((*ClusterTrustBundleSpec)(nil), "k8s.io.api.certificates.v1beta1.ClusterTrustBundleSpec")
- proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_6529c11a462c48a5)
-}
-
-var fileDescriptor_6529c11a462c48a5 = []byte{
- // 991 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
- 0x14, 0x8f, 0x9b, 0x3f, 0x4d, 0x26, 0xa5, 0xbb, 0x3b, 0x40, 0x65, 0x22, 0x6d, 0x1c, 0x59, 0x80,
- 0xca, 0x3f, 0x9b, 0x96, 0x85, 0xad, 0x7a, 0x40, 0xe0, 0x50, 0xa1, 0x8a, 0x2e, 0x48, 0xd3, 0x16,
- 0x01, 0x42, 0x62, 0xa7, 0xce, 0x5b, 0xd7, 0xdb, 0xc6, 0x36, 0x9e, 0x71, 0xd8, 0xdc, 0x56, 0xe2,
- 0x0b, 0x70, 0xe4, 0xc8, 0x77, 0xe0, 0x4b, 0x94, 0x03, 0x52, 0xb9, 0xed, 0x01, 0x45, 0x34, 0xfb,
- 0x2d, 0x7a, 0x42, 0x33, 0x9e, 0x38, 0x4e, 0xd2, 0x90, 0xa5, 0x2b, 0xed, 0x2d, 0xf3, 0xe6, 0xfd,
- 0x7e, 0xbf, 0xf7, 0x9e, 0xdf, 0x7b, 0x13, 0x64, 0x9f, 0x6c, 0x31, 0xcb, 0x0f, 0x6d, 0x1a, 0xf9,
- 0xb6, 0x0b, 0x31, 0xf7, 0x1f, 0xf8, 0x2e, 0xe5, 0xc0, 0xec, 0xde, 0xc6, 0x11, 0x70, 0xba, 0x61,
- 0x7b, 0x10, 0x40, 0x4c, 0x39, 0x74, 0xac, 0x28, 0x0e, 0x79, 0x88, 0x8d, 0x14, 0x60, 0xd1, 0xc8,
- 0xb7, 0xf2, 0x00, 0x4b, 0x01, 0x1a, 0xef, 0x79, 0x3e, 0x3f, 0x4e, 0x8e, 0x2c, 0x37, 0xec, 0xda,
- 0x5e, 0xe8, 0x85, 0xb6, 0xc4, 0x1d, 0x25, 0x0f, 0xe4, 0x49, 0x1e, 0xe4, 0xaf, 0x94, 0xaf, 0x61,
- 0xe6, 0x03, 0x08, 0x63, 0xb0, 0x7b, 0x33, 0x9a, 0x8d, 0x3b, 0x63, 0x9f, 0x2e, 0x75, 0x8f, 0xfd,
- 0x00, 0xe2, 0xbe, 0x1d, 0x9d, 0x78, 0xc2, 0xc0, 0xec, 0x2e, 0x70, 0x7a, 0x15, 0xca, 0x9e, 0x87,
- 0x8a, 0x93, 0x80, 0xfb, 0x5d, 0x98, 0x01, 0x7c, 0xb4, 0x08, 0xc0, 0xdc, 0x63, 0xe8, 0xd2, 0x69,
- 0x9c, 0xf9, 0xc7, 0x12, 0x7a, 0xad, 0x3d, 0x2e, 0xc5, 0xbe, 0xef, 0x05, 0x7e, 0xe0, 0x11, 0xf8,
- 0x31, 0x01, 0xc6, 0xf1, 0x7d, 0x54, 0x15, 0x11, 0x76, 0x28, 0xa7, 0xba, 0xd6, 0xd2, 0xd6, 0xeb,
- 0x9b, 0xef, 0x5b, 0xe3, 0x1a, 0x66, 0x42, 0x56, 0x74, 0xe2, 0x09, 0x03, 0xb3, 0x84, 0xb7, 0xd5,
- 0xdb, 0xb0, 0xbe, 0x3a, 0x7a, 0x08, 0x2e, 0xbf, 0x07, 0x9c, 0x3a, 0xf8, 0x6c, 0x60, 0x14, 0x86,
- 0x03, 0x03, 0x8d, 0x6d, 0x24, 0x63, 0xc5, 0xf7, 0x51, 0x89, 0x45, 0xe0, 0xea, 0x4b, 0x92, 0xfd,
- 0x63, 0x6b, 0xc1, 0x17, 0xb2, 0xe6, 0xc6, 0xba, 0x1f, 0x81, 0xeb, 0xac, 0x28, 0xad, 0x92, 0x38,
- 0x11, 0xc9, 0x8c, 0x8f, 0x51, 0x85, 0x71, 0xca, 0x13, 0xa6, 0x17, 0xa5, 0xc6, 0x27, 0xcf, 0xa1,
- 0x21, 0x79, 0x9c, 0x55, 0xa5, 0x52, 0x49, 0xcf, 0x44, 0xf1, 0x9b, 0x4f, 0x8b, 0xc8, 0x9c, 0x8b,
- 0x6d, 0x87, 0x41, 0xc7, 0xe7, 0x7e, 0x18, 0xe0, 0x2d, 0x54, 0xe2, 0xfd, 0x08, 0x64, 0x41, 0x6b,
- 0xce, 0xeb, 0xa3, 0x90, 0x0f, 0xfa, 0x11, 0x5c, 0x0e, 0x8c, 0x57, 0xa6, 0xfd, 0x85, 0x9d, 0x48,
- 0x04, 0xde, 0xcb, 0x52, 0xa9, 0x48, 0xec, 0x9d, 0xc9, 0x40, 0x2e, 0x07, 0xc6, 0x15, 0x1d, 0x69,
- 0x65, 0x4c, 0x93, 0xe1, 0xe2, 0x37, 0x51, 0x25, 0x06, 0xca, 0xc2, 0x40, 0x16, 0xbf, 0x36, 0x4e,
- 0x8b, 0x48, 0x2b, 0x51, 0xb7, 0xf8, 0x2d, 0xb4, 0xdc, 0x05, 0xc6, 0xa8, 0x07, 0xb2, 0x82, 0x35,
- 0xe7, 0x86, 0x72, 0x5c, 0xbe, 0x97, 0x9a, 0xc9, 0xe8, 0x1e, 0x3f, 0x44, 0xab, 0xa7, 0x94, 0xf1,
- 0xc3, 0xa8, 0x43, 0x39, 0x1c, 0xf8, 0x5d, 0xd0, 0x4b, 0xb2, 0xe6, 0x6f, 0x3f, 0x5b, 0xd7, 0x08,
- 0x84, 0xb3, 0xa6, 0xd8, 0x57, 0xf7, 0x26, 0x98, 0xc8, 0x14, 0x33, 0xee, 0x21, 0x2c, 0x2c, 0x07,
- 0x31, 0x0d, 0x58, 0x5a, 0x28, 0xa1, 0x57, 0xfe, 0xdf, 0x7a, 0x0d, 0xa5, 0x87, 0xf7, 0x66, 0xd8,
- 0xc8, 0x15, 0x0a, 0xe6, 0x40, 0x43, 0xb7, 0xe7, 0x7e, 0xe5, 0x3d, 0x9f, 0x71, 0xfc, 0xfd, 0xcc,
- 0xd4, 0x58, 0xcf, 0x16, 0x8f, 0x40, 0xcb, 0x99, 0xb9, 0xa9, 0x62, 0xaa, 0x8e, 0x2c, 0xb9, 0x89,
- 0xf9, 0x01, 0x95, 0x7d, 0x0e, 0x5d, 0xa6, 0x2f, 0xb5, 0x8a, 0xeb, 0xf5, 0xcd, 0xed, 0xeb, 0xb7,
- 0xb3, 0xf3, 0x92, 0x92, 0x29, 0xef, 0x0a, 0x42, 0x92, 0xf2, 0x9a, 0xbf, 0x97, 0xfe, 0x23, 0x41,
- 0x31, 0x58, 0xf8, 0x0d, 0xb4, 0x1c, 0xa7, 0x47, 0x99, 0xdf, 0x8a, 0x53, 0x17, 0xdd, 0xa0, 0x3c,
- 0xc8, 0xe8, 0x0e, 0x5b, 0x08, 0x31, 0xdf, 0x0b, 0x20, 0xfe, 0x92, 0x76, 0x41, 0x5f, 0x4e, 0x9b,
- 0x4c, 0x6c, 0x82, 0xfd, 0xcc, 0x4a, 0x72, 0x1e, 0xb8, 0x8d, 0x6e, 0xc1, 0xa3, 0xc8, 0x8f, 0xa9,
- 0x6c, 0x56, 0x70, 0xc3, 0xa0, 0xc3, 0xf4, 0x6a, 0x4b, 0x5b, 0x2f, 0x3b, 0xaf, 0x0e, 0x07, 0xc6,
- 0xad, 0x9d, 0xe9, 0x4b, 0x32, 0xeb, 0x8f, 0x2d, 0x54, 0x49, 0x44, 0x2f, 0x32, 0xbd, 0xdc, 0x2a,
- 0xae, 0xd7, 0x9c, 0x35, 0xd1, 0xd1, 0x87, 0xd2, 0x72, 0x39, 0x30, 0xaa, 0x5f, 0x40, 0x5f, 0x1e,
- 0x88, 0xf2, 0xc2, 0xef, 0xa2, 0x6a, 0xc2, 0x20, 0x0e, 0x44, 0x88, 0xe9, 0x1c, 0x64, 0xc5, 0x3f,
- 0x54, 0x76, 0x92, 0x79, 0xe0, 0xdb, 0xa8, 0x98, 0xf8, 0x1d, 0x35, 0x07, 0x75, 0xe5, 0x58, 0x3c,
- 0xdc, 0xfd, 0x8c, 0x08, 0x3b, 0x36, 0x51, 0xc5, 0x8b, 0xc3, 0x24, 0x62, 0x7a, 0x49, 0x8a, 0x23,
- 0x21, 0xfe, 0xb9, 0xb4, 0x10, 0x75, 0x83, 0x03, 0x54, 0x86, 0x47, 0x3c, 0xa6, 0x7a, 0x45, 0x7e,
- 0xbf, 0xdd, 0xe7, 0x5b, 0x79, 0xd6, 0x8e, 0xe0, 0xda, 0x09, 0x78, 0xdc, 0x1f, 0x7f, 0x4e, 0x69,
- 0x23, 0xa9, 0x4c, 0x03, 0x10, 0x1a, 0xfb, 0xe0, 0x9b, 0xa8, 0x78, 0x02, 0xfd, 0x74, 0xf7, 0x10,
- 0xf1, 0x13, 0x7f, 0x8a, 0xca, 0x3d, 0x7a, 0x9a, 0x80, 0x5a, 0xc1, 0xef, 0x2c, 0x8c, 0x47, 0xb2,
- 0x7d, 0x2d, 0x20, 0x24, 0x45, 0x6e, 0x2f, 0x6d, 0x69, 0xe6, 0x9f, 0x1a, 0x32, 0x16, 0x2c, 0x4e,
- 0xfc, 0x13, 0x42, 0xee, 0x68, 0x19, 0x31, 0x5d, 0x93, 0xf9, 0xb7, 0xaf, 0x9f, 0x7f, 0xb6, 0xd8,
- 0xc6, 0x6f, 0x4c, 0x66, 0x62, 0x24, 0x27, 0x85, 0x37, 0x50, 0x3d, 0x47, 0x2d, 0x33, 0x5d, 0x71,
- 0x6e, 0x0c, 0x07, 0x46, 0x3d, 0x47, 0x4e, 0xf2, 0x3e, 0xe6, 0x5f, 0x1a, 0xc2, 0xed, 0xd3, 0x84,
- 0x71, 0x88, 0x0f, 0xe2, 0x84, 0x71, 0x27, 0x09, 0x3a, 0xa7, 0xf0, 0x02, 0x5e, 0xc4, 0x6f, 0x27,
- 0x5e, 0xc4, 0xbb, 0x8b, 0xcb, 0x33, 0x13, 0xe4, 0xbc, 0xa7, 0xd0, 0x3c, 0xd7, 0xd0, 0xda, 0xac,
- 0xfb, 0x0b, 0xd8, 0x59, 0xdf, 0x4c, 0xee, 0xac, 0x0f, 0xae, 0x91, 0xd4, 0x9c, 0x65, 0xf5, 0xf3,
- 0x95, 0x29, 0xc9, 0x2d, 0xb5, 0x39, 0xb1, 0x7e, 0xd2, 0xd7, 0x36, 0x2b, 0xfd, 0x9c, 0x15, 0xf4,
- 0x21, 0xaa, 0xf3, 0x31, 0x8d, 0x5a, 0x08, 0x2f, 0x2b, 0x50, 0x3d, 0xa7, 0x40, 0xf2, 0x7e, 0xe6,
- 0x5d, 0x35, 0x63, 0x72, 0x2a, 0xb0, 0x31, 0xca, 0x56, 0x93, 0x4b, 0xa0, 0x36, 0x1d, 0xf4, 0x76,
- 0xf5, 0xd7, 0xdf, 0x8c, 0xc2, 0xe3, 0xbf, 0x5b, 0x05, 0x67, 0xe7, 0xec, 0xa2, 0x59, 0x38, 0xbf,
- 0x68, 0x16, 0x9e, 0x5c, 0x34, 0x0b, 0x8f, 0x87, 0x4d, 0xed, 0x6c, 0xd8, 0xd4, 0xce, 0x87, 0x4d,
- 0xed, 0xc9, 0xb0, 0xa9, 0xfd, 0x33, 0x6c, 0x6a, 0xbf, 0x3c, 0x6d, 0x16, 0xbe, 0x33, 0x16, 0xfc,
- 0xd1, 0xfd, 0x37, 0x00, 0x00, 0xff, 0xff, 0x17, 0xbe, 0xe3, 0x02, 0x0a, 0x0b, 0x00, 0x00,
-}
+func (m *PodCertificateRequestStatus) Reset() { *m = PodCertificateRequestStatus{} }
func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -581,7 +261,7 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int,
for k := range m.Extra {
keysForExtra = append(keysForExtra, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
v := m.Extra[string(keysForExtra[iNdEx])]
baseI := i
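This hunk swaps the gogo sortkeys helper for the standard library's sort.Strings: the keys of the Extra map are still sorted before marshalling so the protobuf output stays deterministic. A tiny standalone sketch of the same idea (map contents are placeholders):

// Minimal sketch (illustrative only): sort map keys before serialization so the
// output order does not depend on Go's randomized map iteration.
package main

import (
	"fmt"
	"sort"
)

func main() {
	extra := map[string][]string{"scopes": {"a"}, "groups": {"b"}, "uid": {"c"}}

	keys := make([]string, 0, len(extra))
	for k := range extra {
		keys = append(keys, k)
	}
	sort.Strings(keys) // fixed order regardless of map iteration order

	for _, k := range keys {
		fmt.Println(k, extra[k])
	}
}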
@@ -842,151 +522,336 @@ func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *CertificateSigningRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *CertificateSigningRequestCondition) Size() (n int) {
- if m == nil {
- return 0
+func (m *PodCertificateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- l = len(m.Type)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Reason)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Message)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.LastUpdateTime.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.LastTransitionTime.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Status)
- n += 1 + l + sovGenerated(uint64(l))
- return n
+ return dAtA[:n], nil
}
-func (m *CertificateSigningRequestList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
+func (m *PodCertificateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *CertificateSigningRequestSpec) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *PodCertificateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.Request != nil {
- l = len(m.Request)
- n += 1 + l + sovGenerated(uint64(l))
- }
- l = len(m.Username)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.UID)
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Groups) > 0 {
- for _, s := range m.Groups {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if len(m.Usages) > 0 {
- for _, s := range m.Usages {
- l = len(s)
- n += 1 + l + sovGenerated(uint64(l))
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if len(m.Extra) > 0 {
- for k, v := range m.Extra {
- _ = k
- _ = v
- l = v.Size()
- mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
- n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
}
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
}
- if m.SignerName != nil {
- l = len(*m.SignerName)
- n += 1 + l + sovGenerated(uint64(l))
- }
- if m.ExpirationSeconds != nil {
- n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
- }
- return n
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *CertificateSigningRequestStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- if m.Certificate != nil {
- l = len(m.Certificate)
- n += 1 + l + sovGenerated(uint64(l))
+func (m *PodCertificateRequestList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *ClusterTrustBundle) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
+func (m *PodCertificateRequestList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ClusterTrustBundleList) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *PodCertificateRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.UnverifiedUserAnnotations) > 0 {
+ keysForUnverifiedUserAnnotations := make([]string, 0, len(m.UnverifiedUserAnnotations))
+ for k := range m.UnverifiedUserAnnotations {
+ keysForUnverifiedUserAnnotations = append(keysForUnverifiedUserAnnotations, string(k))
+ }
+ sort.Strings(keysForUnverifiedUserAnnotations)
+ for iNdEx := len(keysForUnverifiedUserAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.UnverifiedUserAnnotations[string(keysForUnverifiedUserAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForUnverifiedUserAnnotations[iNdEx])
+ copy(dAtA[i:], keysForUnverifiedUserAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUnverifiedUserAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ if m.ProofOfPossession != nil {
+ i -= len(m.ProofOfPossession)
+ copy(dAtA[i:], m.ProofOfPossession)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProofOfPossession)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.PKIXPublicKey != nil {
+ i -= len(m.PKIXPublicKey)
+ copy(dAtA[i:], m.PKIXPublicKey)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PKIXPublicKey)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.MaxExpirationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxExpirationSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
+ i -= len(m.NodeUID)
+ copy(dAtA[i:], m.NodeUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeUID)))
+ i--
+ dAtA[i] = 0x3a
+ i -= len(m.NodeName)
+ copy(dAtA[i:], m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+ i--
+ dAtA[i] = 0x32
+ i -= len(m.ServiceAccountUID)
+ copy(dAtA[i:], m.ServiceAccountUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountUID)))
+ i--
+ dAtA[i] = 0x2a
+ i -= len(m.ServiceAccountName)
+ copy(dAtA[i:], m.ServiceAccountName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.PodUID)
+ copy(dAtA[i:], m.PodUID)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodUID)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.PodName)
+ copy(dAtA[i:], m.PodName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodName)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.SignerName)
+ copy(dAtA[i:], m.SignerName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *PodCertificateRequestStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PodCertificateRequestStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCertificateRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NotAfter != nil {
+ {
+ size, err := m.NotAfter.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.BeginRefreshAt != nil {
+ {
+ size, err := m.BeginRefreshAt.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.NotBefore != nil {
+ {
+ size, err := m.NotBefore.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.CertificateChain)
+ copy(dAtA[i:], m.CertificateChain)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChain)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CertificateSigningRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CertificateSigningRequestCondition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Reason)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Message)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastUpdateTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.LastTransitionTime.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Status)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CertificateSigningRequestList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
var l int
_ = l
l = m.ListMeta.Size()
@@ -1000,43 +865,240 @@ func (m *ClusterTrustBundleList) Size() (n int) {
return n
}
-func (m *ClusterTrustBundleSpec) Size() (n int) {
+func (m *CertificateSigningRequestSpec) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = len(m.SignerName)
+ if m.Request != nil {
+ l = len(m.Request)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Username)
n += 1 + l + sovGenerated(uint64(l))
- l = len(m.TrustBundle)
+ l = len(m.UID)
n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Groups) > 0 {
+ for _, s := range m.Groups {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Usages) > 0 {
+ for _, s := range m.Usages {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Extra) > 0 {
+ for k, v := range m.Extra {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.SignerName != nil {
+ l = len(*m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+ }
return n
}
-func (m ExtraValue) Size() (n int) {
+func (m *CertificateSigningRequestStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if len(m) > 0 {
- for _, s := range m {
- l = len(s)
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
+ if m.Certificate != nil {
+ l = len(m.Certificate)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+func (m *ClusterTrustBundle) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
}
-func (this *CertificateSigningRequest) String() string {
- if this == nil {
- return "nil"
+
+func (m *ClusterTrustBundleList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ClusterTrustBundleSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.TrustBundle)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m ExtraValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m) > 0 {
+ for _, s := range m {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodCertificateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodCertificateRequestList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *PodCertificateRequestSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SignerName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServiceAccountName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.ServiceAccountUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.NodeUID)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.MaxExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.MaxExpirationSeconds))
+ }
+ if m.PKIXPublicKey != nil {
+ l = len(m.PKIXPublicKey)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.ProofOfPossession != nil {
+ l = len(m.ProofOfPossession)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.UnverifiedUserAnnotations) > 0 {
+ for k, v := range m.UnverifiedUserAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *PodCertificateRequestStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.CertificateChain)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.NotBefore != nil {
+ l = m.NotBefore.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.BeginRefreshAt != nil {
+ l = m.BeginRefreshAt.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NotAfter != nil {
+ l = m.NotAfter.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CertificateSigningRequest) String() string {
+ if this == nil {
+ return "nil"
}
s := strings.Join([]string{`&CertificateSigningRequest{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
@@ -1085,7 +1147,7 @@ func (this *CertificateSigningRequestSpec) String() string {
for k := range this.Extra {
keysForExtra = append(keysForExtra, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+ sort.Strings(keysForExtra)
mapStringForExtra := "map[string]ExtraValue{"
for _, k := range keysForExtra {
mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
@@ -1158,6 +1220,83 @@ func (this *ClusterTrustBundleSpec) String() string {
}, "")
return s
}
+func (this *PodCertificateRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodCertificateRequest{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodCertificateRequestSpec", "PodCertificateRequestSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodCertificateRequestStatus", "PodCertificateRequestStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]PodCertificateRequest{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodCertificateRequest", "PodCertificateRequest", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&PodCertificateRequestList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForUnverifiedUserAnnotations := make([]string, 0, len(this.UnverifiedUserAnnotations))
+ for k := range this.UnverifiedUserAnnotations {
+ keysForUnverifiedUserAnnotations = append(keysForUnverifiedUserAnnotations, k)
+ }
+ sort.Strings(keysForUnverifiedUserAnnotations)
+ mapStringForUnverifiedUserAnnotations := "map[string]string{"
+ for _, k := range keysForUnverifiedUserAnnotations {
+ mapStringForUnverifiedUserAnnotations += fmt.Sprintf("%v: %v,", k, this.UnverifiedUserAnnotations[k])
+ }
+ mapStringForUnverifiedUserAnnotations += "}"
+ s := strings.Join([]string{`&PodCertificateRequestSpec{`,
+ `SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `PodName:` + fmt.Sprintf("%v", this.PodName) + `,`,
+ `PodUID:` + fmt.Sprintf("%v", this.PodUID) + `,`,
+ `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+ `ServiceAccountUID:` + fmt.Sprintf("%v", this.ServiceAccountUID) + `,`,
+ `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+ `NodeUID:` + fmt.Sprintf("%v", this.NodeUID) + `,`,
+ `MaxExpirationSeconds:` + valueToStringGenerated(this.MaxExpirationSeconds) + `,`,
+ `PKIXPublicKey:` + valueToStringGenerated(this.PKIXPublicKey) + `,`,
+ `ProofOfPossession:` + valueToStringGenerated(this.ProofOfPossession) + `,`,
+ `UnverifiedUserAnnotations:` + mapStringForUnverifiedUserAnnotations + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodCertificateRequestStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&PodCertificateRequestStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `CertificateChain:` + fmt.Sprintf("%v", this.CertificateChain) + `,`,
+ `NotBefore:` + strings.Replace(fmt.Sprintf("%v", this.NotBefore), "Time", "v1.Time", 1) + `,`,
+ `BeginRefreshAt:` + strings.Replace(fmt.Sprintf("%v", this.BeginRefreshAt), "Time", "v1.Time", 1) + `,`,
+ `NotAfter:` + strings.Replace(fmt.Sprintf("%v", this.NotAfter), "Time", "v1.Time", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -1176,30 +1315,670 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error {
if shift >= 64 {
return ErrIntOverflowGenerated
}
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = RequestConditionType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Reason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CertificateSigningRequest{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...)
+ if m.Request == nil {
+ m.Request = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.UID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1209,28 +1988,27 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Usages = append(m.Usages, KeyUsage(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
- case 2:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1257,15 +2035,111 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ if m.Extra == nil {
+ m.Extra = make(map[string]ExtraValue)
+ }
+ var mapkey string
+ mapvalue := &ExtraValue{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &ExtraValue{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.Extra[mapkey] = *mapvalue
iNdEx = postIndex
- case 3:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1275,25 +2149,45 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ s := string(dAtA[iNdEx:postIndex])
+ m.SignerName = &s
iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ExpirationSeconds = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1315,7 +2209,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
+func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1338,17 +2232,17 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group")
+ return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1358,29 +2252,31 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Type = RequestConditionType(dAtA[iNdEx:postIndex])
+ m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
}
- var stringLen uint64
+ var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1390,29 +2286,81 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if byteLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
- if postIndex > l {
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...)
+ if m.Certificate == nil {
+ m.Certificate = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
return io.ErrUnexpectedEOF
}
- m.Reason = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1422,27 +2370,28 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Message = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 4:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1469,13 +2418,63 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 5:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1502,15 +2501,15 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 6:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1520,23 +2519,25 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
+ m.Items = append(m.Items, ClusterTrustBundle{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -1559,7 +2560,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
+func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1582,17 +2583,17 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group")
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1602,30 +2603,29 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1635,25 +2635,23 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, CertificateSigningRequest{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.TrustBundle = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -1676,7 +2674,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
+func (m *ExtraValue) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1699,17 +2697,17 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group")
+ return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
- var byteLen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1719,31 +2717,79 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...)
- if m.Request == nil {
- m.Request = []byte{}
- }
+ *m = append(*m, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
- case 2:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1753,29 +2799,30 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Username = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 3:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1785,29 +2832,30 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.UID = string(dAtA[iNdEx:postIndex])
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 4:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1817,29 +2865,80 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 5:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequestList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequestList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequestList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1849,27 +2948,28 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Usages = append(m.Usages, KeyUsage(dAtA[iNdEx:postIndex]))
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 6:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -1896,109 +2996,96 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Extra == nil {
- m.Extra = make(map[string]ExtraValue)
+ m.Items = append(m.Items, PodCertificateRequest{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- var mapkey string
- mapvalue := &ExtraValue{}
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodCertificateRequestSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodCertificateRequestSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodCertificateRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
}
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthGenerated
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &ExtraValue{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
}
- m.Extra[mapkey] = *mapvalue
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SignerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 7:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -2026,14 +3113,13 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.SignerName = &s
+ m.PodName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodUID", wireType)
}
- var v int32
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2043,67 +3129,61 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int32(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.ExpirationSeconds = &v
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
return ErrInvalidLengthGenerated
}
- if (iNdEx + skippy) > l {
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ m.PodUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
}
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountUID", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2113,31 +3193,29 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.ServiceAccountUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
}
- var byteLen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2147,81 +3225,115 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...)
- if m.Certificate == nil {
- m.Certificate = []byte{}
- }
+ m.NodeName = k8s_io_apimachinery_pkg_types.NodeName(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeUID", wireType)
}
- if (skippy < 0) || (iNdEx+skippy) < 0 {
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- if (iNdEx + skippy) > l {
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
+ m.NodeUID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
}
- if iNdEx >= l {
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxExpirationSeconds = &v
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PKIXPublicKey", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
return io.ErrUnexpectedEOF
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ m.PKIXPublicKey = append(m.PKIXPublicKey[:0], dAtA[iNdEx:postIndex]...)
+ if m.PKIXPublicKey == nil {
+ m.PKIXPublicKey = []byte{}
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundle: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundle: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ iNdEx = postIndex
+ case 10:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ProofOfPossession", wireType)
}
- var msglen int
+ var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2231,28 +3343,29 @@ func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ if byteLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.ProofOfPossession = append(m.ProofOfPossession[:0], dAtA[iNdEx:postIndex]...)
+ if m.ProofOfPossession == nil {
+ m.ProofOfPossession = []byte{}
}
iNdEx = postIndex
- case 2:
+ case 11:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field UnverifiedUserAnnotations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2279,9 +3392,103 @@ func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ if m.UnverifiedUserAnnotations == nil {
+ m.UnverifiedUserAnnotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.UnverifiedUserAnnotations[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -2304,7 +3511,7 @@ func (m *ClusterTrustBundle) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
+func (m *PodCertificateRequestStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2327,15 +3534,15 @@ func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundleList: wiretype end group for non-group")
+ return fmt.Errorf("proto: PodCertificateRequestStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundleList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: PodCertificateRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2362,15 +3569,16 @@ func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CertificateChain", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2380,81 +3588,29 @@ func (m *ClusterTrustBundleList) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, ClusterTrustBundle{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.CertificateChain = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ClusterTrustBundleSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ClusterTrustBundleSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NotBefore", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2464,29 +3620,33 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.SignerName = string(dAtA[iNdEx:postIndex])
+ if m.NotBefore == nil {
+ m.NotBefore = &v1.Time{}
+ }
+ if err := m.NotBefore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 2:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TrustBundle", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field BeginRefreshAt", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2496,79 +3656,33 @@ func (m *ClusterTrustBundleSpec) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.TrustBundle = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ExtraValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ if m.BeginRefreshAt == nil {
+ m.BeginRefreshAt = &v1.Time{}
}
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
+ if err := m.BeginRefreshAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ iNdEx = postIndex
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NotAfter", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2578,23 +3692,27 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- *m = append(*m, string(dAtA[iNdEx:postIndex]))
+ if m.NotAfter == nil {
+ m.NotAfter = &v1.Time{}
+ }
+ if err := m.NotAfter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index 4c9385c1..a8ffad47 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -86,7 +86,6 @@ message CertificateSigningRequestList {
// CertificateSigningRequestSpec contains the certificate request.
message CertificateSigningRequestSpec {
// Base64-encoded PKCS#10 CSR data
- // +listType=atomic
optional bytes request = 1;
// Requested signer for the request. It is a qualified name in the form:
@@ -186,13 +185,13 @@ message CertificateSigningRequestStatus {
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
+ // +k8s:customUnique
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
repeated CertificateSigningRequestCondition conditions = 1;
// If request was approved, the controller will place the issued certificate here.
- // +listType=atomic
// +optional
optional bytes certificate = 2;
}
@@ -279,3 +278,220 @@ message ExtraValue {
repeated string items = 1;
}
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+message PodCertificateRequest {
+ // metadata contains the object metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec contains the details about the certificate being requested.
+ optional PodCertificateRequestSpec spec = 2;
+
+ // status contains the issued certificate, and a standard set of conditions.
+ // +optional
+ optional PodCertificateRequestStatus status = 3;
+}
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+message PodCertificateRequestList {
+ // metadata contains the list metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a collection of PodCertificateRequest objects
+ repeated PodCertificateRequest items = 2;
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+message PodCertificateRequestSpec {
+ // signerName indicates the requested signer.
+ //
+ // All signer names beginning with `kubernetes.io` are reserved for use by
+ // the Kubernetes project. There is currently one well-known signer
+ // documented by the Kubernetes project,
+ // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+ // certificates understood by kube-apiserver. It is currently
+ // unimplemented.
+ //
+ // +required
+ optional string signerName = 1;
+
+ // podName is the name of the pod into which the certificate will be mounted.
+ //
+ // +required
+ optional string podName = 2;
+
+ // podUID is the UID of the pod into which the certificate will be mounted.
+ //
+ // +required
+ optional string podUID = 3;
+
+ // serviceAccountName is the name of the service account the pod is running as.
+ //
+ // +required
+ optional string serviceAccountName = 4;
+
+ // serviceAccountUID is the UID of the service account the pod is running as.
+ //
+ // +required
+ optional string serviceAccountUID = 5;
+
+ // nodeName is the name of the node the pod is assigned to.
+ //
+ // +required
+ optional string nodeName = 6;
+
+ // nodeUID is the UID of the node the pod is assigned to.
+ //
+ // +required
+ optional string nodeUID = 7;
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ // +default=86400
+ optional int32 maxExpirationSeconds = 8;
+
+ // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+ // certificate to.
+ //
+ // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+ // or ED25519. Note that this list may be expanded in the future.
+ //
+ // Signer implementations do not need to support all key types supported by
+ // kube-apiserver and kubelet. If a signer does not support the key type
+ // used for a given PodCertificateRequest, it must deny the request by
+ // setting a status.conditions entry with a type of "Denied" and a reason of
+ // "UnsupportedKeyType". It may also suggest a key type that it does support
+ // in the message field.
+ //
+ // +required
+ optional bytes pkixPublicKey = 9;
+
+ // proofOfPossession proves that the requesting kubelet holds the private
+ // key corresponding to pkixPublicKey.
+ //
+ // It is constructed by signing the ASCII bytes of the pod's UID using
+ // `pkixPublicKey`.
+ //
+ // kube-apiserver validates the proof of possession during creation of the
+ // PodCertificateRequest.
+ //
+ // If the key is an RSA key, then the signature is over the ASCII bytes of
+ // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+ // function crypto/rsa.SignPSS with nil options).
+ //
+ // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+ // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+ // golang library function crypto/ecdsa.SignASN1)
+ //
+ // If the key is an ED25519 key, then the signature is as described by the
+ // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+ // the golang library crypto/ed25519.Sign).
+ //
+ // +required
+ optional bytes proofOfPossession = 10;
+
+ // unverifiedUserAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ map<string, string> unverifiedUserAnnotations = 11;
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+message PodCertificateRequestStatus {
+ // conditions applied to the request.
+ //
+ // The types "Issued", "Denied", and "Failed" have special handling. At
+ // most one of these conditions may be present, and they must have status
+ // "True".
+ //
+ // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+ // suggest a key type that will work in the message field.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+
+ // certificateChain is populated with an issued certificate by the signer.
+ // This field is set via the /status subresource. Once populated, this field
+ // is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type
+ // "Denied" is added and this field remains empty. If the signer cannot
+ // issue the certificate, a condition of type "Failed" is added and this
+ // field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificateChain must consist of one or more PEM-formatted certificates.
+ // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+ // described in section 4 of RFC5280.
+ //
+ // If more than one block is present, and the definition of the requested
+ // spec.signerName does not indicate otherwise, the first block is the
+ // issued certificate, and subsequent blocks should be treated as
+ // intermediate certificates and presented in TLS handshakes. When
+ // projecting the chain into a pod volume, kubelet will drop any data
+ // in-between the PEM blocks, as well as any PEM block headers.
+ //
+ // +optional
+ optional string certificateChain = 2;
+
+ // notBefore is the time at which the certificate becomes valid. The value
+ // must be the same as the notBefore value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notBefore = 4;
+
+ // beginRefreshAt is the time at which the kubelet should begin trying to
+ // refresh the certificate. This field is set via the /status subresource,
+ // and must be set at the same time as certificateChain. Once populated,
+ // this field is immutable.
+ //
+ // This field is only a hint. Kubelet may start refreshing before or after
+ // this time if necessary.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time beginRefreshAt = 5;
+
+ // notAfter is the time at which the certificate expires. The value must be
+ // the same as the notAfter value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time notAfter = 6;
+}
+
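The proofOfPossession comments above define a small signing protocol: the kubelet signs the ASCII bytes of the pod UID with the private key behind pkixPublicKey. Below is a minimal Go sketch of the ED25519 case for orientation only; the helper name, the use of crypto/rand, and the placeholder pod UID are illustrative assumptions, not part of the vendored API.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"fmt"
)

// buildKeyAndProof is a hypothetical helper: it generates an ED25519 key pair,
// PKIX-serializes the public key (the pkixPublicKey field), and signs the ASCII
// bytes of the pod UID to produce the proofOfPossession field.
func buildKeyAndProof(podUID string) (pkixPublicKey, proofOfPossession []byte, priv ed25519.PrivateKey, err error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, nil, nil, err
	}
	pkixPublicKey, err = x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return nil, nil, nil, err
	}
	// For ED25519 the proof is a plain Ed25519 signature over the pod UID bytes.
	proofOfPossession = ed25519.Sign(priv, []byte(podUID))
	return pkixPublicKey, proofOfPossession, priv, nil
}

func main() {
	// The UID below is a made-up placeholder for demonstration.
	pubDER, proof, _, err := buildKeyAndProof("6b2d0e6a-6f3a-4d9e-9e9a-2f9d2a1c0b7e")
	if err != nil {
		panic(err)
	}
	fmt.Printf("pkixPublicKey: %d bytes, proofOfPossession: %d bytes\n", len(pubDER), len(proof))
}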
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..00ba7faa
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,48 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*CertificateSigningRequest) ProtoMessage() {}
+
+func (*CertificateSigningRequestCondition) ProtoMessage() {}
+
+func (*CertificateSigningRequestList) ProtoMessage() {}
+
+func (*CertificateSigningRequestSpec) ProtoMessage() {}
+
+func (*CertificateSigningRequestStatus) ProtoMessage() {}
+
+func (*ClusterTrustBundle) ProtoMessage() {}
+
+func (*ClusterTrustBundleList) ProtoMessage() {}
+
+func (*ClusterTrustBundleSpec) ProtoMessage() {}
+
+func (*ExtraValue) ProtoMessage() {}
+
+func (*PodCertificateRequest) ProtoMessage() {}
+
+func (*PodCertificateRequestList) ProtoMessage() {}
+
+func (*PodCertificateRequestSpec) ProtoMessage() {}
+
+func (*PodCertificateRequestStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/register.go b/operator/vendor/k8s.io/api/certificates/v1beta1/register.go
index 800dccd0..ee985132 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/register.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/register.go
@@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&CertificateSigningRequestList{},
&ClusterTrustBundle{},
&ClusterTrustBundleList{},
+ &PodCertificateRequest{},
+ &PodCertificateRequestList{},
)
// Add the watch version that applies
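For orientation, a short sketch of what the registration change enables: building a runtime.Scheme and listing the kinds now known for certificates/v1beta1, which include the two PodCertificateRequest types added above. It assumes the package's standard SchemeGroupVersion and AddToScheme helpers, which live elsewhere in register.go and are not shown in this hunk.

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register all certificates/v1beta1 types into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := certsv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Print every kind registered for this group/version.
	for kind := range scheme.KnownTypes(certsv1beta1.SchemeGroupVersion) {
		fmt.Println(kind)
	}
}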
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/types.go b/operator/vendor/k8s.io/api/certificates/v1beta1/types.go
index fadb7e08..acfabbfe 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -21,6 +21,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
)
// +genclient
@@ -51,7 +52,6 @@ type CertificateSigningRequest struct {
// CertificateSigningRequestSpec contains the certificate request.
type CertificateSigningRequestSpec struct {
// Base64-encoded PKCS#10 CSR data
- // +listType=atomic
Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"`
// Requested signer for the request. It is a qualified name in the form:
@@ -179,13 +179,13 @@ type CertificateSigningRequestStatus struct {
// +optional
// +k8s:listType=map
// +k8s:listMapKey=type
+ // +k8s:customUnique
// +k8s:optional
// +k8s:item(type: "Approved")=+k8s:zeroOrOneOfMember
// +k8s:item(type: "Denied")=+k8s:zeroOrOneOfMember
Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
// If request was approved, the controller will place the issued certificate here.
- // +listType=atomic
// +optional
Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"`
}
@@ -354,3 +354,250 @@ type ClusterTrustBundleList struct {
// items is a collection of ClusterTrustBundle objects
Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +k8s:prerelease-lifecycle-gen:introduced=1.35
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
+type PodCertificateRequest struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the object metadata.
+ //
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec contains the details about the certificate being requested.
+ Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // status contains the issued certificate, and a standard set of conditions.
+ // +optional
+ Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
+type PodCertificateRequestSpec struct {
+ // signerName indicates the requested signer.
+ //
+ // All signer names beginning with `kubernetes.io` are reserved for use by
+ // the Kubernetes project. There is currently one well-known signer
+ // documented by the Kubernetes project,
+ // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+ // certificates understood by kube-apiserver. It is currently
+ // unimplemented.
+ //
+ // +required
+ SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`
+
+ // podName is the name of the pod into which the certificate will be mounted.
+ //
+ // +required
+ PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
+ // podUID is the UID of the pod into which the certificate will be mounted.
+ //
+ // +required
+ PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`
+
+ // serviceAccountName is the name of the service account the pod is running as.
+ //
+ // +required
+ ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
+ // serviceAccountUID is the UID of the service account the pod is running as.
+ //
+ // +required
+ ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`
+
+ // nodeName is the name of the node the pod is assigned to.
+ //
+ // +required
+ NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
+ // nodeUID is the UID of the node the pod is assigned to.
+ //
+ // +required
+ NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`
+
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ //
+ // +optional
+ // +default=86400
+ MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`
+
+ // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+ // certificate to.
+ //
+ // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+ // or ED25519. Note that this list may be expanded in the future.
+ //
+ // Signer implementations do not need to support all key types supported by
+ // kube-apiserver and kubelet. If a signer does not support the key type
+ // used for a given PodCertificateRequest, it must deny the request by
+ // setting a status.conditions entry with a type of "Denied" and a reason of
+ // "UnsupportedKeyType". It may also suggest a key type that it does support
+ // in the message field.
+ //
+ // +required
+ PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`
+
+ // proofOfPossession proves that the requesting kubelet holds the private
+ // key corresponding to pkixPublicKey.
+ //
+ // It is constructed by signing the ASCII bytes of the pod's UID using
+ // `pkixPublicKey`.
+ //
+ // kube-apiserver validates the proof of possession during creation of the
+ // PodCertificateRequest.
+ //
+ // If the key is an RSA key, then the signature is over the ASCII bytes of
+ // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+ // function crypto/rsa.SignPSS with nil options).
+ //
+ // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+ // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+ // golang library function crypto/ecdsa.SignASN1)
+ //
+ // If the key is an ED25519 key, then the signature is as described by the
+ // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+ // the golang library crypto/ed25519.Sign).
+ //
+ // +required
+ ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
+
+ // unverifiedUserAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ UnverifiedUserAnnotations map[string]string `json:"unverifiedUserAnnotations,omitempty" protobuf:"bytes,11,opt,name=unverifiedUserAnnotations"`
+}
+
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
+type PodCertificateRequestStatus struct {
+ // conditions applied to the request.
+ //
+ // The types "Issued", "Denied", and "Failed" have special handling. At
+ // most one of these conditions may be present, and they must have status
+ // "True".
+ //
+ // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+ // suggest a key type that will work in the message field.
+ //
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+ // certificateChain is populated with an issued certificate by the signer.
+ // This field is set via the /status subresource. Once populated, this field
+ // is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type
+ // "Denied" is added and this field remains empty. If the signer cannot
+ // issue the certificate, a condition of type "Failed" is added and this
+ // field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificateChain must consist of one or more PEM-formatted certificates.
+ // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+ // described in section 4 of RFC5280.
+ //
+ // If more than one block is present, and the definition of the requested
+ // spec.signerName does not indicate otherwise, the first block is the
+ // issued certificate, and subsequent blocks should be treated as
+ // intermediate certificates and presented in TLS handshakes. When
+ // projecting the chain into a pod volume, kubelet will drop any data
+ // in-between the PEM blocks, as well as any PEM block headers.
+ //
+ // +optional
+ CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`
+
+ // notBefore is the time at which the certificate becomes valid. The value
+ // must be the same as the notBefore value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`
+
+ // beginRefreshAt is the time at which the kubelet should begin trying to
+ // refresh the certificate. This field is set via the /status subresource,
+ // and must be set at the same time as certificateChain. Once populated,
+ // this field is immutable.
+ //
+ // This field is only a hint. Kubelet may start refreshing before or after
+ // this time if necessary.
+ //
+ // +optional
+ BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`
+
+ // notAfter is the time at which the certificate expires. The value must be
+ // the same as the notAfter value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ //
+ // +optional
+ NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
+}
+
+// Well-known condition types for PodCertificateRequests
+const (
+ // Denied indicates the request was denied by the signer.
+ PodCertificateRequestConditionTypeDenied string = "Denied"
+ // Failed indicates the signer failed to issue the certificate.
+ PodCertificateRequestConditionTypeFailed string = "Failed"
+ // Issued indicates the certificate has been issued.
+ PodCertificateRequestConditionTypeIssued string = "Issued"
+)
+
+// Well-known condition reasons for PodCertificateRequests
+const (
+ // UnsupportedKeyType should be set on "Denied" conditions when the signer
+ // doesn't support the key type of pkixPublicKey.
+ PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
+
+ // InvalidUnverifiedUserAnnotations should be set on "Denied" conditions when the signer
+ // does not recognize one of the keys passed in unverifiedUserAnnotations, or if the
+ // signer otherwise considers the unverifiedUserAnnotations of the request to be invalid.
+ PodCertificateRequestConditionInvalidUserConfig string = "InvalidUnverifiedUserAnnotations"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.35
+
+// PodCertificateRequestList is a collection of PodCertificateRequest objects
+type PodCertificateRequestList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata contains the list metadata.
+ //
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a collection of PodCertificateRequest objects
+ Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
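The condition contract documented on PodCertificateRequestStatus (at most one of Issued, Denied, or Failed, each with status "True", and UnsupportedKeyType as the documented denial reason for unsupported keys) maps directly onto the metav1.Condition helpers. A hedged sketch of how a signer might record a denial follows; the function name, the example message, and the controller-free setting are assumptions, not code from this change.

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// denyUnsupportedKeyType records a "Denied" condition on the request status,
// following the convention documented on PodCertificateRequestStatus.Conditions.
func denyUnsupportedKeyType(pcr *certsv1beta1.PodCertificateRequest) {
	meta.SetStatusCondition(&pcr.Status.Conditions, metav1.Condition{
		Type:    certsv1beta1.PodCertificateRequestConditionTypeDenied,
		Status:  metav1.ConditionTrue,
		Reason:  certsv1beta1.PodCertificateRequestConditionUnsupportedKeyType,
		Message: "this signer only supports ED25519 keys",
	})
}

func main() {
	pcr := &certsv1beta1.PodCertificateRequest{}
	denyUnsupportedKeyType(pcr)
	// Look the condition back up the same way a consumer would.
	if c := meta.FindStatusCondition(pcr.Status.Conditions, certsv1beta1.PodCertificateRequestConditionTypeDenied); c != nil {
		fmt.Println("denied:", c.Reason)
	}
}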
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
index 58c69e54..bd829fb2 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -105,4 +105,57 @@ func (ClusterTrustBundleSpec) SwaggerDoc() map[string]string {
return map_ClusterTrustBundleSpec
}
+var map_PodCertificateRequest = map[string]string{
+ "": "PodCertificateRequest encodes a pod requesting a certificate from a given signer.\n\nKubelets use this API to implement podCertificate projected volumes",
+ "metadata": "metadata contains the object metadata.",
+ "spec": "spec contains the details about the certificate being requested.",
+ "status": "status contains the issued certificate, and a standard set of conditions.",
+}
+
+func (PodCertificateRequest) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequest
+}
+
+var map_PodCertificateRequestList = map[string]string{
+ "": "PodCertificateRequestList is a collection of PodCertificateRequest objects",
+ "metadata": "metadata contains the list metadata.",
+ "items": "items is a collection of PodCertificateRequest objects",
+}
+
+func (PodCertificateRequestList) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestList
+}
+
+var map_PodCertificateRequestSpec = map[string]string{
+ "": "PodCertificateRequestSpec describes the certificate request. All fields are immutable after creation.",
+ "signerName": "signerName indicates the requested signer.\n\nAll signer names beginning with `kubernetes.io` are reserved for use by the Kubernetes project. There is currently one well-known signer documented by the Kubernetes project, `kubernetes.io/kube-apiserver-client-pod`, which will issue client certificates understood by kube-apiserver. It is currently unimplemented.",
+ "podName": "podName is the name of the pod into which the certificate will be mounted.",
+ "podUID": "podUID is the UID of the pod into which the certificate will be mounted.",
+ "serviceAccountName": "serviceAccountName is the name of the service account the pod is running as.",
+ "serviceAccountUID": "serviceAccountUID is the UID of the service account the pod is running as.",
+ "nodeName": "nodeName is the name of the node the pod is assigned to.",
+ "nodeUID": "nodeUID is the UID of the node the pod is assigned to.",
+ "maxExpirationSeconds": "maxExpirationSeconds is the maximum lifetime permitted for the certificate.\n\nIf omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver will reject values shorter than 3600 (1 hour). The maximum allowable value is 7862400 (91 days).\n\nThe signer implementation is then free to issue a certificate with any lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 seconds (1 hour). This constraint is enforced by kube-apiserver. `kubernetes.io` signers will never issue certificates with a lifetime longer than 24 hours.",
+ "pkixPublicKey": "pkixPublicKey is the PKIX-serialized public key the signer will issue the certificate to.\n\nThe key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521, or ED25519. Note that this list may be expanded in the future.\n\nSigner implementations do not need to support all key types supported by kube-apiserver and kubelet. If a signer does not support the key type used for a given PodCertificateRequest, it must deny the request by setting a status.conditions entry with a type of \"Denied\" and a reason of \"UnsupportedKeyType\". It may also suggest a key type that it does support in the message field.",
+ "proofOfPossession": "proofOfPossession proves that the requesting kubelet holds the private key corresponding to pkixPublicKey.\n\nIt is contructed by signing the ASCII bytes of the pod's UID using `pkixPublicKey`.\n\nkube-apiserver validates the proof of possession during creation of the PodCertificateRequest.\n\nIf the key is an RSA key, then the signature is over the ASCII bytes of the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang function crypto/rsa.SignPSS with nil options).\n\nIf the key is an ECDSA key, then the signature is as described by [SEC 1, Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the golang library function crypto/ecdsa.SignASN1)\n\nIf the key is an ED25519 key, the the signature is as described by the [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the golang library crypto/ed25519.Sign).",
+ "unverifiedUserAnnotations": "unverifiedUserAnnotations allow pod authors to pass additional information to the signer implementation. Kubernetes does not restrict or validate this metadata in any way.\n\nEntries are subject to the same validation as object metadata annotations, with the addition that all keys must be domain-prefixed. No restrictions are placed on values, except an overall size limitation on the entire field.\n\nSigners should document the keys and values they support. Signers should deny requests that contain keys they do not recognize.",
+}
+
+func (PodCertificateRequestSpec) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestSpec
+}
+
+var map_PodCertificateRequestStatus = map[string]string{
+ "": "PodCertificateRequestStatus describes the status of the request, and holds the certificate data if the request is issued.",
+ "conditions": "conditions applied to the request.\n\nThe types \"Issued\", \"Denied\", and \"Failed\" have special handling. At most one of these conditions may be present, and they must have status \"True\".\n\nIf the request is denied with `Reason=UnsupportedKeyType`, the signer may suggest a key type that will work in the message field.",
+ "certificateChain": "certificateChain is populated with an issued certificate by the signer. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificateChain must consist of one or more PEM-formatted certificates.\n 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as\n described in section 4 of RFC5280.\n\nIf more than one block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes. When projecting the chain into a pod volume, kubelet will drop any data in-between the PEM blocks, as well as any PEM block headers.",
+ "notBefore": "notBefore is the time at which the certificate becomes valid. The value must be the same as the notBefore value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
+ "beginRefreshAt": "beginRefreshAt is the time at which the kubelet should begin trying to refresh the certificate. This field is set via the /status subresource, and must be set at the same time as certificateChain. Once populated, this field is immutable.\n\nThis field is only a hint. Kubelet may start refreshing before or after this time if necessary.",
+ "notAfter": "notAfter is the time at which the certificate expires. The value must be the same as the notAfter value in the leaf certificate in certificateChain. This field is set via the /status subresource. Once populated, it is immutable. The signer must set this field at the same time it sets certificateChain.",
+}
+
+func (PodCertificateRequestStatus) SwaggerDoc() map[string]string {
+ return map_PodCertificateRequestStatus
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
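As a companion to the certificateChain description repeated in the swagger text above (one or more PEM CERTIFICATE blocks, the first being the issued leaf and the rest intermediates), here is an illustrative Go reader under those assumptions; the package and function names are hypothetical and this is not the kubelet's implementation.

package pcrchain

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// splitCertificateChain parses a certificateChain value as described in the API
// docs: one or more PEM "CERTIFICATE" blocks, leaf first, intermediates after.
// Data between blocks and PEM block headers are ignored by pem.Decode.
func splitCertificateChain(chain string) (leaf *x509.Certificate, intermediates []*x509.Certificate, err error) {
	rest := []byte(chain)
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		if block.Type != "CERTIFICATE" {
			return nil, nil, fmt.Errorf("unexpected PEM block type %q", block.Type)
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, nil, err
		}
		if leaf == nil {
			leaf = cert
		} else {
			intermediates = append(intermediates, cert)
		}
	}
	if leaf == nil {
		return nil, nil, fmt.Errorf("certificateChain contains no certificates")
	}
	return leaf, intermediates, nil
}

A caller would feed pcr.Status.CertificateChain into splitCertificateChain and hand the intermediates to its TLS configuration, mirroring how the comments say kubelet projects the chain into a pod volume.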
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
index 854e8347..20b5c2a2 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
package v1beta1
import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -283,3 +284,137 @@ func (in ExtraValue) DeepCopy() ExtraValue {
in.DeepCopyInto(out)
return *out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequest) DeepCopyInto(out *PodCertificateRequest) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequest.
+func (in *PodCertificateRequest) DeepCopy() *PodCertificateRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequest) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestList) DeepCopyInto(out *PodCertificateRequestList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PodCertificateRequest, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestList.
+func (in *PodCertificateRequestList) DeepCopy() *PodCertificateRequestList {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodCertificateRequestList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestSpec) DeepCopyInto(out *PodCertificateRequestSpec) {
+ *out = *in
+ if in.MaxExpirationSeconds != nil {
+ in, out := &in.MaxExpirationSeconds, &out.MaxExpirationSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.PKIXPublicKey != nil {
+ in, out := &in.PKIXPublicKey, &out.PKIXPublicKey
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.ProofOfPossession != nil {
+ in, out := &in.ProofOfPossession, &out.ProofOfPossession
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ if in.UnverifiedUserAnnotations != nil {
+ in, out := &in.UnverifiedUserAnnotations, &out.UnverifiedUserAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestSpec.
+func (in *PodCertificateRequestSpec) DeepCopy() *PodCertificateRequestSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCertificateRequestStatus) DeepCopyInto(out *PodCertificateRequestStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.NotBefore != nil {
+ in, out := &in.NotBefore, &out.NotBefore
+ *out = (*in).DeepCopy()
+ }
+ if in.BeginRefreshAt != nil {
+ in, out := &in.BeginRefreshAt, &out.BeginRefreshAt
+ *out = (*in).DeepCopy()
+ }
+ if in.NotAfter != nil {
+ in, out := &in.NotAfter, &out.NotAfter
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCertificateRequestStatus.
+func (in *PodCertificateRequestStatus) DeepCopy() *PodCertificateRequestStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCertificateRequestStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..5c0b5ee5
--- /dev/null
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,82 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequest) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.CertificateSigningRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestCondition) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.CertificateSigningRequestCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestList) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.CertificateSigningRequestList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestSpec) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CertificateSigningRequestStatus) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.CertificateSigningRequestStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundle) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.ClusterTrustBundle"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundleList) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.ClusterTrustBundleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.ClusterTrustBundleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCertificateRequest) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.PodCertificateRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCertificateRequestList) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.PodCertificateRequestList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCertificateRequestSpec) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.PodCertificateRequestSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCertificateRequestStatus) OpenAPIModelName() string {
+ return "io.k8s.api.certificates.v1beta1.PodCertificateRequestStatus"
+}
diff --git a/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
index 062b46f1..f95e329e 100644
--- a/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
+++ b/operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -108,3 +108,39 @@ func (in *ClusterTrustBundleList) APILifecycleDeprecated() (major, minor int) {
func (in *ClusterTrustBundleList) APILifecycleRemoved() (major, minor int) {
return 1, 39
}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequest) APILifecycleIntroduced() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequest) APILifecycleDeprecated() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequest) APILifecycleRemoved() (major, minor int) {
+ return 1, 41
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodCertificateRequestList) APILifecycleIntroduced() (major, minor int) {
+ return 1, 35
+}
+
+// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleDeprecated() (major, minor int) {
+ return 1, 38
+}
+
+// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
+func (in *PodCertificateRequestList) APILifecycleRemoved() (major, minor int) {
+ return 1, 41
+}
diff --git a/operator/vendor/k8s.io/api/coordination/v1/doc.go b/operator/vendor/k8s.io/api/coordination/v1/doc.go
index 82ae6340..fc427222 100644
--- a/operator/vendor/k8s.io/api/coordination/v1/doc.go
+++ b/operator/vendor/k8s.io/api/coordination/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.coordination.v1
// +groupName=coordination.k8s.io
diff --git a/operator/vendor/k8s.io/api/coordination/v1/generated.pb.go b/operator/vendor/k8s.io/api/coordination/v1/generated.pb.go
index cf6702ae..2c32ca48 100644
--- a/operator/vendor/k8s.io/api/coordination/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/coordination/v1/generated.pb.go
@@ -24,160 +24,18 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Lease) Reset() { *m = Lease{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *LeaseList) Reset() { *m = LeaseList{} }
-func (m *Lease) Reset() { *m = Lease{} }
-func (*Lease) ProtoMessage() {}
-func (*Lease) Descriptor() ([]byte, []int) {
- return fileDescriptor_239d5a4df3139dce, []int{0}
-}
-func (m *Lease) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Lease) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Lease.Merge(m, src)
-}
-func (m *Lease) XXX_Size() int {
- return m.Size()
-}
-func (m *Lease) XXX_DiscardUnknown() {
- xxx_messageInfo_Lease.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Lease proto.InternalMessageInfo
-
-func (m *LeaseList) Reset() { *m = LeaseList{} }
-func (*LeaseList) ProtoMessage() {}
-func (*LeaseList) Descriptor() ([]byte, []int) {
- return fileDescriptor_239d5a4df3139dce, []int{1}
-}
-func (m *LeaseList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseList.Merge(m, src)
-}
-func (m *LeaseList) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseList) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseList proto.InternalMessageInfo
-
-func (m *LeaseSpec) Reset() { *m = LeaseSpec{} }
-func (*LeaseSpec) ProtoMessage() {}
-func (*LeaseSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_239d5a4df3139dce, []int{2}
-}
-func (m *LeaseSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseSpec.Merge(m, src)
-}
-func (m *LeaseSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease")
- proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList")
- proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/coordination/v1/generated.proto", fileDescriptor_239d5a4df3139dce)
-}
-
-var fileDescriptor_239d5a4df3139dce = []byte{
- // 588 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x4e, 0xd4, 0x40,
- 0x14, 0xc6, 0xb7, 0xb0, 0xab, 0xec, 0xac, 0xfc, 0xc9, 0xc8, 0x45, 0xb3, 0x17, 0x2d, 0x92, 0x98,
- 0x10, 0x13, 0xa7, 0x42, 0x8c, 0x31, 0x26, 0x26, 0x58, 0x89, 0x4a, 0xb2, 0x44, 0x53, 0xb8, 0x32,
- 0x5c, 0x38, 0xdb, 0x1e, 0xba, 0x23, 0xb4, 0x53, 0x67, 0x66, 0x31, 0xdc, 0xf9, 0x08, 0x3e, 0x81,
- 0xef, 0xa0, 0x4f, 0xc1, 0x25, 0x97, 0x5c, 0x35, 0x32, 0xbe, 0x85, 0x57, 0x66, 0x66, 0x0b, 0x0b,
- 0xcb, 0x6e, 0x20, 0xde, 0x75, 0xce, 0x39, 0xdf, 0xef, 0x7c, 0x73, 0x4e, 0x5b, 0xf4, 0x68, 0xff,
- 0xb9, 0x24, 0x8c, 0x07, 0xb4, 0x60, 0x41, 0xcc, 0xb9, 0x48, 0x58, 0x4e, 0x15, 0xe3, 0x79, 0x70,
- 0xb8, 0x1a, 0xa4, 0x90, 0x83, 0xa0, 0x0a, 0x12, 0x52, 0x08, 0xae, 0x38, 0x6e, 0x0f, 0x6a, 0x09,
- 0x2d, 0x18, 0xb9, 0x5c, 0x4b, 0x0e, 0x57, 0xdb, 0x8f, 0x53, 0xa6, 0x7a, 0xfd, 0x2e, 0x89, 0x79,
- 0x16, 0xa4, 0x3c, 0xe5, 0x81, 0x95, 0x74, 0xfb, 0x7b, 0xf6, 0x64, 0x0f, 0xf6, 0x69, 0x80, 0x6a,
- 0x3f, 0x1d, 0xb6, 0xcd, 0x68, 0xdc, 0x63, 0x39, 0x88, 0xa3, 0xa0, 0xd8, 0x4f, 0x4d, 0x40, 0x06,
- 0x19, 0x28, 0x3a, 0xc6, 0x40, 0x3b, 0x98, 0xa4, 0x12, 0xfd, 0x5c, 0xb1, 0x0c, 0xae, 0x09, 0x9e,
- 0xdd, 0x24, 0x90, 0x71, 0x0f, 0x32, 0x3a, 0xaa, 0x5b, 0xfe, 0xe5, 0xa0, 0x46, 0x07, 0xa8, 0x04,
- 0xfc, 0x09, 0xcd, 0x18, 0x37, 0x09, 0x55, 0xd4, 0x75, 0x96, 0x9c, 0x95, 0xd6, 0xda, 0x13, 0x32,
- 0x1c, 0xc3, 0x05, 0x94, 0x14, 0xfb, 0xa9, 0x09, 0x48, 0x62, 0xaa, 0xc9, 0xe1, 0x2a, 0x79, 0xdf,
- 0xfd, 0x0c, 0xb1, 0xda, 0x02, 0x45, 0x43, 0x7c, 0x5c, 0xfa, 0x35, 0x5d, 0xfa, 0x68, 0x18, 0x8b,
- 0x2e, 0xa8, 0xf8, 0x2d, 0xaa, 0xcb, 0x02, 0x62, 0x77, 0xca, 0xd2, 0x1f, 0x92, 0xc9, 0x43, 0x26,
- 0xd6, 0xd2, 0x76, 0x01, 0x71, 0x78, 0xaf, 0x42, 0xd6, 0xcd, 0x29, 0xb2, 0x80, 0xe5, 0x9f, 0x0e,
- 0x6a, 0xda, 0x8a, 0x0e, 0x93, 0x0a, 0xef, 0x5e, 0x33, 0x4e, 0x6e, 0x67, 0xdc, 0xa8, 0xad, 0xed,
- 0x85, 0xaa, 0xc7, 0xcc, 0x79, 0xe4, 0x92, 0xe9, 0x37, 0xa8, 0xc1, 0x14, 0x64, 0xd2, 0x9d, 0x5a,
- 0x9a, 0x5e, 0x69, 0xad, 0x3d, 0xb8, 0xd1, 0x75, 0x38, 0x5b, 0xd1, 0x1a, 0x9b, 0x46, 0x17, 0x0d,
- 0xe4, 0xcb, 0x3f, 0xea, 0x95, 0x67, 0x73, 0x0f, 0xfc, 0x02, 0xcd, 0xf5, 0xf8, 0x41, 0x02, 0x62,
- 0x33, 0x81, 0x5c, 0x31, 0x75, 0x64, 0x9d, 0x37, 0x43, 0xac, 0x4b, 0x7f, 0xee, 0xdd, 0x95, 0x4c,
- 0x34, 0x52, 0x89, 0x3b, 0x68, 0xf1, 0xc0, 0x80, 0x36, 0xfa, 0xc2, 0x76, 0xde, 0x86, 0x98, 0xe7,
- 0x89, 0xb4, 0x63, 0x6d, 0x84, 0xae, 0x2e, 0xfd, 0xc5, 0xce, 0x98, 0x7c, 0x34, 0x56, 0x85, 0xbb,
- 0xa8, 0x45, 0xe3, 0x2f, 0x7d, 0x26, 0x60, 0x87, 0x65, 0xe0, 0x4e, 0xdb, 0x01, 0x06, 0xb7, 0x1b,
- 0xe0, 0x16, 0x8b, 0x05, 0x37, 0xb2, 0x70, 0x5e, 0x97, 0x7e, 0xeb, 0xd5, 0x90, 0x13, 0x5d, 0x86,
- 0xe2, 0x5d, 0xd4, 0x14, 0x90, 0xc3, 0x57, 0xdb, 0xa1, 0xfe, 0x7f, 0x1d, 0x66, 0x75, 0xe9, 0x37,
- 0xa3, 0x73, 0x4a, 0x34, 0x04, 0xe2, 0x75, 0xb4, 0x60, 0x6f, 0xb6, 0x23, 0x68, 0x2e, 0x99, 0xb9,
- 0x9b, 0x74, 0x1b, 0x76, 0x16, 0x8b, 0xba, 0xf4, 0x17, 0x3a, 0x23, 0xb9, 0xe8, 0x5a, 0x35, 0xde,
- 0x40, 0x33, 0x52, 0x99, 0xaf, 0x22, 0x3d, 0x72, 0xef, 0xd8, 0x3d, 0xac, 0x98, 0xb7, 0x61, 0xbb,
- 0x8a, 0xfd, 0x2d, 0x7d, 0xf7, 0xf5, 0xf9, 0xaa, 0x21, 0x19, 0x6c, 0xb1, 0xca, 0x45, 0x17, 0x4a,
- 0xfc, 0x12, 0xcd, 0x17, 0x02, 0xf6, 0x40, 0x08, 0x48, 0x06, 0x2b, 0x74, 0xef, 0x5a, 0xd8, 0x7d,
- 0x5d, 0xfa, 0xf3, 0x1f, 0xae, 0xa6, 0xa2, 0xd1, 0xda, 0x70, 0xfd, 0xf8, 0xcc, 0xab, 0x9d, 0x9c,
- 0x79, 0xb5, 0xd3, 0x33, 0xaf, 0xf6, 0x4d, 0x7b, 0xce, 0xb1, 0xf6, 0x9c, 0x13, 0xed, 0x39, 0xa7,
- 0xda, 0x73, 0x7e, 0x6b, 0xcf, 0xf9, 0xfe, 0xc7, 0xab, 0x7d, 0x6c, 0x4f, 0xfe, 0x8b, 0xfd, 0x0b,
- 0x00, 0x00, 0xff, 0xff, 0xf8, 0xf4, 0xd4, 0x78, 0xe2, 0x04, 0x00, 0x00,
-}
+func (m *LeaseSpec) Reset() { *m = LeaseSpec{} }
func (m *Lease) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/coordination/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/coordination/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..f9210fce
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*Lease) ProtoMessage() {}
+
+func (*LeaseList) ProtoMessage() {}
+
+func (*LeaseSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/coordination/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/coordination/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..cf1217ef
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Lease) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1.Lease"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseList) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1.LeaseList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseSpec) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1.LeaseSpec"
+}
diff --git a/operator/vendor/k8s.io/api/coordination/v1alpha2/doc.go b/operator/vendor/k8s.io/api/coordination/v1alpha2/doc.go
index dff7df47..134b182a 100644
--- a/operator/vendor/k8s.io/api/coordination/v1alpha2/doc.go
+++ b/operator/vendor/k8s.io/api/coordination/v1alpha2/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.coordination.v1alpha2
// +groupName=coordination.k8s.io
diff --git a/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go b/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
index 85ceea1f..25a7a483 100644
--- a/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go
@@ -24,160 +24,19 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
-func (*LeaseCandidate) ProtoMessage() {}
-func (*LeaseCandidate) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1ec5c989d262916, []int{0}
-}
-func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidate.Merge(m, src)
-}
-func (m *LeaseCandidate) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidate) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidate.DiscardUnknown(m)
-}
+func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
-var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
+func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
-func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
-func (*LeaseCandidateList) ProtoMessage() {}
-func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1ec5c989d262916, []int{1}
-}
-func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidateList.Merge(m, src)
-}
-func (m *LeaseCandidateList) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidateList) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
-
-func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
-func (*LeaseCandidateSpec) ProtoMessage() {}
-func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_c1ec5c989d262916, []int{2}
-}
-func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidateSpec.Merge(m, src)
-}
-func (m *LeaseCandidateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidate")
- proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateList")
- proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1alpha2.LeaseCandidateSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/coordination/v1alpha2/generated.proto", fileDescriptor_c1ec5c989d262916)
-}
-
-var fileDescriptor_c1ec5c989d262916 = []byte{
- // 555 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x8b, 0xd3, 0x4e,
- 0x18, 0xc7, 0x9b, 0xdd, 0xf6, 0x47, 0x3b, 0xbf, 0xad, 0xd4, 0x01, 0x21, 0xf4, 0x90, 0x96, 0x9e,
- 0x44, 0x70, 0x66, 0x77, 0x5d, 0x44, 0xf0, 0x96, 0xf5, 0x0f, 0x42, 0x57, 0x25, 0xab, 0x0b, 0xca,
- 0x1e, 0x9c, 0x26, 0x8f, 0xe9, 0xd8, 0x26, 0x13, 0x92, 0xe9, 0x4a, 0x6f, 0xbe, 0x04, 0x5f, 0x56,
- 0xf5, 0xb4, 0xc7, 0x3d, 0x15, 0x1b, 0xc1, 0x17, 0xe1, 0x49, 0x66, 0x9a, 0xf4, 0xaf, 0xa5, 0xc5,
- 0x5b, 0xe7, 0x99, 0xe7, 0xf3, 0x99, 0xf9, 0x3e, 0x69, 0x82, 0x0e, 0x7b, 0x8f, 0x12, 0xc2, 0x05,
- 0x65, 0x11, 0xa7, 0xae, 0x10, 0xb1, 0xc7, 0x43, 0x26, 0xb9, 0x08, 0xe9, 0xd5, 0x11, 0xeb, 0x47,
- 0x5d, 0x76, 0x4c, 0x7d, 0x08, 0x21, 0x66, 0x12, 0x3c, 0x12, 0xc5, 0x42, 0x0a, 0xdc, 0x9c, 0x12,
- 0x84, 0x45, 0x9c, 0x2c, 0x12, 0x24, 0x27, 0xea, 0xf7, 0x7d, 0x2e, 0xbb, 0x83, 0x0e, 0x71, 0x45,
- 0x40, 0x7d, 0xe1, 0x0b, 0xaa, 0xc1, 0xce, 0xe0, 0xa3, 0x5e, 0xe9, 0x85, 0xfe, 0x35, 0x15, 0xd6,
- 0xef, 0x6d, 0xbe, 0xc2, 0xea, 0xe1, 0xf5, 0x93, 0x79, 0x6f, 0xc0, 0xdc, 0x2e, 0x0f, 0x21, 0x1e,
- 0xd2, 0xa8, 0xe7, 0xab, 0x42, 0x42, 0x03, 0x90, 0xec, 0x6f, 0x14, 0xdd, 0x44, 0xc5, 0x83, 0x50,
- 0xf2, 0x00, 0xd6, 0x80, 0x87, 0xdb, 0x80, 0xc4, 0xed, 0x42, 0xc0, 0x56, 0xb9, 0xd6, 0x77, 0x03,
- 0xdd, 0x6a, 0x03, 0x4b, 0xe0, 0x94, 0x85, 0x1e, 0xf7, 0x98, 0x04, 0xfc, 0x01, 0x95, 0xd5, 0xb5,
- 0x3c, 0x26, 0x99, 0x69, 0x34, 0x8d, 0xbb, 0xff, 0x1f, 0x1f, 0x92, 0xf9, 0x04, 0x67, 0x76, 0x12,
- 0xf5, 0x7c, 0x55, 0x48, 0x88, 0xea, 0x26, 0x57, 0x47, 0xe4, 0x55, 0xe7, 0x13, 0xb8, 0xf2, 0x0c,
- 0x24, 0xb3, 0xf1, 0x68, 0xdc, 0x28, 0xa4, 0xe3, 0x06, 0x9a, 0xd7, 0x9c, 0x99, 0x15, 0x5f, 0xa0,
- 0x62, 0x12, 0x81, 0x6b, 0xee, 0x69, 0xfb, 0x09, 0xd9, 0xf6, 0x7c, 0xc8, 0xf2, 0x0d, 0xcf, 0x23,
- 0x70, 0xed, 0x83, 0xec, 0x84, 0xa2, 0x5a, 0x39, 0xda, 0xd7, 0xfa, 0x66, 0x20, 0xbc, 0xdc, 0xda,
- 0xe6, 0x89, 0xc4, 0x97, 0x6b, 0x81, 0xc8, 0x6e, 0x81, 0x14, 0xad, 0xe3, 0xd4, 0xb2, 0xc3, 0xca,
- 0x79, 0x65, 0x21, 0xcc, 0x5b, 0x54, 0xe2, 0x12, 0x82, 0xc4, 0xdc, 0x6b, 0xee, 0xaf, 0xcc, 0x6a,
- 0xa7, 0x34, 0x76, 0x35, 0x93, 0x97, 0x5e, 0x28, 0x8d, 0x33, 0xb5, 0xb5, 0x7e, 0xed, 0xaf, 0x66,
- 0x51, 0x41, 0x31, 0x45, 0x95, 0xbe, 0xaa, 0xbe, 0x64, 0x01, 0xe8, 0x30, 0x15, 0xfb, 0x76, 0xc6,
- 0x57, 0xda, 0xf9, 0x86, 0x33, 0xef, 0xc1, 0xef, 0x50, 0x39, 0xe2, 0xa1, 0xff, 0x86, 0x07, 0x90,
- 0xcd, 0x9b, 0xee, 0x16, 0xfe, 0x8c, 0xbb, 0xb1, 0x50, 0x98, 0x7d, 0xa0, 0x92, 0xbf, 0xce, 0x24,
- 0xce, 0x4c, 0x87, 0x2f, 0x51, 0x25, 0x86, 0x10, 0x3e, 0x6b, 0xf7, 0xfe, 0xbf, 0xb9, 0xab, 0xea,
- 0xe2, 0x4e, 0x6e, 0x71, 0xe6, 0x42, 0xfc, 0x18, 0x55, 0x3b, 0x3c, 0x64, 0xf1, 0xf0, 0x02, 0xe2,
- 0x84, 0x8b, 0xd0, 0x2c, 0xea, 0xb4, 0x77, 0xb2, 0xb4, 0x55, 0x7b, 0x71, 0xd3, 0x59, 0xee, 0xc5,
- 0x4f, 0x50, 0x0d, 0x82, 0x41, 0x5f, 0x0f, 0x3e, 0xe7, 0x4b, 0x9a, 0x37, 0x33, 0xbe, 0xf6, 0x74,
- 0x65, 0xdf, 0x59, 0x23, 0xb0, 0x8b, 0xca, 0x89, 0x54, 0x6f, 0x8b, 0x3f, 0x34, 0xff, 0xd3, 0xf4,
- 0xf3, 0xfc, 0x8f, 0x70, 0x9e, 0xd5, 0x7f, 0x8f, 0x1b, 0x0f, 0x36, 0x7f, 0x0d, 0xc8, 0x69, 0xbe,
- 0x06, 0x4f, 0x3f, 0x9d, 0x1c, 0x73, 0x66, 0x62, 0xfb, 0xd9, 0x68, 0x62, 0x15, 0xae, 0x27, 0x56,
- 0xe1, 0x66, 0x62, 0x15, 0xbe, 0xa4, 0x96, 0x31, 0x4a, 0x2d, 0xe3, 0x3a, 0xb5, 0x8c, 0x9b, 0xd4,
- 0x32, 0x7e, 0xa4, 0x96, 0xf1, 0xf5, 0xa7, 0x55, 0x78, 0xdf, 0xdc, 0xf6, 0xd5, 0xfb, 0x13, 0x00,
- 0x00, 0xff, 0xff, 0x7f, 0x15, 0x63, 0xd0, 0x18, 0x05, 0x00, 0x00,
-}
+func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
func (m *LeaseCandidate) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.protomessage.pb.go
new file mode 100644
index 00000000..93743262
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1alpha2/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha2
+
+func (*LeaseCandidate) ProtoMessage() {}
+
+func (*LeaseCandidateList) ProtoMessage() {}
+
+func (*LeaseCandidateSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.model_name.go
new file mode 100644
index 00000000..016d489f
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1alpha2/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidate) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1alpha2.LeaseCandidate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidateList) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1alpha2.LeaseCandidateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1alpha2.LeaseCandidateSpec"
+}
diff --git a/operator/vendor/k8s.io/api/coordination/v1beta1/doc.go b/operator/vendor/k8s.io/api/coordination/v1beta1/doc.go
index cab8becf..22c5a917 100644
--- a/operator/vendor/k8s.io/api/coordination/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/coordination/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.coordination.v1beta1
// +groupName=coordination.k8s.io
diff --git a/operator/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
index 52fd4167..f9330ae7 100644
--- a/operator/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
@@ -24,259 +24,25 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_coordination_v1 "k8s.io/api/coordination/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Lease) Reset() { *m = Lease{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
-func (m *Lease) Reset() { *m = Lease{} }
-func (*Lease) ProtoMessage() {}
-func (*Lease) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{0}
-}
-func (m *Lease) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Lease) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Lease.Merge(m, src)
-}
-func (m *Lease) XXX_Size() int {
- return m.Size()
-}
-func (m *Lease) XXX_DiscardUnknown() {
- xxx_messageInfo_Lease.DiscardUnknown(m)
-}
+func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
-var xxx_messageInfo_Lease proto.InternalMessageInfo
+func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
-func (m *LeaseCandidate) Reset() { *m = LeaseCandidate{} }
-func (*LeaseCandidate) ProtoMessage() {}
-func (*LeaseCandidate) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{1}
-}
-func (m *LeaseCandidate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidate.Merge(m, src)
-}
-func (m *LeaseCandidate) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidate) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidate.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCandidate proto.InternalMessageInfo
+func (m *LeaseList) Reset() { *m = LeaseList{} }
-func (m *LeaseCandidateList) Reset() { *m = LeaseCandidateList{} }
-func (*LeaseCandidateList) ProtoMessage() {}
-func (*LeaseCandidateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{2}
-}
-func (m *LeaseCandidateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidateList.Merge(m, src)
-}
-func (m *LeaseCandidateList) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidateList) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidateList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCandidateList proto.InternalMessageInfo
-
-func (m *LeaseCandidateSpec) Reset() { *m = LeaseCandidateSpec{} }
-func (*LeaseCandidateSpec) ProtoMessage() {}
-func (*LeaseCandidateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{3}
-}
-func (m *LeaseCandidateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseCandidateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseCandidateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseCandidateSpec.Merge(m, src)
-}
-func (m *LeaseCandidateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseCandidateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseCandidateSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseCandidateSpec proto.InternalMessageInfo
-
-func (m *LeaseList) Reset() { *m = LeaseList{} }
-func (*LeaseList) ProtoMessage() {}
-func (*LeaseList) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{4}
-}
-func (m *LeaseList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseList.Merge(m, src)
-}
-func (m *LeaseList) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseList) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseList proto.InternalMessageInfo
-
-func (m *LeaseSpec) Reset() { *m = LeaseSpec{} }
-func (*LeaseSpec) ProtoMessage() {}
-func (*LeaseSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_8d4e223b8bb23da3, []int{5}
-}
-func (m *LeaseSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LeaseSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseSpec.Merge(m, src)
-}
-func (m *LeaseSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease")
- proto.RegisterType((*LeaseCandidate)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidate")
- proto.RegisterType((*LeaseCandidateList)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateList")
- proto.RegisterType((*LeaseCandidateSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseCandidateSpec")
- proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList")
- proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_8d4e223b8bb23da3)
-}
-
-var fileDescriptor_8d4e223b8bb23da3 = []byte{
- // 750 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x4e, 0x1b, 0x39,
- 0x18, 0xcd, 0x40, 0xb2, 0x9b, 0x38, 0x04, 0xb2, 0x5e, 0x56, 0x1a, 0x71, 0x31, 0x83, 0x72, 0xb1,
- 0x42, 0x48, 0xeb, 0x59, 0x60, 0xb5, 0x5a, 0x6d, 0x55, 0xa9, 0x1d, 0x40, 0x2d, 0x6a, 0x68, 0x91,
- 0xa1, 0x95, 0x5a, 0x21, 0xb5, 0xce, 0x8c, 0x99, 0xb8, 0x30, 0x3f, 0xf5, 0x38, 0x54, 0xb9, 0xeb,
- 0x23, 0xf4, 0x69, 0x5a, 0xf5, 0x0d, 0xd2, 0x3b, 0x2e, 0xb9, 0x8a, 0xca, 0x54, 0xea, 0x43, 0xf4,
- 0xaa, 0xb2, 0x33, 0xf9, 0x27, 0x22, 0x6d, 0x11, 0x77, 0xf1, 0xf7, 0x9d, 0x73, 0xfc, 0x1d, 0xfb,
- 0x38, 0x1a, 0x60, 0x1d, 0xff, 0x17, 0x23, 0x16, 0x5a, 0x24, 0x62, 0x96, 0x13, 0x86, 0xdc, 0x65,
- 0x01, 0x11, 0x2c, 0x0c, 0xac, 0xd3, 0xb5, 0x1a, 0x15, 0x64, 0xcd, 0xf2, 0x68, 0x40, 0x39, 0x11,
- 0xd4, 0x45, 0x11, 0x0f, 0x45, 0x08, 0xcd, 0x0e, 0x01, 0x91, 0x88, 0xa1, 0x41, 0x02, 0x4a, 0x09,
- 0x4b, 0x7f, 0x79, 0x4c, 0xd4, 0x1b, 0x35, 0xe4, 0x84, 0xbe, 0xe5, 0x85, 0x5e, 0x68, 0x29, 0x5e,
- 0xad, 0x71, 0xa4, 0x56, 0x6a, 0xa1, 0x7e, 0x75, 0xf4, 0x96, 0x56, 0x27, 0x0f, 0x30, 0xba, 0xf7,
- 0xd2, 0x3f, 0x7d, 0xac, 0x4f, 0x9c, 0x3a, 0x0b, 0x28, 0x6f, 0x5a, 0xd1, 0xb1, 0x27, 0x0b, 0xb1,
- 0xe5, 0x53, 0x41, 0x2e, 0x63, 0x59, 0x93, 0x58, 0xbc, 0x11, 0x08, 0xe6, 0xd3, 0x31, 0xc2, 0xbf,
- 0x57, 0x11, 0x62, 0xa7, 0x4e, 0x7d, 0x32, 0xca, 0xab, 0xbc, 0xd7, 0x40, 0xae, 0x4a, 0x49, 0x4c,
- 0xe1, 0x0b, 0x90, 0x97, 0xd3, 0xb8, 0x44, 0x10, 0x5d, 0x5b, 0xd6, 0x56, 0x8a, 0xeb, 0x7f, 0xa3,
- 0xfe, 0xb9, 0xf5, 0x44, 0x51, 0x74, 0xec, 0xc9, 0x42, 0x8c, 0x24, 0x1a, 0x9d, 0xae, 0xa1, 0x47,
- 0xb5, 0x97, 0xd4, 0x11, 0xbb, 0x54, 0x10, 0x1b, 0xb6, 0xda, 0x66, 0x26, 0x69, 0x9b, 0xa0, 0x5f,
- 0xc3, 0x3d, 0x55, 0x58, 0x05, 0xd9, 0x38, 0xa2, 0x8e, 0x3e, 0xa3, 0xd4, 0x57, 0xd1, 0x15, 0xb7,
- 0x82, 0xd4, 0x5c, 0xfb, 0x11, 0x75, 0xec, 0xb9, 0x54, 0x37, 0x2b, 0x57, 0x58, 0xa9, 0x54, 0x3e,
- 0x6a, 0x60, 0x5e, 0x21, 0x36, 0x49, 0xe0, 0x32, 0x97, 0x88, 0x9b, 0xb0, 0xf0, 0x78, 0xc8, 0xc2,
- 0xc6, 0x74, 0x16, 0x7a, 0x03, 0x4e, 0xf4, 0xd2, 0xd2, 0x00, 0x1c, 0x86, 0x56, 0x59, 0x2c, 0xe0,
- 0xe1, 0x98, 0x1f, 0x34, 0x9d, 0x1f, 0xc9, 0x56, 0x6e, 0xca, 0xe9, 0x66, 0xf9, 0x6e, 0x65, 0xc0,
- 0xcb, 0x01, 0xc8, 0x31, 0x41, 0xfd, 0x58, 0x9f, 0x59, 0x9e, 0x5d, 0x29, 0xae, 0x5b, 0xdf, 0x69,
- 0xc6, 0x2e, 0xa5, 0xda, 0xb9, 0x1d, 0xa9, 0x82, 0x3b, 0x62, 0x95, 0x2f, 0xb3, 0xa3, 0x56, 0xa4,
- 0x4f, 0x68, 0x81, 0xc2, 0x89, 0xac, 0x3e, 0x24, 0x3e, 0x55, 0x5e, 0x0a, 0xf6, 0x6f, 0x29, 0xbf,
- 0x50, 0xed, 0x36, 0x70, 0x1f, 0x03, 0x9f, 0x82, 0x7c, 0xc4, 0x02, 0xef, 0x80, 0xf9, 0x34, 0x3d,
- 0x6d, 0x6b, 0x3a, 0xef, 0xbb, 0xcc, 0xe1, 0xa1, 0xa4, 0xd9, 0x73, 0xd2, 0xf8, 0x5e, 0x2a, 0x82,
- 0x7b, 0x72, 0xf0, 0x10, 0x14, 0x38, 0x0d, 0xe8, 0x6b, 0xa5, 0x3d, 0xfb, 0x63, 0xda, 0x25, 0x39,
- 0x38, 0xee, 0xaa, 0xe0, 0xbe, 0x20, 0xbc, 0x05, 0x4a, 0x35, 0x16, 0x10, 0xde, 0x7c, 0x42, 0x79,
- 0xcc, 0xc2, 0x40, 0xcf, 0x2a, 0xb7, 0x7f, 0xa4, 0x6e, 0x4b, 0xf6, 0x60, 0x13, 0x0f, 0x63, 0xe1,
- 0x16, 0x28, 0x53, 0xbf, 0x71, 0xa2, 0xce, 0xbd, 0xcb, 0xcf, 0x29, 0xbe, 0x9e, 0xf2, 0xcb, 0xdb,
- 0x23, 0x7d, 0x3c, 0xc6, 0x80, 0x0e, 0xc8, 0xc7, 0x42, 0xbe, 0x72, 0xaf, 0xa9, 0xff, 0xa2, 0xd8,
- 0xf7, 0xba, 0x39, 0xd8, 0x4f, 0xeb, 0x5f, 0xdb, 0xe6, 0xc6, 0xe4, 0x7f, 0x31, 0xb4, 0xd9, 0x5d,
- 0x53, 0xb7, 0xf3, 0x0a, 0x53, 0x1a, 0xee, 0x09, 0x57, 0xde, 0x69, 0xa0, 0x73, 0x73, 0x37, 0x10,
- 0xd5, 0x07, 0xc3, 0x51, 0xfd, 0x73, 0xba, 0xa8, 0x4e, 0x48, 0xe8, 0x87, 0x6c, 0x3a, 0xb8, 0x0a,
- 0xe6, 0xff, 0x60, 0xbe, 0x1e, 0x9e, 0xb8, 0x94, 0xef, 0xb8, 0x34, 0x10, 0x4c, 0x34, 0xd3, 0x74,
- 0xc2, 0xa4, 0x6d, 0xce, 0xdf, 0x1f, 0xea, 0xe0, 0x11, 0x24, 0xac, 0x82, 0x45, 0x15, 0xd8, 0xad,
- 0x06, 0x57, 0xdb, 0xef, 0x53, 0x27, 0x0c, 0xdc, 0x58, 0xe5, 0x35, 0x67, 0xeb, 0x49, 0xdb, 0x5c,
- 0xac, 0x5e, 0xd2, 0xc7, 0x97, 0xb2, 0x60, 0x0d, 0x14, 0x89, 0xf3, 0xaa, 0xc1, 0x38, 0xfd, 0x99,
- 0x60, 0x2e, 0x24, 0x6d, 0xb3, 0x78, 0xb7, 0xaf, 0x83, 0x07, 0x45, 0x87, 0xa3, 0x9f, 0xbd, 0xee,
- 0xe8, 0xdf, 0x01, 0x65, 0xe5, 0xec, 0x80, 0x93, 0x20, 0x66, 0xd2, 0x5b, 0xac, 0xd2, 0x9b, 0xb3,
- 0x17, 0x65, 0x72, 0xab, 0x23, 0x3d, 0x3c, 0x86, 0x86, 0xcf, 0xc7, 0x92, 0xbb, 0x79, 0xad, 0xa9,
- 0x85, 0xb7, 0xc1, 0x42, 0xc4, 0xe9, 0x11, 0xe5, 0x9c, 0xba, 0x9d, 0xdb, 0xd5, 0x7f, 0x55, 0xfb,
- 0xfc, 0x9e, 0xb4, 0xcd, 0x85, 0xbd, 0xe1, 0x16, 0x1e, 0xc5, 0xda, 0xdb, 0xad, 0x0b, 0x23, 0x73,
- 0x76, 0x61, 0x64, 0xce, 0x2f, 0x8c, 0xcc, 0x9b, 0xc4, 0xd0, 0x5a, 0x89, 0xa1, 0x9d, 0x25, 0x86,
- 0x76, 0x9e, 0x18, 0xda, 0xa7, 0xc4, 0xd0, 0xde, 0x7e, 0x36, 0x32, 0xcf, 0xcc, 0x2b, 0x3e, 0x50,
- 0xbe, 0x05, 0x00, 0x00, 0xff, 0xff, 0xff, 0x56, 0x51, 0x57, 0xc2, 0x08, 0x00, 0x00,
-}
+func (m *LeaseSpec) Reset() { *m = LeaseSpec{} }
func (m *Lease) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/coordination/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/coordination/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a1a88e55
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,34 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*Lease) ProtoMessage() {}
+
+func (*LeaseCandidate) ProtoMessage() {}
+
+func (*LeaseCandidateList) ProtoMessage() {}
+
+func (*LeaseCandidateSpec) ProtoMessage() {}
+
+func (*LeaseList) ProtoMessage() {}
+
+func (*LeaseSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/coordination/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/coordination/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..53b291db
--- /dev/null
+++ b/operator/vendor/k8s.io/api/coordination/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,52 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Lease) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.Lease"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidate) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.LeaseCandidate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidateList) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.LeaseCandidateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseCandidateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.LeaseCandidateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseList) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.LeaseList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LeaseSpec) OpenAPIModelName() string {
+ return "io.k8s.api.coordination.v1beta1.LeaseSpec"
+}
diff --git a/operator/vendor/k8s.io/api/core/v1/doc.go b/operator/vendor/k8s.io/api/core/v1/doc.go
index e4e9196a..a41f2ce1 100644
--- a/operator/vendor/k8s.io/api/core/v1/doc.go
+++ b/operator/vendor/k8s.io/api/core/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.core.v1
+
// +groupName=
// Package v1 is the v1 version of the core API.
diff --git a/operator/vendor/k8s.io/api/core/v1/generated.pb.go b/operator/vendor/k8s.io/api/core/v1/generated.pb.go
index e1a297b9..b7de1bea 100644
--- a/operator/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -23,14 +23,12 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -38,7973 +36,481 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *Affinity) Reset() { *m = Affinity{} }
-func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} }
-func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {}
-func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{0}
-}
-func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src)
-}
-func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo
-
-func (m *Affinity) Reset() { *m = Affinity{} }
-func (*Affinity) ProtoMessage() {}
-func (*Affinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{1}
-}
-func (m *Affinity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Affinity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Affinity.Merge(m, src)
-}
-func (m *Affinity) XXX_Size() int {
- return m.Size()
-}
-func (m *Affinity) XXX_DiscardUnknown() {
- xxx_messageInfo_Affinity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Affinity proto.InternalMessageInfo
-
-func (m *AppArmorProfile) Reset() { *m = AppArmorProfile{} }
-func (*AppArmorProfile) ProtoMessage() {}
-func (*AppArmorProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{2}
-}
-func (m *AppArmorProfile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AppArmorProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AppArmorProfile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AppArmorProfile.Merge(m, src)
-}
-func (m *AppArmorProfile) XXX_Size() int {
- return m.Size()
-}
-func (m *AppArmorProfile) XXX_DiscardUnknown() {
- xxx_messageInfo_AppArmorProfile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AppArmorProfile proto.InternalMessageInfo
-
-func (m *AttachedVolume) Reset() { *m = AttachedVolume{} }
-func (*AttachedVolume) ProtoMessage() {}
-func (*AttachedVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{3}
-}
-func (m *AttachedVolume) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AttachedVolume) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AttachedVolume.Merge(m, src)
-}
-func (m *AttachedVolume) XXX_Size() int {
- return m.Size()
-}
-func (m *AttachedVolume) XXX_DiscardUnknown() {
- xxx_messageInfo_AttachedVolume.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo
-
-func (m *AvoidPods) Reset() { *m = AvoidPods{} }
-func (*AvoidPods) ProtoMessage() {}
-func (*AvoidPods) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{4}
-}
-func (m *AvoidPods) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AvoidPods) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AvoidPods.Merge(m, src)
-}
-func (m *AvoidPods) XXX_Size() int {
- return m.Size()
-}
-func (m *AvoidPods) XXX_DiscardUnknown() {
- xxx_messageInfo_AvoidPods.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AvoidPods proto.InternalMessageInfo
-
-func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} }
-func (*AzureDiskVolumeSource) ProtoMessage() {}
-func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{5}
-}
-func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src)
-}
-func (m *AzureDiskVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo
-
-func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} }
-func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
-func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{6}
-}
-func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src)
-}
-func (m *AzureFilePersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo
-
-func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} }
-func (*AzureFileVolumeSource) ProtoMessage() {}
-func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{7}
-}
-func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AzureFileVolumeSource.Merge(m, src)
-}
-func (m *AzureFileVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *AzureFileVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo
-
-func (m *Binding) Reset() { *m = Binding{} }
-func (*Binding) ProtoMessage() {}
-func (*Binding) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{8}
-}
-func (m *Binding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Binding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Binding.Merge(m, src)
-}
-func (m *Binding) XXX_Size() int {
- return m.Size()
-}
-func (m *Binding) XXX_DiscardUnknown() {
- xxx_messageInfo_Binding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Binding proto.InternalMessageInfo
-
-func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} }
-func (*CSIPersistentVolumeSource) ProtoMessage() {}
-func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{9}
-}
-func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src)
-}
-func (m *CSIPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} }
-func (*CSIVolumeSource) ProtoMessage() {}
-func (*CSIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{10}
-}
-func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIVolumeSource.Merge(m, src)
-}
-func (m *CSIVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo
-
-func (m *Capabilities) Reset() { *m = Capabilities{} }
-func (*Capabilities) ProtoMessage() {}
-func (*Capabilities) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{11}
-}
-func (m *Capabilities) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Capabilities) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Capabilities.Merge(m, src)
-}
-func (m *Capabilities) XXX_Size() int {
- return m.Size()
-}
-func (m *Capabilities) XXX_DiscardUnknown() {
- xxx_messageInfo_Capabilities.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Capabilities proto.InternalMessageInfo
-
-func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} }
-func (*CephFSPersistentVolumeSource) ProtoMessage() {}
-func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{12}
-}
-func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src)
-}
-func (m *CephFSPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} }
-func (*CephFSVolumeSource) ProtoMessage() {}
-func (*CephFSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{13}
-}
-func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CephFSVolumeSource.Merge(m, src)
-}
-func (m *CephFSVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CephFSVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo
-
-func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} }
-func (*CinderPersistentVolumeSource) ProtoMessage() {}
-func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{14}
-}
-func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src)
-}
-func (m *CinderPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} }
-func (*CinderVolumeSource) ProtoMessage() {}
-func (*CinderVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{15}
-}
-func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CinderVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CinderVolumeSource.Merge(m, src)
-}
-func (m *CinderVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *CinderVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo
-
-func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} }
-func (*ClientIPConfig) ProtoMessage() {}
-func (*ClientIPConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{16}
-}
-func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClientIPConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientIPConfig.Merge(m, src)
-}
-func (m *ClientIPConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *ClientIPConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientIPConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo
-
-func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} }
-func (*ClusterTrustBundleProjection) ProtoMessage() {}
-func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{17}
-}
-func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src)
-}
-func (m *ClusterTrustBundleProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo
-
-func (m *ComponentCondition) Reset() { *m = ComponentCondition{} }
-func (*ComponentCondition) ProtoMessage() {}
-func (*ComponentCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{18}
-}
-func (m *ComponentCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ComponentCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ComponentCondition.Merge(m, src)
-}
-func (m *ComponentCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *ComponentCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_ComponentCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo
-
-func (m *ComponentStatus) Reset() { *m = ComponentStatus{} }
-func (*ComponentStatus) ProtoMessage() {}
-func (*ComponentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{19}
-}
-func (m *ComponentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ComponentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ComponentStatus.Merge(m, src)
-}
-func (m *ComponentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ComponentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ComponentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo
-
-func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} }
-func (*ComponentStatusList) ProtoMessage() {}
-func (*ComponentStatusList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{20}
-}
-func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ComponentStatusList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ComponentStatusList.Merge(m, src)
-}
-func (m *ComponentStatusList) XXX_Size() int {
- return m.Size()
-}
-func (m *ComponentStatusList) XXX_DiscardUnknown() {
- xxx_messageInfo_ComponentStatusList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo
-
-func (m *ConfigMap) Reset() { *m = ConfigMap{} }
-func (*ConfigMap) ProtoMessage() {}
-func (*ConfigMap) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{21}
-}
-func (m *ConfigMap) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMap) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMap.Merge(m, src)
-}
-func (m *ConfigMap) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMap) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMap.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMap proto.InternalMessageInfo
-
-func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} }
-func (*ConfigMapEnvSource) ProtoMessage() {}
-func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{22}
-}
-func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapEnvSource.Merge(m, src)
-}
-func (m *ConfigMapEnvSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapEnvSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo
-
-func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} }
-func (*ConfigMapKeySelector) ProtoMessage() {}
-func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{23}
-}
-func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapKeySelector.Merge(m, src)
-}
-func (m *ConfigMapKeySelector) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapKeySelector) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo
-
-func (m *ConfigMapList) Reset() { *m = ConfigMapList{} }
-func (*ConfigMapList) ProtoMessage() {}
-func (*ConfigMapList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{24}
-}
-func (m *ConfigMapList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapList.Merge(m, src)
-}
-func (m *ConfigMapList) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapList) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo
-
-func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} }
-func (*ConfigMapNodeConfigSource) ProtoMessage() {}
-func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{25}
-}
-func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src)
-}
-func (m *ConfigMapNodeConfigSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo
-
-func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} }
-func (*ConfigMapProjection) ProtoMessage() {}
-func (*ConfigMapProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{26}
-}
-func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapProjection.Merge(m, src)
-}
-func (m *ConfigMapProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo
-
-func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} }
-func (*ConfigMapVolumeSource) ProtoMessage() {}
-func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{27}
-}
-func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src)
-}
-func (m *ConfigMapVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo
-
-func (m *Container) Reset() { *m = Container{} }
-func (*Container) ProtoMessage() {}
-func (*Container) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{28}
-}
-func (m *Container) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Container) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Container.Merge(m, src)
-}
-func (m *Container) XXX_Size() int {
- return m.Size()
-}
-func (m *Container) XXX_DiscardUnknown() {
- xxx_messageInfo_Container.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Container proto.InternalMessageInfo
-
-func (m *ContainerExtendedResourceRequest) Reset() { *m = ContainerExtendedResourceRequest{} }
-func (*ContainerExtendedResourceRequest) ProtoMessage() {}
-func (*ContainerExtendedResourceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{29}
-}
-func (m *ContainerExtendedResourceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerExtendedResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerExtendedResourceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerExtendedResourceRequest.Merge(m, src)
-}
-func (m *ContainerExtendedResourceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerExtendedResourceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerExtendedResourceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerExtendedResourceRequest proto.InternalMessageInfo
-
-func (m *ContainerImage) Reset() { *m = ContainerImage{} }
-func (*ContainerImage) ProtoMessage() {}
-func (*ContainerImage) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{30}
-}
-func (m *ContainerImage) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerImage) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerImage.Merge(m, src)
-}
-func (m *ContainerImage) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerImage) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerImage.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerImage proto.InternalMessageInfo
-
-func (m *ContainerPort) Reset() { *m = ContainerPort{} }
-func (*ContainerPort) ProtoMessage() {}
-func (*ContainerPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{31}
-}
-func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerPort.Merge(m, src)
-}
-func (m *ContainerPort) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerPort) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerPort.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
-
-func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} }
-func (*ContainerResizePolicy) ProtoMessage() {}
-func (*ContainerResizePolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{32}
-}
-func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerResizePolicy.Merge(m, src)
-}
-func (m *ContainerResizePolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerResizePolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo
-
-func (m *ContainerRestartRule) Reset() { *m = ContainerRestartRule{} }
-func (*ContainerRestartRule) ProtoMessage() {}
-func (*ContainerRestartRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{33}
-}
-func (m *ContainerRestartRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerRestartRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerRestartRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerRestartRule.Merge(m, src)
-}
-func (m *ContainerRestartRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerRestartRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerRestartRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerRestartRule proto.InternalMessageInfo
-
-func (m *ContainerRestartRuleOnExitCodes) Reset() { *m = ContainerRestartRuleOnExitCodes{} }
-func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {}
-func (*ContainerRestartRuleOnExitCodes) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{34}
-}
-func (m *ContainerRestartRuleOnExitCodes) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerRestartRuleOnExitCodes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerRestartRuleOnExitCodes) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerRestartRuleOnExitCodes.Merge(m, src)
-}
-func (m *ContainerRestartRuleOnExitCodes) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerRestartRuleOnExitCodes) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerRestartRuleOnExitCodes.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerRestartRuleOnExitCodes proto.InternalMessageInfo
-
-func (m *ContainerState) Reset() { *m = ContainerState{} }
-func (*ContainerState) ProtoMessage() {}
-func (*ContainerState) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{35}
-}
-func (m *ContainerState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerState.Merge(m, src)
-}
-func (m *ContainerState) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerState) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerState proto.InternalMessageInfo
-
-func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} }
-func (*ContainerStateRunning) ProtoMessage() {}
-func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{36}
-}
-func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerStateRunning) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerStateRunning.Merge(m, src)
-}
-func (m *ContainerStateRunning) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerStateRunning) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
-
-func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} }
-func (*ContainerStateTerminated) ProtoMessage() {}
-func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{37}
-}
-func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerStateTerminated.Merge(m, src)
-}
-func (m *ContainerStateTerminated) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerStateTerminated) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo
-
-func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} }
-func (*ContainerStateWaiting) ProtoMessage() {}
-func (*ContainerStateWaiting) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{38}
-}
-func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerStateWaiting.Merge(m, src)
-}
-func (m *ContainerStateWaiting) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerStateWaiting) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo
-
-func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
-func (*ContainerStatus) ProtoMessage() {}
-func (*ContainerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{39}
-}
-func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerStatus.Merge(m, src)
-}
-func (m *ContainerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
-
-func (m *ContainerUser) Reset() { *m = ContainerUser{} }
-func (*ContainerUser) ProtoMessage() {}
-func (*ContainerUser) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{40}
-}
-func (m *ContainerUser) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ContainerUser) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ContainerUser.Merge(m, src)
-}
-func (m *ContainerUser) XXX_Size() int {
- return m.Size()
-}
-func (m *ContainerUser) XXX_DiscardUnknown() {
- xxx_messageInfo_ContainerUser.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ContainerUser proto.InternalMessageInfo
-
-func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} }
-func (*DaemonEndpoint) ProtoMessage() {}
-func (*DaemonEndpoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{41}
-}
-func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonEndpoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonEndpoint.Merge(m, src)
-}
-func (m *DaemonEndpoint) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonEndpoint) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo
-
-func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} }
-func (*DownwardAPIProjection) ProtoMessage() {}
-func (*DownwardAPIProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{42}
-}
-func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DownwardAPIProjection.Merge(m, src)
-}
-func (m *DownwardAPIProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *DownwardAPIProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo
-
-func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} }
-func (*DownwardAPIVolumeFile) ProtoMessage() {}
-func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{43}
-}
-func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src)
-}
-func (m *DownwardAPIVolumeFile) XXX_Size() int {
- return m.Size()
-}
-func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() {
- xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo
-
-func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} }
-func (*DownwardAPIVolumeSource) ProtoMessage() {}
-func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{44}
-}
-func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src)
-}
-func (m *DownwardAPIVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo
-
-func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} }
-func (*EmptyDirVolumeSource) ProtoMessage() {}
-func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{45}
-}
-func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src)
-}
-func (m *EmptyDirVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo
-
-func (m *EndpointAddress) Reset() { *m = EndpointAddress{} }
-func (*EndpointAddress) ProtoMessage() {}
-func (*EndpointAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{46}
-}
-func (m *EndpointAddress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointAddress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointAddress.Merge(m, src)
-}
-func (m *EndpointAddress) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointAddress) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo
-
-func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-func (*EndpointPort) ProtoMessage() {}
-func (*EndpointPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{47}
-}
-func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointPort.Merge(m, src)
-}
-func (m *EndpointPort) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointPort) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointPort.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
-
-func (m *EndpointSubset) Reset() { *m = EndpointSubset{} }
-func (*EndpointSubset) ProtoMessage() {}
-func (*EndpointSubset) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{48}
-}
-func (m *EndpointSubset) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointSubset) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointSubset.Merge(m, src)
-}
-func (m *EndpointSubset) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointSubset) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointSubset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo
-
-func (m *Endpoints) Reset() { *m = Endpoints{} }
-func (*Endpoints) ProtoMessage() {}
-func (*Endpoints) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{49}
-}
-func (m *Endpoints) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Endpoints) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Endpoints.Merge(m, src)
-}
-func (m *Endpoints) XXX_Size() int {
- return m.Size()
-}
-func (m *Endpoints) XXX_DiscardUnknown() {
- xxx_messageInfo_Endpoints.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Endpoints proto.InternalMessageInfo
-
-func (m *EndpointsList) Reset() { *m = EndpointsList{} }
-func (*EndpointsList) ProtoMessage() {}
-func (*EndpointsList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{50}
-}
-func (m *EndpointsList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointsList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointsList.Merge(m, src)
-}
-func (m *EndpointsList) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointsList) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointsList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointsList proto.InternalMessageInfo
-
-func (m *EnvFromSource) Reset() { *m = EnvFromSource{} }
-func (*EnvFromSource) ProtoMessage() {}
-func (*EnvFromSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{51}
-}
-func (m *EnvFromSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EnvFromSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnvFromSource.Merge(m, src)
-}
-func (m *EnvFromSource) XXX_Size() int {
- return m.Size()
-}
-func (m *EnvFromSource) XXX_DiscardUnknown() {
- xxx_messageInfo_EnvFromSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo
-
-func (m *EnvVar) Reset() { *m = EnvVar{} }
-func (*EnvVar) ProtoMessage() {}
-func (*EnvVar) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{52}
-}
-func (m *EnvVar) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EnvVar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnvVar.Merge(m, src)
-}
-func (m *EnvVar) XXX_Size() int {
- return m.Size()
-}
-func (m *EnvVar) XXX_DiscardUnknown() {
- xxx_messageInfo_EnvVar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnvVar proto.InternalMessageInfo
-
-func (m *EnvVarSource) Reset() { *m = EnvVarSource{} }
-func (*EnvVarSource) ProtoMessage() {}
-func (*EnvVarSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{53}
-}
-func (m *EnvVarSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EnvVarSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EnvVarSource.Merge(m, src)
-}
-func (m *EnvVarSource) XXX_Size() int {
- return m.Size()
-}
-func (m *EnvVarSource) XXX_DiscardUnknown() {
- xxx_messageInfo_EnvVarSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo
-
-func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} }
-func (*EphemeralContainer) ProtoMessage() {}
-func (*EphemeralContainer) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{54}
-}
-func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EphemeralContainer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EphemeralContainer.Merge(m, src)
-}
-func (m *EphemeralContainer) XXX_Size() int {
- return m.Size()
-}
-func (m *EphemeralContainer) XXX_DiscardUnknown() {
- xxx_messageInfo_EphemeralContainer.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo
-
-func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} }
-func (*EphemeralContainerCommon) ProtoMessage() {}
-func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{55}
-}
-func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EphemeralContainerCommon.Merge(m, src)
-}
-func (m *EphemeralContainerCommon) XXX_Size() int {
- return m.Size()
-}
-func (m *EphemeralContainerCommon) XXX_DiscardUnknown() {
- xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo
-
-func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} }
-func (*EphemeralVolumeSource) ProtoMessage() {}
-func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{56}
-}
-func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EphemeralVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EphemeralVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EphemeralVolumeSource.Merge(m, src)
-}
-func (m *EphemeralVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *EphemeralVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_EphemeralVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo
-
-func (m *Event) Reset() { *m = Event{} }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{57}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func (m *EventList) Reset() { *m = EventList{} }
-func (*EventList) ProtoMessage() {}
-func (*EventList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{58}
-}
-func (m *EventList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventList.Merge(m, src)
-}
-func (m *EventList) XXX_Size() int {
- return m.Size()
-}
-func (m *EventList) XXX_DiscardUnknown() {
- xxx_messageInfo_EventList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventList proto.InternalMessageInfo
-
-func (m *EventSeries) Reset() { *m = EventSeries{} }
-func (*EventSeries) ProtoMessage() {}
-func (*EventSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{59}
-}
-func (m *EventSeries) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventSeries.Merge(m, src)
-}
-func (m *EventSeries) XXX_Size() int {
- return m.Size()
-}
-func (m *EventSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_EventSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventSeries proto.InternalMessageInfo
-
-func (m *EventSource) Reset() { *m = EventSource{} }
-func (*EventSource) ProtoMessage() {}
-func (*EventSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{60}
-}
-func (m *EventSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventSource.Merge(m, src)
-}
-func (m *EventSource) XXX_Size() int {
- return m.Size()
-}
-func (m *EventSource) XXX_DiscardUnknown() {
- xxx_messageInfo_EventSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventSource proto.InternalMessageInfo
-
-func (m *ExecAction) Reset() { *m = ExecAction{} }
-func (*ExecAction) ProtoMessage() {}
-func (*ExecAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{61}
-}
-func (m *ExecAction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExecAction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExecAction.Merge(m, src)
-}
-func (m *ExecAction) XXX_Size() int {
- return m.Size()
-}
-func (m *ExecAction) XXX_DiscardUnknown() {
- xxx_messageInfo_ExecAction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExecAction proto.InternalMessageInfo
-
-func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} }
-func (*FCVolumeSource) ProtoMessage() {}
-func (*FCVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{62}
-}
-func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FCVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FCVolumeSource.Merge(m, src)
-}
-func (m *FCVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *FCVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_FCVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
-
-func (m *FileKeySelector) Reset() { *m = FileKeySelector{} }
-func (*FileKeySelector) ProtoMessage() {}
-func (*FileKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{63}
-}
-func (m *FileKeySelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FileKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FileKeySelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileKeySelector.Merge(m, src)
-}
-func (m *FileKeySelector) XXX_Size() int {
- return m.Size()
-}
-func (m *FileKeySelector) XXX_DiscardUnknown() {
- xxx_messageInfo_FileKeySelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileKeySelector proto.InternalMessageInfo
-
-func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} }
-func (*FlexPersistentVolumeSource) ProtoMessage() {}
-func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{64}
-}
-func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src)
-}
-func (m *FlexPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
-func (*FlexVolumeSource) ProtoMessage() {}
-func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{65}
-}
-func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlexVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlexVolumeSource.Merge(m, src)
-}
-func (m *FlexVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *FlexVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
-
-func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
-func (*FlockerVolumeSource) ProtoMessage() {}
-func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{66}
-}
-func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlockerVolumeSource.Merge(m, src)
-}
-func (m *FlockerVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *FlockerVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
-
-func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
-func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
-func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{67}
-}
-func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src)
-}
-func (m *GCEPersistentDiskVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
-
-func (m *GRPCAction) Reset() { *m = GRPCAction{} }
-func (*GRPCAction) ProtoMessage() {}
-func (*GRPCAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{68}
-}
-func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GRPCAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GRPCAction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GRPCAction.Merge(m, src)
-}
-func (m *GRPCAction) XXX_Size() int {
- return m.Size()
-}
-func (m *GRPCAction) XXX_DiscardUnknown() {
- xxx_messageInfo_GRPCAction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
-
-func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
-func (*GitRepoVolumeSource) ProtoMessage() {}
-func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{69}
-}
-func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GitRepoVolumeSource.Merge(m, src)
-}
-func (m *GitRepoVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *GitRepoVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
-
-func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
-func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
-func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{70}
-}
-func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src)
-}
-func (m *GlusterfsPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
-func (*GlusterfsVolumeSource) ProtoMessage() {}
-func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{71}
-}
-func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src)
-}
-func (m *GlusterfsVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
-
-func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
-func (*HTTPGetAction) ProtoMessage() {}
-func (*HTTPGetAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{72}
-}
-func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPGetAction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPGetAction.Merge(m, src)
-}
-func (m *HTTPGetAction) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPGetAction) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPGetAction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
-
-func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
-func (*HTTPHeader) ProtoMessage() {}
-func (*HTTPHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{73}
-}
-func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPHeader.Merge(m, src)
-}
-func (m *HTTPHeader) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
-
-func (m *HostAlias) Reset() { *m = HostAlias{} }
-func (*HostAlias) ProtoMessage() {}
-func (*HostAlias) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{74}
-}
-func (m *HostAlias) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HostAlias) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HostAlias.Merge(m, src)
-}
-func (m *HostAlias) XXX_Size() int {
- return m.Size()
-}
-func (m *HostAlias) XXX_DiscardUnknown() {
- xxx_messageInfo_HostAlias.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HostAlias proto.InternalMessageInfo
-
-func (m *HostIP) Reset() { *m = HostIP{} }
-func (*HostIP) ProtoMessage() {}
-func (*HostIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{75}
-}
-func (m *HostIP) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HostIP) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HostIP.Merge(m, src)
-}
-func (m *HostIP) XXX_Size() int {
- return m.Size()
-}
-func (m *HostIP) XXX_DiscardUnknown() {
- xxx_messageInfo_HostIP.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HostIP proto.InternalMessageInfo
-
-func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
-func (*HostPathVolumeSource) ProtoMessage() {}
-func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{76}
-}
-func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HostPathVolumeSource.Merge(m, src)
-}
-func (m *HostPathVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *HostPathVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
-
-func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
-func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
-func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{77}
-}
-func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src)
-}
-func (m *ISCSIPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
-
-func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
-func (*ISCSIVolumeSource) ProtoMessage() {}
-func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{78}
-}
-func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ISCSIVolumeSource.Merge(m, src)
-}
-func (m *ISCSIVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ISCSIVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
-
-func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} }
-func (*ImageVolumeSource) ProtoMessage() {}
-func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{79}
-}
-func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ImageVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ImageVolumeSource.Merge(m, src)
-}
-func (m *ImageVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ImageVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
-
-func (m *KeyToPath) Reset() { *m = KeyToPath{} }
-func (*KeyToPath) ProtoMessage() {}
-func (*KeyToPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{80}
-}
-func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *KeyToPath) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyToPath.Merge(m, src)
-}
-func (m *KeyToPath) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyToPath) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyToPath.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
-
-func (m *Lifecycle) Reset() { *m = Lifecycle{} }
-func (*Lifecycle) ProtoMessage() {}
-func (*Lifecycle) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{81}
-}
-func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Lifecycle) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Lifecycle.Merge(m, src)
-}
-func (m *Lifecycle) XXX_Size() int {
- return m.Size()
-}
-func (m *Lifecycle) XXX_DiscardUnknown() {
- xxx_messageInfo_Lifecycle.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
-
-func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} }
-func (*LifecycleHandler) ProtoMessage() {}
-func (*LifecycleHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{82}
-}
-func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LifecycleHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LifecycleHandler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LifecycleHandler.Merge(m, src)
-}
-func (m *LifecycleHandler) XXX_Size() int {
- return m.Size()
-}
-func (m *LifecycleHandler) XXX_DiscardUnknown() {
- xxx_messageInfo_LifecycleHandler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
-
-func (m *LimitRange) Reset() { *m = LimitRange{} }
-func (*LimitRange) ProtoMessage() {}
-func (*LimitRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{83}
-}
-func (m *LimitRange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitRange.Merge(m, src)
-}
-func (m *LimitRange) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitRange) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitRange proto.InternalMessageInfo
-
-func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
-func (*LimitRangeItem) ProtoMessage() {}
-func (*LimitRangeItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{84}
-}
-func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitRangeItem) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitRangeItem.Merge(m, src)
-}
-func (m *LimitRangeItem) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitRangeItem) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitRangeItem.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
-
-func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
-func (*LimitRangeList) ProtoMessage() {}
-func (*LimitRangeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{85}
-}
-func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitRangeList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitRangeList.Merge(m, src)
-}
-func (m *LimitRangeList) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitRangeList) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitRangeList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
-
-func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
-func (*LimitRangeSpec) ProtoMessage() {}
-func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{86}
-}
-func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitRangeSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitRangeSpec.Merge(m, src)
-}
-func (m *LimitRangeSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitRangeSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
-
-func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} }
-func (*LinuxContainerUser) ProtoMessage() {}
-func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{87}
-}
-func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LinuxContainerUser) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LinuxContainerUser.Merge(m, src)
-}
-func (m *LinuxContainerUser) XXX_Size() int {
- return m.Size()
-}
-func (m *LinuxContainerUser) XXX_DiscardUnknown() {
- xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
-
-func (m *List) Reset() { *m = List{} }
-func (*List) ProtoMessage() {}
-func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{88}
-}
-func (m *List) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *List) XXX_Merge(src proto.Message) {
- xxx_messageInfo_List.Merge(m, src)
-}
-func (m *List) XXX_Size() int {
- return m.Size()
-}
-func (m *List) XXX_DiscardUnknown() {
- xxx_messageInfo_List.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_List proto.InternalMessageInfo
-
-func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
-func (*LoadBalancerIngress) ProtoMessage() {}
-func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{89}
-}
-func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LoadBalancerIngress.Merge(m, src)
-}
-func (m *LoadBalancerIngress) XXX_Size() int {
- return m.Size()
-}
-func (m *LoadBalancerIngress) XXX_DiscardUnknown() {
- xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
-
-func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
-func (*LoadBalancerStatus) ProtoMessage() {}
-func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{90}
-}
-func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LoadBalancerStatus.Merge(m, src)
-}
-func (m *LoadBalancerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *LoadBalancerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
-
-func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
-func (*LocalObjectReference) ProtoMessage() {}
-func (*LocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{91}
-}
-func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LocalObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LocalObjectReference.Merge(m, src)
-}
-func (m *LocalObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *LocalObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_LocalObjectReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
-
-func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
-func (*LocalVolumeSource) ProtoMessage() {}
-func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{92}
-}
-func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LocalVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LocalVolumeSource.Merge(m, src)
-}
-func (m *LocalVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *LocalVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
-
-func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} }
-func (*ModifyVolumeStatus) ProtoMessage() {}
-func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{93}
-}
-func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ModifyVolumeStatus.Merge(m, src)
-}
-func (m *ModifyVolumeStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ModifyVolumeStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
-
-func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
-func (*NFSVolumeSource) ProtoMessage() {}
-func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{94}
-}
-func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NFSVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NFSVolumeSource.Merge(m, src)
-}
-func (m *NFSVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *NFSVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
-
-func (m *Namespace) Reset() { *m = Namespace{} }
-func (*Namespace) ProtoMessage() {}
-func (*Namespace) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{95}
-}
-func (m *Namespace) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Namespace) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Namespace.Merge(m, src)
-}
-func (m *Namespace) XXX_Size() int {
- return m.Size()
-}
-func (m *Namespace) XXX_DiscardUnknown() {
- xxx_messageInfo_Namespace.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Namespace proto.InternalMessageInfo
-
-func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
-func (*NamespaceCondition) ProtoMessage() {}
-func (*NamespaceCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{96}
-}
-func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamespaceCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamespaceCondition.Merge(m, src)
-}
-func (m *NamespaceCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *NamespaceCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_NamespaceCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
-
-func (m *NamespaceList) Reset() { *m = NamespaceList{} }
-func (*NamespaceList) ProtoMessage() {}
-func (*NamespaceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{97}
-}
-func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamespaceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamespaceList.Merge(m, src)
-}
-func (m *NamespaceList) XXX_Size() int {
- return m.Size()
-}
-func (m *NamespaceList) XXX_DiscardUnknown() {
- xxx_messageInfo_NamespaceList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
-
-func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
-func (*NamespaceSpec) ProtoMessage() {}
-func (*NamespaceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{98}
-}
-func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamespaceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamespaceSpec.Merge(m, src)
-}
-func (m *NamespaceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *NamespaceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_NamespaceSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
-
-func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
-func (*NamespaceStatus) ProtoMessage() {}
-func (*NamespaceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{99}
-}
-func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NamespaceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NamespaceStatus.Merge(m, src)
-}
-func (m *NamespaceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NamespaceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NamespaceStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
-
-func (m *Node) Reset() { *m = Node{} }
-func (*Node) ProtoMessage() {}
-func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{100}
-}
-func (m *Node) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Node) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Node.Merge(m, src)
-}
-func (m *Node) XXX_Size() int {
- return m.Size()
-}
-func (m *Node) XXX_DiscardUnknown() {
- xxx_messageInfo_Node.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Node proto.InternalMessageInfo
-
-func (m *NodeAddress) Reset() { *m = NodeAddress{} }
-func (*NodeAddress) ProtoMessage() {}
-func (*NodeAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{101}
-}
-func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeAddress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeAddress.Merge(m, src)
-}
-func (m *NodeAddress) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeAddress) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
-
-func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
-func (*NodeAffinity) ProtoMessage() {}
-func (*NodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{102}
-}
-func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeAffinity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeAffinity.Merge(m, src)
-}
-func (m *NodeAffinity) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeAffinity) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeAffinity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
-
-func (m *NodeCondition) Reset() { *m = NodeCondition{} }
-func (*NodeCondition) ProtoMessage() {}
-func (*NodeCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{103}
-}
-func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeCondition.Merge(m, src)
-}
-func (m *NodeCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
-
-func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
-func (*NodeConfigSource) ProtoMessage() {}
-func (*NodeConfigSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{104}
-}
-func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeConfigSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeConfigSource.Merge(m, src)
-}
-func (m *NodeConfigSource) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeConfigSource) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeConfigSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
-
-func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
-func (*NodeConfigStatus) ProtoMessage() {}
-func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{105}
-}
-func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeConfigStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeConfigStatus.Merge(m, src)
-}
-func (m *NodeConfigStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeConfigStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
-
-func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
-func (*NodeDaemonEndpoints) ProtoMessage() {}
-func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{106}
-}
-func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src)
-}
-func (m *NodeDaemonEndpoints) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
-
-func (m *NodeFeatures) Reset() { *m = NodeFeatures{} }
-func (*NodeFeatures) ProtoMessage() {}
-func (*NodeFeatures) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{107}
-}
-func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeFeatures) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeFeatures.Merge(m, src)
-}
-func (m *NodeFeatures) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeFeatures) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeFeatures.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
-
-func (m *NodeList) Reset() { *m = NodeList{} }
-func (*NodeList) ProtoMessage() {}
-func (*NodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{108}
-}
-func (m *NodeList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeList.Merge(m, src)
-}
-func (m *NodeList) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeList) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeList proto.InternalMessageInfo
-
-func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
-func (*NodeProxyOptions) ProtoMessage() {}
-func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{109}
-}
-func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeProxyOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeProxyOptions.Merge(m, src)
-}
-func (m *NodeProxyOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeProxyOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
-
-func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} }
-func (*NodeRuntimeHandler) ProtoMessage() {}
-func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{110}
-}
-func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeRuntimeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeRuntimeHandler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeRuntimeHandler.Merge(m, src)
-}
-func (m *NodeRuntimeHandler) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeRuntimeHandler) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeRuntimeHandler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
-
-func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} }
-func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
-func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{111}
-}
-func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeRuntimeHandlerFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeRuntimeHandlerFeatures) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeRuntimeHandlerFeatures.Merge(m, src)
-}
-func (m *NodeRuntimeHandlerFeatures) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeRuntimeHandlerFeatures) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeRuntimeHandlerFeatures.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
-
-func (m *NodeSelector) Reset() { *m = NodeSelector{} }
-func (*NodeSelector) ProtoMessage() {}
-func (*NodeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{112}
-}
-func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSelector.Merge(m, src)
-}
-func (m *NodeSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
-
-func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
-func (*NodeSelectorRequirement) ProtoMessage() {}
-func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{113}
-}
-func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSelectorRequirement.Merge(m, src)
-}
-func (m *NodeSelectorRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSelectorRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
-
-func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
-func (*NodeSelectorTerm) ProtoMessage() {}
-func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{114}
-}
-func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSelectorTerm.Merge(m, src)
-}
-func (m *NodeSelectorTerm) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSelectorTerm) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
-
-func (m *NodeSpec) Reset() { *m = NodeSpec{} }
-func (*NodeSpec) ProtoMessage() {}
-func (*NodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{115}
-}
-func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSpec.Merge(m, src)
-}
-func (m *NodeSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
-
-func (m *NodeStatus) Reset() { *m = NodeStatus{} }
-func (*NodeStatus) ProtoMessage() {}
-func (*NodeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{116}
-}
-func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeStatus.Merge(m, src)
-}
-func (m *NodeStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
-
-func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} }
-func (*NodeSwapStatus) ProtoMessage() {}
-func (*NodeSwapStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{117}
-}
-func (m *NodeSwapStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSwapStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSwapStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSwapStatus.Merge(m, src)
-}
-func (m *NodeSwapStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSwapStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSwapStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSwapStatus proto.InternalMessageInfo
-
-func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
-func (*NodeSystemInfo) ProtoMessage() {}
-func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{118}
-}
-func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NodeSystemInfo) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NodeSystemInfo.Merge(m, src)
-}
-func (m *NodeSystemInfo) XXX_Size() int {
- return m.Size()
-}
-func (m *NodeSystemInfo) XXX_DiscardUnknown() {
- xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
-
-func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
-func (*ObjectFieldSelector) ProtoMessage() {}
-func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{119}
-}
-func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectFieldSelector.Merge(m, src)
-}
-func (m *ObjectFieldSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectFieldSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
-
-func (m *ObjectReference) Reset() { *m = ObjectReference{} }
-func (*ObjectReference) ProtoMessage() {}
-func (*ObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{120}
-}
-func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectReference.Merge(m, src)
-}
-func (m *ObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
-
-func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
-func (*PersistentVolume) ProtoMessage() {}
-func (*PersistentVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{121}
-}
-func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolume) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolume.Merge(m, src)
-}
-func (m *PersistentVolume) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolume) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolume.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
-func (*PersistentVolumeClaim) ProtoMessage() {}
-func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{122}
-}
-func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaim.Merge(m, src)
-}
-func (m *PersistentVolumeClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
-func (*PersistentVolumeClaimCondition) ProtoMessage() {}
-func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{123}
-}
-func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src)
-}
-func (m *PersistentVolumeClaimCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
-func (*PersistentVolumeClaimList) ProtoMessage() {}
-func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{124}
-}
-func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src)
-}
-func (m *PersistentVolumeClaimList) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
-func (*PersistentVolumeClaimSpec) ProtoMessage() {}
-func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{125}
-}
-func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src)
-}
-func (m *PersistentVolumeClaimSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
-func (*PersistentVolumeClaimStatus) ProtoMessage() {}
-func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{126}
-}
-func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src)
-}
-func (m *PersistentVolumeClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m)
-}
+func (m *AppArmorProfile) Reset() { *m = AppArmorProfile{} }
-var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
-
-func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
-func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
-func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{127}
-}
-func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimTemplate.Merge(m, src)
-}
-func (m *PersistentVolumeClaimTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimTemplate.DiscardUnknown(m)
-}
+func (m *AttachedVolume) Reset() { *m = AttachedVolume{} }
-var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
+func (m *AvoidPods) Reset() { *m = AvoidPods{} }
-func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
-func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
-func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{128}
-}
-func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src)
-}
-func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m)
-}
+func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} }
-var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
+func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} }
-func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
-func (*PersistentVolumeList) ProtoMessage() {}
-func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{129}
-}
-func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeList.Merge(m, src)
-}
-func (m *PersistentVolumeList) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeList) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m)
-}
+func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} }
-var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
+func (m *Binding) Reset() { *m = Binding{} }
-func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
-func (*PersistentVolumeSource) ProtoMessage() {}
-func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{130}
-}
-func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeSource.Merge(m, src)
-}
-func (m *PersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m)
-}
+func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} }
-var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
+func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} }
-func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
-func (*PersistentVolumeSpec) ProtoMessage() {}
-func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{131}
-}
-func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeSpec.Merge(m, src)
-}
-func (m *PersistentVolumeSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m)
-}
+func (m *Capabilities) Reset() { *m = Capabilities{} }
-var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
+func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} }
-func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
-func (*PersistentVolumeStatus) ProtoMessage() {}
-func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{132}
-}
-func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PersistentVolumeStatus.Merge(m, src)
-}
-func (m *PersistentVolumeStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PersistentVolumeStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m)
-}
+func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} }
-var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
+func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} }
-func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
-func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
-func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{133}
-}
-func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src)
-}
-func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m)
-}
+func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} }
-var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
+func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} }
-func (m *Pod) Reset() { *m = Pod{} }
-func (*Pod) ProtoMessage() {}
-func (*Pod) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{134}
-}
-func (m *Pod) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Pod) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Pod.Merge(m, src)
-}
-func (m *Pod) XXX_Size() int {
- return m.Size()
-}
-func (m *Pod) XXX_DiscardUnknown() {
- xxx_messageInfo_Pod.DiscardUnknown(m)
-}
+func (m *ClusterTrustBundleProjection) Reset() { *m = ClusterTrustBundleProjection{} }
-var xxx_messageInfo_Pod proto.InternalMessageInfo
+func (m *ComponentCondition) Reset() { *m = ComponentCondition{} }
-func (m *PodAffinity) Reset() { *m = PodAffinity{} }
-func (*PodAffinity) ProtoMessage() {}
-func (*PodAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{135}
-}
-func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodAffinity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodAffinity.Merge(m, src)
-}
-func (m *PodAffinity) XXX_Size() int {
- return m.Size()
-}
-func (m *PodAffinity) XXX_DiscardUnknown() {
- xxx_messageInfo_PodAffinity.DiscardUnknown(m)
-}
+func (m *ComponentStatus) Reset() { *m = ComponentStatus{} }
-var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
+func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} }
-func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
-func (*PodAffinityTerm) ProtoMessage() {}
-func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{136}
-}
-func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodAffinityTerm) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodAffinityTerm.Merge(m, src)
-}
-func (m *PodAffinityTerm) XXX_Size() int {
- return m.Size()
-}
-func (m *PodAffinityTerm) XXX_DiscardUnknown() {
- xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m)
-}
+func (m *ConfigMap) Reset() { *m = ConfigMap{} }
-var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
+func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} }
-func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
-func (*PodAntiAffinity) ProtoMessage() {}
-func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{137}
-}
-func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodAntiAffinity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodAntiAffinity.Merge(m, src)
-}
-func (m *PodAntiAffinity) XXX_Size() int {
- return m.Size()
-}
-func (m *PodAntiAffinity) XXX_DiscardUnknown() {
- xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m)
-}
+func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} }
-var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
+func (m *ConfigMapList) Reset() { *m = ConfigMapList{} }
-func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
-func (*PodAttachOptions) ProtoMessage() {}
-func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{138}
-}
-func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodAttachOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodAttachOptions.Merge(m, src)
-}
-func (m *PodAttachOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PodAttachOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PodAttachOptions.DiscardUnknown(m)
-}
+func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} }
-var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
+func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} }
-func (m *PodCertificateProjection) Reset() { *m = PodCertificateProjection{} }
-func (*PodCertificateProjection) ProtoMessage() {}
-func (*PodCertificateProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{139}
-}
-func (m *PodCertificateProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCertificateProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCertificateProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCertificateProjection.Merge(m, src)
-}
-func (m *PodCertificateProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCertificateProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCertificateProjection.DiscardUnknown(m)
-}
+func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} }
-var xxx_messageInfo_PodCertificateProjection proto.InternalMessageInfo
+func (m *Container) Reset() { *m = Container{} }
-func (m *PodCondition) Reset() { *m = PodCondition{} }
-func (*PodCondition) ProtoMessage() {}
-func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{140}
-}
-func (m *PodCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodCondition.Merge(m, src)
-}
-func (m *PodCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PodCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PodCondition.DiscardUnknown(m)
-}
+func (m *ContainerExtendedResourceRequest) Reset() { *m = ContainerExtendedResourceRequest{} }
-var xxx_messageInfo_PodCondition proto.InternalMessageInfo
+func (m *ContainerImage) Reset() { *m = ContainerImage{} }
-func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
-func (*PodDNSConfig) ProtoMessage() {}
-func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{141}
-}
-func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDNSConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDNSConfig.Merge(m, src)
-}
-func (m *PodDNSConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDNSConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDNSConfig.DiscardUnknown(m)
-}
+func (m *ContainerPort) Reset() { *m = ContainerPort{} }
-var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
+func (m *ContainerResizePolicy) Reset() { *m = ContainerResizePolicy{} }
-func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
-func (*PodDNSConfigOption) ProtoMessage() {}
-func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{142}
-}
-func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDNSConfigOption.Merge(m, src)
-}
-func (m *PodDNSConfigOption) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDNSConfigOption) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m)
-}
+func (m *ContainerRestartRule) Reset() { *m = ContainerRestartRule{} }
-var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
+func (m *ContainerRestartRuleOnExitCodes) Reset() { *m = ContainerRestartRuleOnExitCodes{} }
-func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
-func (*PodExecOptions) ProtoMessage() {}
-func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{143}
-}
-func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodExecOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodExecOptions.Merge(m, src)
-}
-func (m *PodExecOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PodExecOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PodExecOptions.DiscardUnknown(m)
-}
+func (m *ContainerState) Reset() { *m = ContainerState{} }
-var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
+func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} }
-func (m *PodExtendedResourceClaimStatus) Reset() { *m = PodExtendedResourceClaimStatus{} }
-func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
-func (*PodExtendedResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{144}
-}
-func (m *PodExtendedResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodExtendedResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodExtendedResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodExtendedResourceClaimStatus.Merge(m, src)
-}
-func (m *PodExtendedResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodExtendedResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodExtendedResourceClaimStatus.DiscardUnknown(m)
-}
+func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} }
-var xxx_messageInfo_PodExtendedResourceClaimStatus proto.InternalMessageInfo
+func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} }
-func (m *PodIP) Reset() { *m = PodIP{} }
-func (*PodIP) ProtoMessage() {}
-func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{145}
-}
-func (m *PodIP) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodIP) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodIP.Merge(m, src)
-}
-func (m *PodIP) XXX_Size() int {
- return m.Size()
-}
-func (m *PodIP) XXX_DiscardUnknown() {
- xxx_messageInfo_PodIP.DiscardUnknown(m)
-}
+func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
-var xxx_messageInfo_PodIP proto.InternalMessageInfo
+func (m *ContainerUser) Reset() { *m = ContainerUser{} }
-func (m *PodList) Reset() { *m = PodList{} }
-func (*PodList) ProtoMessage() {}
-func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{146}
-}
-func (m *PodList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodList.Merge(m, src)
-}
-func (m *PodList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodList.DiscardUnknown(m)
-}
+func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} }
-var xxx_messageInfo_PodList proto.InternalMessageInfo
+func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} }
-func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
-func (*PodLogOptions) ProtoMessage() {}
-func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{147}
-}
-func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodLogOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodLogOptions.Merge(m, src)
-}
-func (m *PodLogOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PodLogOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PodLogOptions.DiscardUnknown(m)
-}
+func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} }
-var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
+func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} }
-func (m *PodOS) Reset() { *m = PodOS{} }
-func (*PodOS) ProtoMessage() {}
-func (*PodOS) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{148}
-}
-func (m *PodOS) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodOS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodOS) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodOS.Merge(m, src)
-}
-func (m *PodOS) XXX_Size() int {
- return m.Size()
-}
-func (m *PodOS) XXX_DiscardUnknown() {
- xxx_messageInfo_PodOS.DiscardUnknown(m)
-}
+func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} }
-var xxx_messageInfo_PodOS proto.InternalMessageInfo
+func (m *EndpointAddress) Reset() { *m = EndpointAddress{} }
-func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
-func (*PodPortForwardOptions) ProtoMessage() {}
-func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{149}
-}
-func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodPortForwardOptions.Merge(m, src)
-}
-func (m *PodPortForwardOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PodPortForwardOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m)
-}
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
+func (m *EndpointSubset) Reset() { *m = EndpointSubset{} }
-func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
-func (*PodProxyOptions) ProtoMessage() {}
-func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{150}
-}
-func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodProxyOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodProxyOptions.Merge(m, src)
-}
-func (m *PodProxyOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PodProxyOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PodProxyOptions.DiscardUnknown(m)
-}
+func (m *Endpoints) Reset() { *m = Endpoints{} }
-var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
+func (m *EndpointsList) Reset() { *m = EndpointsList{} }
-func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
-func (*PodReadinessGate) ProtoMessage() {}
-func (*PodReadinessGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{151}
-}
-func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodReadinessGate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodReadinessGate.Merge(m, src)
-}
-func (m *PodReadinessGate) XXX_Size() int {
- return m.Size()
-}
-func (m *PodReadinessGate) XXX_DiscardUnknown() {
- xxx_messageInfo_PodReadinessGate.DiscardUnknown(m)
-}
+func (m *EnvFromSource) Reset() { *m = EnvFromSource{} }
-var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
+func (m *EnvVar) Reset() { *m = EnvVar{} }
-func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
-func (*PodResourceClaim) ProtoMessage() {}
-func (*PodResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{152}
-}
-func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodResourceClaim.Merge(m, src)
-}
-func (m *PodResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *PodResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_PodResourceClaim.DiscardUnknown(m)
-}
+func (m *EnvVarSource) Reset() { *m = EnvVarSource{} }
-var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
+func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} }
-func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
-func (*PodResourceClaimStatus) ProtoMessage() {}
-func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{153}
-}
-func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodResourceClaimStatus.Merge(m, src)
-}
-func (m *PodResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodResourceClaimStatus.DiscardUnknown(m)
-}
+func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} }
-var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
+func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} }
-func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
-func (*PodSchedulingGate) ProtoMessage() {}
-func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{154}
-}
-func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSchedulingGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSchedulingGate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSchedulingGate.Merge(m, src)
-}
-func (m *PodSchedulingGate) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSchedulingGate) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSchedulingGate.DiscardUnknown(m)
-}
+func (m *Event) Reset() { *m = Event{} }
-var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
+func (m *EventList) Reset() { *m = EventList{} }
-func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
-func (*PodSecurityContext) ProtoMessage() {}
-func (*PodSecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{155}
-}
-func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSecurityContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSecurityContext.Merge(m, src)
-}
-func (m *PodSecurityContext) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSecurityContext) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSecurityContext.DiscardUnknown(m)
-}
+func (m *EventSeries) Reset() { *m = EventSeries{} }
-var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
+func (m *EventSource) Reset() { *m = EventSource{} }
-func (m *PodSignature) Reset() { *m = PodSignature{} }
-func (*PodSignature) ProtoMessage() {}
-func (*PodSignature) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{156}
-}
-func (m *PodSignature) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSignature) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSignature.Merge(m, src)
-}
-func (m *PodSignature) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSignature) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSignature.DiscardUnknown(m)
-}
+func (m *ExecAction) Reset() { *m = ExecAction{} }
-var xxx_messageInfo_PodSignature proto.InternalMessageInfo
+func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} }
-func (m *PodSpec) Reset() { *m = PodSpec{} }
-func (*PodSpec) ProtoMessage() {}
-func (*PodSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{157}
-}
-func (m *PodSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodSpec.Merge(m, src)
-}
-func (m *PodSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodSpec.DiscardUnknown(m)
-}
+func (m *FileKeySelector) Reset() { *m = FileKeySelector{} }
-var xxx_messageInfo_PodSpec proto.InternalMessageInfo
+func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} }
-func (m *PodStatus) Reset() { *m = PodStatus{} }
-func (*PodStatus) ProtoMessage() {}
-func (*PodStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{158}
-}
-func (m *PodStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodStatus.Merge(m, src)
-}
-func (m *PodStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodStatus.DiscardUnknown(m)
-}
+func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
-var xxx_messageInfo_PodStatus proto.InternalMessageInfo
+func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
-func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
-func (*PodStatusResult) ProtoMessage() {}
-func (*PodStatusResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{159}
-}
-func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodStatusResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodStatusResult.Merge(m, src)
-}
-func (m *PodStatusResult) XXX_Size() int {
- return m.Size()
-}
-func (m *PodStatusResult) XXX_DiscardUnknown() {
- xxx_messageInfo_PodStatusResult.DiscardUnknown(m)
-}
+func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
-var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
+func (m *GRPCAction) Reset() { *m = GRPCAction{} }
-func (m *PodTemplate) Reset() { *m = PodTemplate{} }
-func (*PodTemplate) ProtoMessage() {}
-func (*PodTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{160}
-}
-func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodTemplate.Merge(m, src)
-}
-func (m *PodTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *PodTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_PodTemplate.DiscardUnknown(m)
-}
+func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
-var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
+func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
-func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
-func (*PodTemplateList) ProtoMessage() {}
-func (*PodTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{161}
-}
-func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodTemplateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodTemplateList.Merge(m, src)
-}
-func (m *PodTemplateList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodTemplateList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodTemplateList.DiscardUnknown(m)
-}
+func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
-var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
+func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
-func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
-func (*PodTemplateSpec) ProtoMessage() {}
-func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{162}
-}
-func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodTemplateSpec.Merge(m, src)
-}
-func (m *PodTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m)
-}
+func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
-var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
+func (m *HostAlias) Reset() { *m = HostAlias{} }
-func (m *PortStatus) Reset() { *m = PortStatus{} }
-func (*PortStatus) ProtoMessage() {}
-func (*PortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{163}
-}
-func (m *PortStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PortStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PortStatus.Merge(m, src)
-}
-func (m *PortStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PortStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PortStatus.DiscardUnknown(m)
-}
+func (m *HostIP) Reset() { *m = HostIP{} }
-var xxx_messageInfo_PortStatus proto.InternalMessageInfo
+func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
-func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
-func (*PortworxVolumeSource) ProtoMessage() {}
-func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{164}
-}
-func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PortworxVolumeSource.Merge(m, src)
-}
-func (m *PortworxVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *PortworxVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m)
-}
+func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
-var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
+func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
-func (m *Preconditions) Reset() { *m = Preconditions{} }
-func (*Preconditions) ProtoMessage() {}
-func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{165}
-}
-func (m *Preconditions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Preconditions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Preconditions.Merge(m, src)
-}
-func (m *Preconditions) XXX_Size() int {
- return m.Size()
-}
-func (m *Preconditions) XXX_DiscardUnknown() {
- xxx_messageInfo_Preconditions.DiscardUnknown(m)
-}
+func (m *ImageVolumeSource) Reset() { *m = ImageVolumeSource{} }
-var xxx_messageInfo_Preconditions proto.InternalMessageInfo
+func (m *KeyToPath) Reset() { *m = KeyToPath{} }
-func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
-func (*PreferAvoidPodsEntry) ProtoMessage() {}
-func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{166}
-}
-func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src)
-}
-func (m *PreferAvoidPodsEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m)
-}
+func (m *Lifecycle) Reset() { *m = Lifecycle{} }
-var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
+func (m *LifecycleHandler) Reset() { *m = LifecycleHandler{} }
-func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
-func (*PreferredSchedulingTerm) ProtoMessage() {}
-func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{167}
-}
-func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src)
-}
-func (m *PreferredSchedulingTerm) XXX_Size() int {
- return m.Size()
-}
-func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() {
- xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m)
-}
+func (m *LimitRange) Reset() { *m = LimitRange{} }
-var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
+func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
-func (m *Probe) Reset() { *m = Probe{} }
-func (*Probe) ProtoMessage() {}
-func (*Probe) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{168}
-}
-func (m *Probe) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Probe) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Probe.Merge(m, src)
-}
-func (m *Probe) XXX_Size() int {
- return m.Size()
-}
-func (m *Probe) XXX_DiscardUnknown() {
- xxx_messageInfo_Probe.DiscardUnknown(m)
-}
+func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
-var xxx_messageInfo_Probe proto.InternalMessageInfo
+func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
-func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
-func (*ProbeHandler) ProtoMessage() {}
-func (*ProbeHandler) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{169}
-}
-func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProbeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ProbeHandler) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProbeHandler.Merge(m, src)
-}
-func (m *ProbeHandler) XXX_Size() int {
- return m.Size()
-}
-func (m *ProbeHandler) XXX_DiscardUnknown() {
- xxx_messageInfo_ProbeHandler.DiscardUnknown(m)
-}
+func (m *LinuxContainerUser) Reset() { *m = LinuxContainerUser{} }
-var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
+func (m *List) Reset() { *m = List{} }
-func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
-func (*ProjectedVolumeSource) ProtoMessage() {}
-func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{170}
-}
-func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProjectedVolumeSource.Merge(m, src)
-}
-func (m *ProjectedVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ProjectedVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m)
-}
+func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
-var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
+func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
-func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
-func (*QuobyteVolumeSource) ProtoMessage() {}
-func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{171}
-}
-func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QuobyteVolumeSource.Merge(m, src)
-}
-func (m *QuobyteVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *QuobyteVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m)
-}
+func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
-var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
+func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
-func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
-func (*RBDPersistentVolumeSource) ProtoMessage() {}
-func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{172}
-}
-func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src)
-}
-func (m *RBDPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m)
-}
+func (m *ModifyVolumeStatus) Reset() { *m = ModifyVolumeStatus{} }
-var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
+func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
-func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
-func (*RBDVolumeSource) ProtoMessage() {}
-func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{173}
-}
-func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RBDVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RBDVolumeSource.Merge(m, src)
-}
-func (m *RBDVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *RBDVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m)
-}
+func (m *Namespace) Reset() { *m = Namespace{} }
-var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
+func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
-func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
-func (*RangeAllocation) ProtoMessage() {}
-func (*RangeAllocation) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{174}
-}
-func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RangeAllocation) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RangeAllocation.Merge(m, src)
-}
-func (m *RangeAllocation) XXX_Size() int {
- return m.Size()
-}
-func (m *RangeAllocation) XXX_DiscardUnknown() {
- xxx_messageInfo_RangeAllocation.DiscardUnknown(m)
-}
+func (m *NamespaceList) Reset() { *m = NamespaceList{} }
-var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
+func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
-func (m *ReplicationController) Reset() { *m = ReplicationController{} }
-func (*ReplicationController) ProtoMessage() {}
-func (*ReplicationController) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{175}
-}
-func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicationController) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationController.Merge(m, src)
-}
-func (m *ReplicationController) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicationController) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationController.DiscardUnknown(m)
-}
+func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
-var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
+func (m *Node) Reset() { *m = Node{} }
-func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
-func (*ReplicationControllerCondition) ProtoMessage() {}
-func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{176}
-}
-func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationControllerCondition.Merge(m, src)
-}
-func (m *ReplicationControllerCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicationControllerCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m)
-}
+func (m *NodeAddress) Reset() { *m = NodeAddress{} }
-var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
+func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
-func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
-func (*ReplicationControllerList) ProtoMessage() {}
-func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{177}
-}
-func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicationControllerList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationControllerList.Merge(m, src)
-}
-func (m *ReplicationControllerList) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicationControllerList) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m)
-}
+func (m *NodeCondition) Reset() { *m = NodeCondition{} }
-var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
+func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
-func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
-func (*ReplicationControllerSpec) ProtoMessage() {}
-func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{178}
-}
-func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationControllerSpec.Merge(m, src)
-}
-func (m *ReplicationControllerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicationControllerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m)
-}
+func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
-var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
+func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
-func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
-func (*ReplicationControllerStatus) ProtoMessage() {}
-func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{179}
-}
-func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicationControllerStatus.Merge(m, src)
-}
-func (m *ReplicationControllerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicationControllerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m)
-}
+func (m *NodeFeatures) Reset() { *m = NodeFeatures{} }
-var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
+func (m *NodeList) Reset() { *m = NodeList{} }
-func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (*ResourceClaim) ProtoMessage() {}
-func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{180}
-}
-func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaim.Merge(m, src)
-}
-func (m *ResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
-}
+func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
-var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+func (m *NodeRuntimeHandler) Reset() { *m = NodeRuntimeHandler{} }
-func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
-func (*ResourceFieldSelector) ProtoMessage() {}
-func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{181}
-}
-func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceFieldSelector.Merge(m, src)
-}
-func (m *ResourceFieldSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceFieldSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m)
-}
+func (m *NodeRuntimeHandlerFeatures) Reset() { *m = NodeRuntimeHandlerFeatures{} }
-var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
+func (m *NodeSelector) Reset() { *m = NodeSelector{} }
-func (m *ResourceHealth) Reset() { *m = ResourceHealth{} }
-func (*ResourceHealth) ProtoMessage() {}
-func (*ResourceHealth) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{182}
-}
-func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceHealth) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceHealth.Merge(m, src)
-}
-func (m *ResourceHealth) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceHealth) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceHealth.DiscardUnknown(m)
-}
+func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
-var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
+func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
-func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
-func (*ResourceQuota) ProtoMessage() {}
-func (*ResourceQuota) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{183}
-}
-func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceQuota) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceQuota.Merge(m, src)
-}
-func (m *ResourceQuota) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceQuota) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceQuota.DiscardUnknown(m)
-}
+func (m *NodeSpec) Reset() { *m = NodeSpec{} }
-var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
-func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
-func (*ResourceQuotaList) ProtoMessage() {}
-func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{184}
-}
-func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceQuotaList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceQuotaList.Merge(m, src)
-}
-func (m *ResourceQuotaList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceQuotaList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m)
-}
+func (m *NodeSwapStatus) Reset() { *m = NodeSwapStatus{} }
-var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
+func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
-func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
-func (*ResourceQuotaSpec) ProtoMessage() {}
-func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{185}
-}
-func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceQuotaSpec.Merge(m, src)
-}
-func (m *ResourceQuotaSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceQuotaSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m)
-}
+func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
-var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
+func (m *ObjectReference) Reset() { *m = ObjectReference{} }
-func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
-func (*ResourceQuotaStatus) ProtoMessage() {}
-func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{186}
-}
-func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceQuotaStatus.Merge(m, src)
-}
-func (m *ResourceQuotaStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceQuotaStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m)
-}
+func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
-var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
+func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
-func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
-func (*ResourceRequirements) ProtoMessage() {}
-func (*ResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{187}
-}
-func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceRequirements) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceRequirements.Merge(m, src)
-}
-func (m *ResourceRequirements) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceRequirements) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceRequirements.DiscardUnknown(m)
-}
+func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
-var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
+func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
-func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
-func (*ResourceStatus) ProtoMessage() {}
-func (*ResourceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{188}
-}
-func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceStatus.Merge(m, src)
-}
-func (m *ResourceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceStatus.DiscardUnknown(m)
-}
+func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
-var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
+func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
-func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
-func (*SELinuxOptions) ProtoMessage() {}
-func (*SELinuxOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{189}
-}
-func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SELinuxOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SELinuxOptions.Merge(m, src)
-}
-func (m *SELinuxOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *SELinuxOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_SELinuxOptions.DiscardUnknown(m)
-}
+func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
-var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
+func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
-func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
-func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
-func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{190}
-}
-func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src)
-}
-func (m *ScaleIOPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m)
-}
+func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
-var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
+func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
-func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
-func (*ScaleIOVolumeSource) ProtoMessage() {}
-func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{191}
-}
-func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src)
-}
-func (m *ScaleIOVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m)
-}
+func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
+
+func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
+
+func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
+
+func (m *Pod) Reset() { *m = Pod{} }
+
+func (m *PodAffinity) Reset() { *m = PodAffinity{} }
+
+func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
+
+func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
+
+func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
+
+func (m *PodCertificateProjection) Reset() { *m = PodCertificateProjection{} }
-var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
+func (m *PodCondition) Reset() { *m = PodCondition{} }
-func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
-func (*ScopeSelector) ProtoMessage() {}
-func (*ScopeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{192}
-}
-func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScopeSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopeSelector.Merge(m, src)
-}
-func (m *ScopeSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopeSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopeSelector.DiscardUnknown(m)
-}
+func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
-var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
+func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
-func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
-func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
-func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{193}
-}
-func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src)
-}
-func (m *ScopedResourceSelectorRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m)
-}
+func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
-var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
+func (m *PodExtendedResourceClaimStatus) Reset() { *m = PodExtendedResourceClaimStatus{} }
-func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
-func (*SeccompProfile) ProtoMessage() {}
-func (*SeccompProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{194}
-}
-func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SeccompProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SeccompProfile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SeccompProfile.Merge(m, src)
-}
-func (m *SeccompProfile) XXX_Size() int {
- return m.Size()
-}
-func (m *SeccompProfile) XXX_DiscardUnknown() {
- xxx_messageInfo_SeccompProfile.DiscardUnknown(m)
-}
+func (m *PodIP) Reset() { *m = PodIP{} }
-var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
+func (m *PodList) Reset() { *m = PodList{} }
-func (m *Secret) Reset() { *m = Secret{} }
-func (*Secret) ProtoMessage() {}
-func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{195}
-}
-func (m *Secret) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Secret) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Secret.Merge(m, src)
-}
-func (m *Secret) XXX_Size() int {
- return m.Size()
-}
-func (m *Secret) XXX_DiscardUnknown() {
- xxx_messageInfo_Secret.DiscardUnknown(m)
-}
+func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
-var xxx_messageInfo_Secret proto.InternalMessageInfo
+func (m *PodOS) Reset() { *m = PodOS{} }
-func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
-func (*SecretEnvSource) ProtoMessage() {}
-func (*SecretEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{196}
-}
-func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretEnvSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretEnvSource.Merge(m, src)
-}
-func (m *SecretEnvSource) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretEnvSource) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretEnvSource.DiscardUnknown(m)
-}
+func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
-var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
+func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
-func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
-func (*SecretKeySelector) ProtoMessage() {}
-func (*SecretKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{197}
-}
-func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretKeySelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretKeySelector.Merge(m, src)
-}
-func (m *SecretKeySelector) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretKeySelector) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretKeySelector.DiscardUnknown(m)
-}
+func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
-var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
+func (m *PodResourceClaim) Reset() { *m = PodResourceClaim{} }
-func (m *SecretList) Reset() { *m = SecretList{} }
-func (*SecretList) ProtoMessage() {}
-func (*SecretList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{198}
-}
-func (m *SecretList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretList.Merge(m, src)
-}
-func (m *SecretList) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretList) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretList.DiscardUnknown(m)
-}
+func (m *PodResourceClaimStatus) Reset() { *m = PodResourceClaimStatus{} }
-var xxx_messageInfo_SecretList proto.InternalMessageInfo
+func (m *PodSchedulingGate) Reset() { *m = PodSchedulingGate{} }
-func (m *SecretProjection) Reset() { *m = SecretProjection{} }
-func (*SecretProjection) ProtoMessage() {}
-func (*SecretProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{199}
-}
-func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretProjection.Merge(m, src)
-}
-func (m *SecretProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretProjection.DiscardUnknown(m)
-}
+func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
-var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
+func (m *PodSignature) Reset() { *m = PodSignature{} }
-func (m *SecretReference) Reset() { *m = SecretReference{} }
-func (*SecretReference) ProtoMessage() {}
-func (*SecretReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{200}
-}
-func (m *SecretReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretReference.Merge(m, src)
-}
-func (m *SecretReference) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretReference) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretReference.DiscardUnknown(m)
-}
+func (m *PodSpec) Reset() { *m = PodSpec{} }
-var xxx_messageInfo_SecretReference proto.InternalMessageInfo
+func (m *PodStatus) Reset() { *m = PodStatus{} }
-func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
-func (*SecretVolumeSource) ProtoMessage() {}
-func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{201}
-}
-func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecretVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecretVolumeSource.Merge(m, src)
-}
-func (m *SecretVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *SecretVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m)
-}
+func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
-var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
+func (m *PodTemplate) Reset() { *m = PodTemplate{} }
-func (m *SecurityContext) Reset() { *m = SecurityContext{} }
-func (*SecurityContext) ProtoMessage() {}
-func (*SecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{202}
-}
-func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SecurityContext) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SecurityContext.Merge(m, src)
-}
-func (m *SecurityContext) XXX_Size() int {
- return m.Size()
-}
-func (m *SecurityContext) XXX_DiscardUnknown() {
- xxx_messageInfo_SecurityContext.DiscardUnknown(m)
-}
+func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
-var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
+func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
-func (m *SerializedReference) Reset() { *m = SerializedReference{} }
-func (*SerializedReference) ProtoMessage() {}
-func (*SerializedReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{203}
-}
-func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SerializedReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SerializedReference.Merge(m, src)
-}
-func (m *SerializedReference) XXX_Size() int {
- return m.Size()
-}
-func (m *SerializedReference) XXX_DiscardUnknown() {
- xxx_messageInfo_SerializedReference.DiscardUnknown(m)
-}
+func (m *PortStatus) Reset() { *m = PortStatus{} }
-var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
+func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
-func (m *Service) Reset() { *m = Service{} }
-func (*Service) ProtoMessage() {}
-func (*Service) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{204}
-}
-func (m *Service) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Service) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Service.Merge(m, src)
-}
-func (m *Service) XXX_Size() int {
- return m.Size()
-}
-func (m *Service) XXX_DiscardUnknown() {
- xxx_messageInfo_Service.DiscardUnknown(m)
-}
+func (m *Preconditions) Reset() { *m = Preconditions{} }
-var xxx_messageInfo_Service proto.InternalMessageInfo
+func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
-func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
-func (*ServiceAccount) ProtoMessage() {}
-func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{205}
-}
-func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccount) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccount.Merge(m, src)
-}
-func (m *ServiceAccount) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccount) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccount.DiscardUnknown(m)
-}
+func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
-var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
+func (m *Probe) Reset() { *m = Probe{} }
-func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
-func (*ServiceAccountList) ProtoMessage() {}
-func (*ServiceAccountList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{206}
-}
-func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountList.Merge(m, src)
-}
-func (m *ServiceAccountList) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountList) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountList.DiscardUnknown(m)
-}
+func (m *ProbeHandler) Reset() { *m = ProbeHandler{} }
-var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
+func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
-func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
-func (*ServiceAccountTokenProjection) ProtoMessage() {}
-func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{207}
-}
-func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src)
-}
-func (m *ServiceAccountTokenProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m)
-}
+func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
-var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
+func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
-func (m *ServiceList) Reset() { *m = ServiceList{} }
-func (*ServiceList) ProtoMessage() {}
-func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{208}
-}
-func (m *ServiceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceList.Merge(m, src)
-}
-func (m *ServiceList) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceList) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceList.DiscardUnknown(m)
-}
+func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
-var xxx_messageInfo_ServiceList proto.InternalMessageInfo
+func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
-func (m *ServicePort) Reset() { *m = ServicePort{} }
-func (*ServicePort) ProtoMessage() {}
-func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{209}
-}
-func (m *ServicePort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServicePort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServicePort.Merge(m, src)
-}
-func (m *ServicePort) XXX_Size() int {
- return m.Size()
-}
-func (m *ServicePort) XXX_DiscardUnknown() {
- xxx_messageInfo_ServicePort.DiscardUnknown(m)
-}
+func (m *ReplicationController) Reset() { *m = ReplicationController{} }
-var xxx_messageInfo_ServicePort proto.InternalMessageInfo
+func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
-func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
-func (*ServiceProxyOptions) ProtoMessage() {}
-func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{210}
-}
-func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceProxyOptions.Merge(m, src)
-}
-func (m *ServiceProxyOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceProxyOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m)
-}
+func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
-var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
+func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
-func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
-func (*ServiceSpec) ProtoMessage() {}
-func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{211}
-}
-func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceSpec.Merge(m, src)
-}
-func (m *ServiceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceSpec.DiscardUnknown(m)
-}
+func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
-var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
+func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
-func (*ServiceStatus) ProtoMessage() {}
-func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{212}
-}
-func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceStatus.Merge(m, src)
-}
-func (m *ServiceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceStatus.DiscardUnknown(m)
-}
+func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
-var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
+func (m *ResourceHealth) Reset() { *m = ResourceHealth{} }
-func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
-func (*SessionAffinityConfig) ProtoMessage() {}
-func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{213}
-}
-func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SessionAffinityConfig.Merge(m, src)
-}
-func (m *SessionAffinityConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *SessionAffinityConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m)
-}
+func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
-var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
+func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
-func (m *SleepAction) Reset() { *m = SleepAction{} }
-func (*SleepAction) ProtoMessage() {}
-func (*SleepAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{214}
-}
-func (m *SleepAction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *SleepAction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SleepAction.Merge(m, src)
-}
-func (m *SleepAction) XXX_Size() int {
- return m.Size()
-}
-func (m *SleepAction) XXX_DiscardUnknown() {
- xxx_messageInfo_SleepAction.DiscardUnknown(m)
-}
+func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
-var xxx_messageInfo_SleepAction proto.InternalMessageInfo
+func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
-func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
-func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
-func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{215}
-}
-func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src)
-}
-func (m *StorageOSPersistentVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m)
-}
+func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
-var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
+func (m *ResourceStatus) Reset() { *m = ResourceStatus{} }
-func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
-func (*StorageOSVolumeSource) ProtoMessage() {}
-func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{216}
-}
-func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageOSVolumeSource.Merge(m, src)
-}
-func (m *StorageOSVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageOSVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m)
-}
+func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
-var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
+func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
-func (m *Sysctl) Reset() { *m = Sysctl{} }
-func (*Sysctl) ProtoMessage() {}
-func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{217}
-}
-func (m *Sysctl) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Sysctl) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sysctl.Merge(m, src)
-}
-func (m *Sysctl) XXX_Size() int {
- return m.Size()
-}
-func (m *Sysctl) XXX_DiscardUnknown() {
- xxx_messageInfo_Sysctl.DiscardUnknown(m)
-}
+func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
-var xxx_messageInfo_Sysctl proto.InternalMessageInfo
+func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
-func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
-func (*TCPSocketAction) ProtoMessage() {}
-func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{218}
-}
-func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TCPSocketAction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TCPSocketAction.Merge(m, src)
-}
-func (m *TCPSocketAction) XXX_Size() int {
- return m.Size()
-}
-func (m *TCPSocketAction) XXX_DiscardUnknown() {
- xxx_messageInfo_TCPSocketAction.DiscardUnknown(m)
-}
+func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
-var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
+func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
-func (m *Taint) Reset() { *m = Taint{} }
-func (*Taint) ProtoMessage() {}
-func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{219}
-}
-func (m *Taint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Taint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Taint.Merge(m, src)
-}
-func (m *Taint) XXX_Size() int {
- return m.Size()
-}
-func (m *Taint) XXX_DiscardUnknown() {
- xxx_messageInfo_Taint.DiscardUnknown(m)
-}
+func (m *Secret) Reset() { *m = Secret{} }
-var xxx_messageInfo_Taint proto.InternalMessageInfo
+func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
-func (m *Toleration) Reset() { *m = Toleration{} }
-func (*Toleration) ProtoMessage() {}
-func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{220}
-}
-func (m *Toleration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Toleration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Toleration.Merge(m, src)
-}
-func (m *Toleration) XXX_Size() int {
- return m.Size()
-}
-func (m *Toleration) XXX_DiscardUnknown() {
- xxx_messageInfo_Toleration.DiscardUnknown(m)
-}
+func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
-var xxx_messageInfo_Toleration proto.InternalMessageInfo
+func (m *SecretList) Reset() { *m = SecretList{} }
-func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
-func (*TopologySelectorLabelRequirement) ProtoMessage() {}
-func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{221}
-}
-func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src)
-}
-func (m *TopologySelectorLabelRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m)
-}
+func (m *SecretProjection) Reset() { *m = SecretProjection{} }
-var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
+func (m *SecretReference) Reset() { *m = SecretReference{} }
-func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
-func (*TopologySelectorTerm) ProtoMessage() {}
-func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{222}
-}
-func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TopologySelectorTerm.Merge(m, src)
-}
-func (m *TopologySelectorTerm) XXX_Size() int {
- return m.Size()
-}
-func (m *TopologySelectorTerm) XXX_DiscardUnknown() {
- xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m)
-}
+func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
-var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
+func (m *SecurityContext) Reset() { *m = SecurityContext{} }
-func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
-func (*TopologySpreadConstraint) ProtoMessage() {}
-func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{223}
-}
-func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TopologySpreadConstraint.Merge(m, src)
-}
-func (m *TopologySpreadConstraint) XXX_Size() int {
- return m.Size()
-}
-func (m *TopologySpreadConstraint) XXX_DiscardUnknown() {
- xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m)
-}
+func (m *SerializedReference) Reset() { *m = SerializedReference{} }
-var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
+func (m *Service) Reset() { *m = Service{} }
-func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
-func (*TypedLocalObjectReference) ProtoMessage() {}
-func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{224}
-}
-func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypedLocalObjectReference.Merge(m, src)
-}
-func (m *TypedLocalObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *TypedLocalObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m)
-}
+func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
-var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
+func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
-func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
-func (*TypedObjectReference) ProtoMessage() {}
-func (*TypedObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{225}
-}
-func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypedObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypedObjectReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypedObjectReference.Merge(m, src)
-}
-func (m *TypedObjectReference) XXX_Size() int {
- return m.Size()
-}
-func (m *TypedObjectReference) XXX_DiscardUnknown() {
- xxx_messageInfo_TypedObjectReference.DiscardUnknown(m)
-}
+func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
-var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
+func (m *ServiceList) Reset() { *m = ServiceList{} }
-func (m *Volume) Reset() { *m = Volume{} }
-func (*Volume) ProtoMessage() {}
-func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{226}
-}
-func (m *Volume) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Volume) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Volume.Merge(m, src)
-}
-func (m *Volume) XXX_Size() int {
- return m.Size()
-}
-func (m *Volume) XXX_DiscardUnknown() {
- xxx_messageInfo_Volume.DiscardUnknown(m)
-}
+func (m *ServicePort) Reset() { *m = ServicePort{} }
-var xxx_messageInfo_Volume proto.InternalMessageInfo
+func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
-func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
-func (*VolumeDevice) ProtoMessage() {}
-func (*VolumeDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{227}
-}
-func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeDevice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeDevice.Merge(m, src)
-}
-func (m *VolumeDevice) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeDevice) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeDevice.DiscardUnknown(m)
-}
+func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
-var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
+func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
-func (m *VolumeMount) Reset() { *m = VolumeMount{} }
-func (*VolumeMount) ProtoMessage() {}
-func (*VolumeMount) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{228}
-}
-func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeMount) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeMount.Merge(m, src)
-}
-func (m *VolumeMount) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeMount) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeMount.DiscardUnknown(m)
-}
+func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
-var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
+func (m *SleepAction) Reset() { *m = SleepAction{} }
-func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} }
-func (*VolumeMountStatus) ProtoMessage() {}
-func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{229}
-}
-func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeMountStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeMountStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeMountStatus.Merge(m, src)
-}
-func (m *VolumeMountStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeMountStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeMountStatus.DiscardUnknown(m)
-}
+func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
-var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
+func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
-func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
-func (*VolumeNodeAffinity) ProtoMessage() {}
-func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{230}
-}
-func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeNodeAffinity.Merge(m, src)
-}
-func (m *VolumeNodeAffinity) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeNodeAffinity) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m)
-}
+func (m *Sysctl) Reset() { *m = Sysctl{} }
-var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
+func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
-func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
-func (*VolumeProjection) ProtoMessage() {}
-func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{231}
-}
-func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeProjection) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeProjection.Merge(m, src)
-}
-func (m *VolumeProjection) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeProjection) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeProjection.DiscardUnknown(m)
-}
+func (m *Taint) Reset() { *m = Taint{} }
-var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
+func (m *Toleration) Reset() { *m = Toleration{} }
-func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
-func (*VolumeResourceRequirements) ProtoMessage() {}
-func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{232}
-}
-func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeResourceRequirements.Merge(m, src)
-}
-func (m *VolumeResourceRequirements) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeResourceRequirements) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m)
-}
+func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
-var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
+func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
-func (m *VolumeSource) Reset() { *m = VolumeSource{} }
-func (*VolumeSource) ProtoMessage() {}
-func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{233}
-}
-func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeSource.Merge(m, src)
-}
-func (m *VolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeSource.DiscardUnknown(m)
-}
+func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
-var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
+func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
-func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
-func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
-func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{234}
-}
-func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src)
-}
-func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int {
- return m.Size()
-}
-func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() {
- xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m)
-}
+func (m *TypedObjectReference) Reset() { *m = TypedObjectReference{} }
-var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
+func (m *Volume) Reset() { *m = Volume{} }
-func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
-func (*WeightedPodAffinityTerm) ProtoMessage() {}
-func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{235}
-}
-func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src)
-}
-func (m *WeightedPodAffinityTerm) XXX_Size() int {
- return m.Size()
-}
-func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() {
- xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m)
-}
+func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
-var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
+func (m *VolumeMount) Reset() { *m = VolumeMount{} }
-func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
-func (*WindowsSecurityContextOptions) ProtoMessage() {}
-func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6c07b07c062484ab, []int{236}
-}
-func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src)
-}
-func (m *WindowsSecurityContextOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource")
- proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity")
- proto.RegisterType((*AppArmorProfile)(nil), "k8s.io.api.core.v1.AppArmorProfile")
- proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume")
- proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods")
- proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource")
- proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource")
- proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource")
- proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
- proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry")
- proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry")
- proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
- proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
- proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
- proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource")
- proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
- proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
- proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection")
- proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
- proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus")
- proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList")
- proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap")
- proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry")
- proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource")
- proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector")
- proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList")
- proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource")
- proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
- proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
- proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
- proto.RegisterType((*ContainerExtendedResourceRequest)(nil), "k8s.io.api.core.v1.ContainerExtendedResourceRequest")
- proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
- proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
- proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
- proto.RegisterType((*ContainerRestartRule)(nil), "k8s.io.api.core.v1.ContainerRestartRule")
- proto.RegisterType((*ContainerRestartRuleOnExitCodes)(nil), "k8s.io.api.core.v1.ContainerRestartRuleOnExitCodes")
- proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState")
- proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
- proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
- proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
- proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry")
- proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser")
- proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
- proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
- proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
- proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource")
- proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource")
- proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress")
- proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort")
- proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset")
- proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints")
- proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList")
- proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource")
- proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar")
- proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource")
- proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer")
- proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon")
- proto.RegisterType((*EphemeralVolumeSource)(nil), "k8s.io.api.core.v1.EphemeralVolumeSource")
- proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event")
- proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList")
- proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries")
- proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
- proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
- proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
- proto.RegisterType((*FileKeySelector)(nil), "k8s.io.api.core.v1.FileKeySelector")
- proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
- proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry")
- proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource")
- proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource")
- proto.RegisterType((*GRPCAction)(nil), "k8s.io.api.core.v1.GRPCAction")
- proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource")
- proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource")
- proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource")
- proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction")
- proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader")
- proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias")
- proto.RegisterType((*HostIP)(nil), "k8s.io.api.core.v1.HostIP")
- proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource")
- proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource")
- proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource")
- proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource")
- proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath")
- proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle")
- proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler")
- proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange")
- proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry")
- proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList")
- proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec")
- proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser")
- proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List")
- proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress")
- proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
- proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference")
- proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource")
- proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus")
- proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource")
- proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace")
- proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition")
- proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList")
- proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec")
- proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus")
- proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node")
- proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress")
- proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity")
- proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition")
- proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource")
- proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus")
- proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints")
- proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures")
- proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList")
- proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions")
- proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler")
- proto.RegisterType((*NodeRuntimeHandlerFeatures)(nil), "k8s.io.api.core.v1.NodeRuntimeHandlerFeatures")
- proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector")
- proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement")
- proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm")
- proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec")
- proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry")
- proto.RegisterType((*NodeSwapStatus)(nil), "k8s.io.api.core.v1.NodeSwapStatus")
- proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo")
- proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector")
- proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference")
- proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume")
- proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim")
- proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition")
- proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList")
- proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec")
- proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus")
- proto.RegisterMapType((map[ResourceName]ClaimResourceStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourceStatusesEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry")
- proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate")
- proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource")
- proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList")
- proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource")
- proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry")
- proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus")
- proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource")
- proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod")
- proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity")
- proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
- proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
- proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
- proto.RegisterType((*PodCertificateProjection)(nil), "k8s.io.api.core.v1.PodCertificateProjection")
- proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
- proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
- proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
- proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions")
- proto.RegisterType((*PodExtendedResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodExtendedResourceClaimStatus")
- proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP")
- proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList")
- proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
- proto.RegisterType((*PodOS)(nil), "k8s.io.api.core.v1.PodOS")
- proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions")
- proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions")
- proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate")
- proto.RegisterType((*PodResourceClaim)(nil), "k8s.io.api.core.v1.PodResourceClaim")
- proto.RegisterType((*PodResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodResourceClaimStatus")
- proto.RegisterType((*PodSchedulingGate)(nil), "k8s.io.api.core.v1.PodSchedulingGate")
- proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext")
- proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature")
- proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry")
- proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus")
- proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult")
- proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate")
- proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList")
- proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec")
- proto.RegisterType((*PortStatus)(nil), "k8s.io.api.core.v1.PortStatus")
- proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource")
- proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions")
- proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry")
- proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm")
- proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe")
- proto.RegisterType((*ProbeHandler)(nil), "k8s.io.api.core.v1.ProbeHandler")
- proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource")
- proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource")
- proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource")
- proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource")
- proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation")
- proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController")
- proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition")
- proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList")
- proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry")
- proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus")
- proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim")
- proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector")
- proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth")
- proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota")
- proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList")
- proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry")
- proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry")
- proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry")
- proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus")
- proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions")
- proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource")
- proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource")
- proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector")
- proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement")
- proto.RegisterType((*SeccompProfile)(nil), "k8s.io.api.core.v1.SeccompProfile")
- proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret")
- proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry")
- proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource")
- proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector")
- proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList")
- proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection")
- proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference")
- proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource")
- proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext")
- proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference")
- proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service")
- proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount")
- proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList")
- proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection")
- proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList")
- proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort")
- proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions")
- proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry")
- proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus")
- proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig")
- proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction")
- proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource")
- proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource")
- proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl")
- proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction")
- proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint")
- proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration")
- proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement")
- proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm")
- proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint")
- proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference")
- proto.RegisterType((*TypedObjectReference)(nil), "k8s.io.api.core.v1.TypedObjectReference")
- proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume")
- proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice")
- proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount")
- proto.RegisterType((*VolumeMountStatus)(nil), "k8s.io.api.core.v1.VolumeMountStatus")
- proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity")
- proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection")
- proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry")
- proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry")
- proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource")
- proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource")
- proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm")
- proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/core/v1/generated.proto", fileDescriptor_6c07b07c062484ab)
-}
-
-var fileDescriptor_6c07b07c062484ab = []byte{
- // 16665 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x5b, 0x90, 0x5c, 0x49,
- 0x76, 0x18, 0xb6, 0xb7, 0xaa, 0x9f, 0xa7, 0xdf, 0x89, 0x57, 0xa1, 0x07, 0x40, 0x61, 0xee, 0xcc,
- 0x60, 0x30, 0x3b, 0x33, 0x8d, 0xc5, 0x3c, 0x76, 0xb1, 0x33, 0xb3, 0xc3, 0xe9, 0x27, 0xd0, 0x03,
- 0x74, 0xa3, 0x26, 0xab, 0x01, 0xec, 0x63, 0x76, 0xb5, 0x17, 0x55, 0xd9, 0xdd, 0x77, 0xbb, 0xea,
- 0xde, 0x9a, 0x7b, 0x6f, 0x35, 0xd0, 0x30, 0x15, 0xa4, 0x56, 0xe6, 0x4a, 0x4b, 0xd2, 0x11, 0x1b,
- 0x0a, 0x4b, 0x72, 0x90, 0x0a, 0x7e, 0xe8, 0x45, 0xd2, 0xb4, 0x64, 0x52, 0xa4, 0x45, 0x59, 0x14,
- 0x29, 0xda, 0x96, 0x23, 0x68, 0x7f, 0xc8, 0x14, 0x23, 0xcc, 0x65, 0x58, 0xe1, 0x96, 0xd9, 0xb6,
- 0x42, 0xc1, 0x0f, 0x53, 0x0a, 0xda, 0x1f, 0x76, 0x87, 0x6c, 0x2a, 0xf2, 0x79, 0x33, 0xef, 0xab,
- 0xaa, 0x31, 0x40, 0xef, 0x70, 0x63, 0xfe, 0xaa, 0xf2, 0x9c, 0x3c, 0x99, 0x37, 0x1f, 0x27, 0x4f,
- 0x9e, 0x73, 0xf2, 0x1c, 0xb0, 0x77, 0xae, 0x85, 0x73, 0xae, 0x7f, 0xc5, 0xe9, 0xb8, 0x57, 0x1a,
- 0x7e, 0x40, 0xae, 0xec, 0x5e, 0xbd, 0xb2, 0x45, 0x3c, 0x12, 0x38, 0x11, 0x69, 0xce, 0x75, 0x02,
- 0x3f, 0xf2, 0x11, 0xe2, 0x38, 0x73, 0x4e, 0xc7, 0x9d, 0xa3, 0x38, 0x73, 0xbb, 0x57, 0x67, 0x5f,
- 0xdd, 0x72, 0xa3, 0xed, 0xee, 0xfd, 0xb9, 0x86, 0xdf, 0xbe, 0xb2, 0xe5, 0x6f, 0xf9, 0x57, 0x18,
- 0xea, 0xfd, 0xee, 0x26, 0xfb, 0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xec, 0x1b, 0x71, 0x33, 0x6d,
- 0xa7, 0xb1, 0xed, 0x7a, 0x24, 0xd8, 0xbb, 0xd2, 0xd9, 0xd9, 0x62, 0xed, 0x06, 0x24, 0xf4, 0xbb,
- 0x41, 0x83, 0x24, 0x1b, 0x2e, 0xac, 0x15, 0x5e, 0x69, 0x93, 0xc8, 0xc9, 0xe8, 0xee, 0xec, 0x95,
- 0xbc, 0x5a, 0x41, 0xd7, 0x8b, 0xdc, 0x76, 0xba, 0x99, 0xcf, 0xf7, 0xaa, 0x10, 0x36, 0xb6, 0x49,
- 0xdb, 0x49, 0xd5, 0x7b, 0x3d, 0xaf, 0x5e, 0x37, 0x72, 0x5b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20,
- 0x59, 0xc9, 0xfe, 0xbe, 0x05, 0x17, 0xe7, 0xef, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0x85,
- 0x96, 0xdf, 0xd8, 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0x6e, 0x9b, 0xd4, 0xd9, 0x40, 0xa0,
- 0x57, 0x60, 0x64, 0x97, 0xfd, 0x5f, 0x5d, 0xaa, 0x58, 0x17, 0xad, 0xcb, 0xa3, 0x0b, 0xd3, 0xbf,
- 0xb3, 0x5f, 0xfd, 0xcc, 0xc1, 0x7e, 0x75, 0xe4, 0xae, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x82, 0xa1,
- 0xcd, 0x70, 0x63, 0xaf, 0x43, 0x2a, 0x25, 0x86, 0x3b, 0x29, 0x70, 0x87, 0x56, 0xea, 0xb4, 0x14,
- 0x0b, 0x28, 0xba, 0x02, 0xa3, 0x1d, 0x27, 0x88, 0xdc, 0xc8, 0xf5, 0xbd, 0x4a, 0xf9, 0xa2, 0x75,
- 0x79, 0x70, 0x61, 0x46, 0xa0, 0x8e, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xb4, 0x1b, 0x01, 0x71, 0x9a,
- 0xb7, 0xbd, 0xd6, 0x5e, 0x65, 0xe0, 0xa2, 0x75, 0x79, 0x24, 0xee, 0x06, 0x16, 0xe5, 0x58, 0x61,
- 0xd8, 0x3f, 0x53, 0x82, 0x91, 0xf9, 0xcd, 0x4d, 0xd7, 0x73, 0xa3, 0x3d, 0x74, 0x17, 0xc6, 0x3d,
- 0xbf, 0x49, 0xe4, 0x7f, 0xf6, 0x15, 0x63, 0xaf, 0x5d, 0x9c, 0x4b, 0x2f, 0xa5, 0xb9, 0x75, 0x0d,
- 0x6f, 0x61, 0xfa, 0x60, 0xbf, 0x3a, 0xae, 0x97, 0x60, 0x83, 0x0e, 0xc2, 0x30, 0xd6, 0xf1, 0x9b,
- 0x8a, 0x6c, 0x89, 0x91, 0xad, 0x66, 0x91, 0xad, 0xc5, 0x68, 0x0b, 0x53, 0x07, 0xfb, 0xd5, 0x31,
- 0xad, 0x00, 0xeb, 0x44, 0xd0, 0x7d, 0x98, 0xa2, 0x7f, 0xbd, 0xc8, 0x55, 0x74, 0xcb, 0x8c, 0xee,
- 0x73, 0x79, 0x74, 0x35, 0xd4, 0x85, 0x13, 0x07, 0xfb, 0xd5, 0xa9, 0x44, 0x21, 0x4e, 0x12, 0xb4,
- 0x7f, 0xda, 0x82, 0xa9, 0xf9, 0x4e, 0x67, 0x3e, 0x68, 0xfb, 0x41, 0x2d, 0xf0, 0x37, 0xdd, 0x16,
- 0x41, 0x5f, 0x80, 0x81, 0x88, 0xce, 0x1a, 0x9f, 0xe1, 0xe7, 0xc4, 0xd0, 0x0e, 0xd0, 0xb9, 0x3a,
- 0xdc, 0xaf, 0x9e, 0x48, 0xa0, 0xb3, 0xa9, 0x64, 0x15, 0xd0, 0x7b, 0x30, 0xdd, 0xf2, 0x1b, 0x4e,
- 0x6b, 0xdb, 0x0f, 0x23, 0x01, 0x15, 0x53, 0x7f, 0xf2, 0x60, 0xbf, 0x3a, 0x7d, 0x2b, 0x01, 0xc3,
- 0x29, 0x6c, 0xfb, 0x11, 0x4c, 0xce, 0x47, 0x91, 0xd3, 0xd8, 0x26, 0x4d, 0xbe, 0xa0, 0xd0, 0x1b,
- 0x30, 0xe0, 0x39, 0x6d, 0xd9, 0x99, 0x8b, 0xb2, 0x33, 0xeb, 0x4e, 0x9b, 0x76, 0x66, 0xfa, 0x8e,
- 0xe7, 0x7e, 0xd4, 0x15, 0x8b, 0x94, 0x96, 0x61, 0x86, 0x8d, 0x5e, 0x03, 0x68, 0x92, 0x5d, 0xb7,
- 0x41, 0x6a, 0x4e, 0xb4, 0x2d, 0xfa, 0x80, 0x44, 0x5d, 0x58, 0x52, 0x10, 0xac, 0x61, 0xd9, 0x0f,
- 0x61, 0x74, 0x7e, 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x19, 0xa2, 0x1d, 0x98, 0xea, 0x04, 0x64, 0x93,
- 0x04, 0xaa, 0xa8, 0x62, 0x5d, 0x2c, 0x5f, 0x1e, 0x7b, 0xed, 0x72, 0xe6, 0xd8, 0x9b, 0xa8, 0xcb,
- 0x5e, 0x14, 0xec, 0x2d, 0x9c, 0x11, 0xed, 0x4d, 0x25, 0xa0, 0x38, 0x49, 0xd9, 0xfe, 0x67, 0x25,
- 0x38, 0x35, 0xff, 0xa8, 0x1b, 0x90, 0x25, 0x37, 0xdc, 0x49, 0x6e, 0xb8, 0xa6, 0x1b, 0xee, 0xac,
- 0xc7, 0x23, 0xa0, 0x56, 0xfa, 0x92, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0a, 0xc3, 0xf4, 0xf7, 0x1d,
- 0xbc, 0x2a, 0x3e, 0xf9, 0x84, 0x40, 0x1e, 0x5b, 0x72, 0x22, 0x67, 0x89, 0x83, 0xb0, 0xc4, 0x41,
- 0x6b, 0x30, 0xd6, 0x60, 0xfc, 0x61, 0x6b, 0xcd, 0x6f, 0x12, 0xb6, 0xb6, 0x46, 0x17, 0x5e, 0xa6,
- 0xe8, 0x8b, 0x71, 0xf1, 0xe1, 0x7e, 0xb5, 0xc2, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47,
- 0xb6, 0xda, 0xee, 0x03, 0x8c, 0x12, 0x64, 0x6c, 0xf5, 0xcb, 0xda, 0xce, 0x1d, 0x64, 0x3b, 0x77,
- 0x3c, 0x7b, 0xd7, 0xa2, 0xab, 0x30, 0xb0, 0xe3, 0x7a, 0xcd, 0xca, 0x10, 0xa3, 0x75, 0x9e, 0xce,
- 0xf9, 0x4d, 0xd7, 0x6b, 0x1e, 0xee, 0x57, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0xff,
- 0xcb, 0x82, 0x2a, 0x83, 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63,
- 0x40, 0x5f, 0x03, 0x08, 0x49, 0x23, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0xd4, 0x15, 0x04, 0x6b,
- 0x58, 0x94, 0x3f, 0x85, 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x9f, 0xea, 0x12, 0x80,
- 0x63, 0x1c, 0x83, 0x3f, 0x95, 0x7b, 0xf1, 0x27, 0xf4, 0x25, 0x98, 0x8a, 0x1b, 0x0b, 0x3b, 0x4e,
- 0x43, 0x0e, 0x20, 0xdb, 0xc1, 0x75, 0x13, 0x84, 0x93, 0xb8, 0xf6, 0x7f, 0x6e, 0x89, 0xc5, 0x43,
- 0xbf, 0xfa, 0x13, 0xfe, 0xad, 0xf6, 0x3f, 0xb2, 0x60, 0x78, 0xc1, 0xf5, 0x9a, 0xae, 0xb7, 0x85,
- 0xbe, 0x09, 0x23, 0xf4, 0xa8, 0x6c, 0x3a, 0x91, 0x23, 0xd8, 0xf0, 0xe7, 0xb4, 0xbd, 0xa5, 0x4e,
- 0xae, 0xb9, 0xce, 0xce, 0x16, 0x2d, 0x08, 0xe7, 0x28, 0x36, 0xdd, 0x6d, 0xb7, 0xef, 0x7f, 0x8b,
- 0x34, 0xa2, 0x35, 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x61, 0x28, 0x72,
- 0x82, 0x2d, 0x12, 0x09, 0x7e, 0x9c, 0xc9, 0x37, 0x79, 0x4d, 0x4c, 0x77, 0x24, 0xf1, 0x1a, 0x24,
- 0x3e, 0xa5, 0x36, 0x58, 0x55, 0x2c, 0x48, 0xd8, 0xff, 0xdf, 0x30, 0x9c, 0x5d, 0xac, 0xaf, 0xe6,
- 0xac, 0xab, 0x4b, 0x30, 0xd4, 0x0c, 0xdc, 0x5d, 0x12, 0x88, 0x71, 0x56, 0x54, 0x96, 0x58, 0x29,
- 0x16, 0x50, 0x74, 0x0d, 0xc6, 0xf9, 0xf9, 0x78, 0xc3, 0xf1, 0x9a, 0x31, 0x7b, 0x14, 0xd8, 0xe3,
- 0x77, 0x35, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0x25, 0x36, 0x63, 0xde, 0xd9, 0xfb, 0x5d,
- 0x0b, 0xa6, 0x79, 0x33, 0xf3, 0x51, 0x14, 0xb8, 0xf7, 0xbb, 0x11, 0x09, 0x2b, 0x83, 0x8c, 0xd3,
- 0x2d, 0x66, 0x8d, 0x56, 0xee, 0x08, 0xcc, 0xdd, 0x4d, 0x50, 0xe1, 0x4c, 0xb0, 0x22, 0xda, 0x9d,
- 0x4e, 0x82, 0x71, 0xaa, 0x59, 0xf4, 0x17, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16,
- 0x09, 0x6a, 0xdd, 0xfb, 0x2d, 0x37, 0xdc, 0xe6, 0xeb, 0x14, 0x93, 0x4d, 0xc6, 0x09, 0x72, 0xe6,
- 0x50, 0x21, 0x89, 0x39, 0xbc, 0x70, 0xb0, 0x5f, 0x9d, 0x5d, 0xcc, 0x25, 0x85, 0x0b, 0x9a, 0x41,
- 0x3b, 0x80, 0xe8, 0xc9, 0x5e, 0x8f, 0x9c, 0x2d, 0x12, 0x37, 0x3e, 0xdc, 0x7f, 0xe3, 0xa7, 0x0f,
- 0xf6, 0xab, 0x68, 0x3d, 0x45, 0x02, 0x67, 0x90, 0x45, 0x1f, 0xc1, 0x49, 0x5a, 0x9a, 0xfa, 0xd6,
- 0x91, 0xfe, 0x9b, 0xab, 0x1c, 0xec, 0x57, 0x4f, 0xae, 0x67, 0x10, 0xc1, 0x99, 0xa4, 0xd1, 0x8f,
- 0x5b, 0x70, 0x36, 0xfe, 0xfc, 0xe5, 0x87, 0x1d, 0xc7, 0x6b, 0xc6, 0x0d, 0x8f, 0xf6, 0xdf, 0x30,
- 0xe5, 0xc9, 0x67, 0x17, 0xf3, 0x28, 0xe1, 0xfc, 0x46, 0x90, 0x07, 0x27, 0x68, 0xd7, 0x92, 0x6d,
- 0x43, 0xff, 0x6d, 0x9f, 0x39, 0xd8, 0xaf, 0x9e, 0x58, 0x4f, 0xd3, 0xc0, 0x59, 0x84, 0x67, 0x17,
- 0xe1, 0x54, 0xe6, 0xea, 0x44, 0xd3, 0x50, 0xde, 0x21, 0x5c, 0x08, 0x1c, 0xc5, 0xf4, 0x27, 0x3a,
- 0x09, 0x83, 0xbb, 0x4e, 0xab, 0x2b, 0x36, 0x26, 0xe6, 0x7f, 0xde, 0x2a, 0x5d, 0xb3, 0xec, 0xff,
- 0xbe, 0x0c, 0x53, 0x8b, 0xf5, 0xd5, 0xc7, 0xda, 0xf5, 0xfa, 0xb1, 0x57, 0x2a, 0x3c, 0xf6, 0xe2,
- 0x43, 0xb4, 0x9c, 0x7b, 0x88, 0xfe, 0x58, 0xc6, 0x96, 0x1d, 0x60, 0x5b, 0xf6, 0x8b, 0x39, 0x5b,
- 0xf6, 0x09, 0x6f, 0xd4, 0xdd, 0x9c, 0x55, 0x3b, 0xc8, 0x26, 0x30, 0x53, 0x42, 0x62, 0xb2, 0x5f,
- 0x92, 0xd5, 0x1e, 0x71, 0xe9, 0x3e, 0x99, 0x79, 0x6c, 0xc0, 0xf8, 0xa2, 0xd3, 0x71, 0xee, 0xbb,
- 0x2d, 0x37, 0x72, 0x49, 0x88, 0x5e, 0x84, 0xb2, 0xd3, 0x6c, 0x32, 0xe9, 0x6e, 0x74, 0xe1, 0xd4,
- 0xc1, 0x7e, 0xb5, 0x3c, 0xdf, 0xa4, 0x62, 0x06, 0x28, 0xac, 0x3d, 0x4c, 0x31, 0xd0, 0x67, 0x61,
- 0xa0, 0x19, 0xf8, 0x9d, 0x4a, 0x89, 0x61, 0xd2, 0x5d, 0x3e, 0xb0, 0x14, 0xf8, 0x9d, 0x04, 0x2a,
- 0xc3, 0xb1, 0x7f, 0xbb, 0x04, 0xe7, 0x16, 0x49, 0x67, 0x7b, 0xa5, 0x9e, 0x73, 0x5e, 0x5c, 0x86,
- 0x91, 0xb6, 0xef, 0xb9, 0x91, 0x1f, 0x84, 0xa2, 0x69, 0xb6, 0x22, 0xd6, 0x44, 0x19, 0x56, 0x50,
- 0x74, 0x11, 0x06, 0x3a, 0xb1, 0x10, 0x3b, 0x2e, 0x05, 0x60, 0x26, 0xbe, 0x32, 0x08, 0xc5, 0xe8,
- 0x86, 0x24, 0x10, 0x2b, 0x46, 0x61, 0xdc, 0x09, 0x49, 0x80, 0x19, 0x24, 0x96, 0x04, 0xa8, 0x8c,
- 0x20, 0x4e, 0x84, 0x84, 0x24, 0x40, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x18, 0x0d, 0x13, 0x33, 0xdb,
- 0xd7, 0xd6, 0x9c, 0x60, 0xa2, 0x82, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0x82, 0x0d, 0xf5, 0x14, 0x15,
- 0x7e, 0xa3, 0x04, 0x88, 0x0f, 0xe1, 0x9f, 0xb1, 0x81, 0xbb, 0x93, 0x1e, 0xb8, 0xfe, 0xb7, 0xc4,
- 0x93, 0x1a, 0xbd, 0xff, 0xdb, 0x82, 0x73, 0x8b, 0xae, 0xd7, 0x24, 0x41, 0xce, 0x02, 0x7c, 0x3a,
- 0x57, 0xf9, 0xa3, 0x09, 0x29, 0xc6, 0x12, 0x1b, 0x78, 0x02, 0x4b, 0xcc, 0xfe, 0xb7, 0x16, 0x20,
- 0xfe, 0xd9, 0x9f, 0xb8, 0x8f, 0xbd, 0x93, 0xfe, 0xd8, 0x27, 0xb0, 0x2c, 0xec, 0x5b, 0x30, 0xb9,
- 0xd8, 0x72, 0x89, 0x17, 0xad, 0xd6, 0x16, 0x7d, 0x6f, 0xd3, 0xdd, 0x42, 0x6f, 0xc1, 0x64, 0xe4,
- 0xb6, 0x89, 0xdf, 0x8d, 0xea, 0xa4, 0xe1, 0x7b, 0xec, 0xe6, 0x6a, 0x5d, 0x1e, 0x5c, 0x40, 0x07,
- 0xfb, 0xd5, 0xc9, 0x0d, 0x03, 0x82, 0x13, 0x98, 0xf6, 0xdf, 0xa5, 0x7c, 0xab, 0xd5, 0x0d, 0x23,
- 0x12, 0x6c, 0x04, 0xdd, 0x30, 0x5a, 0xe8, 0x52, 0xd9, 0xb3, 0x16, 0xf8, 0xb4, 0x3b, 0xae, 0xef,
- 0xa1, 0x73, 0xc6, 0x75, 0x7c, 0x44, 0x5e, 0xc5, 0xc5, 0xb5, 0x7b, 0x0e, 0x20, 0x74, 0xb7, 0x3c,
- 0x12, 0x68, 0xd7, 0x87, 0x49, 0xb6, 0x55, 0x54, 0x29, 0xd6, 0x30, 0x50, 0x0b, 0x26, 0x5a, 0xce,
- 0x7d, 0xd2, 0xaa, 0x93, 0x16, 0x69, 0x44, 0x7e, 0x20, 0xf4, 0x1b, 0xaf, 0xf7, 0x77, 0x0f, 0xb8,
- 0xa5, 0x57, 0x5d, 0x98, 0x39, 0xd8, 0xaf, 0x4e, 0x18, 0x45, 0xd8, 0x24, 0x4e, 0x59, 0x87, 0xdf,
- 0xa1, 0x5f, 0xe1, 0xb4, 0xf4, 0xcb, 0xe7, 0x6d, 0x51, 0x86, 0x15, 0x54, 0xb1, 0x8e, 0x81, 0x3c,
- 0xd6, 0x61, 0xff, 0x4b, 0xba, 0xd0, 0xfc, 0x76, 0xc7, 0xf7, 0x88, 0x17, 0x2d, 0xfa, 0x5e, 0x93,
- 0x6b, 0xa6, 0xde, 0x32, 0x54, 0x27, 0x97, 0x12, 0xaa, 0x93, 0xd3, 0xe9, 0x1a, 0x9a, 0xf6, 0xe4,
- 0x8b, 0x30, 0x14, 0x46, 0x4e, 0xd4, 0x0d, 0xc5, 0xc0, 0x3d, 0x2b, 0x97, 0x5d, 0x9d, 0x95, 0x1e,
- 0xee, 0x57, 0xa7, 0x54, 0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x12, 0x0c, 0xb7, 0x49, 0x18, 0x3a,
- 0x5b, 0x52, 0x6c, 0x98, 0x12, 0x75, 0x87, 0xd7, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x1c, 0x0c, 0x92,
- 0x20, 0xf0, 0x03, 0xf1, 0x6d, 0x13, 0x02, 0x71, 0x70, 0x99, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x27,
- 0x0b, 0xa6, 0x54, 0x5f, 0x79, 0x5b, 0xc7, 0x70, 0x5d, 0xfb, 0x2a, 0x40, 0x43, 0x7e, 0x60, 0xc8,
- 0x8e, 0xd9, 0xb1, 0xd7, 0x2e, 0x65, 0x4a, 0x34, 0xa9, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b,
- 0xd4, 0xec, 0xdf, 0xb4, 0xe0, 0x44, 0xe2, 0x8b, 0x6e, 0xb9, 0x61, 0x84, 0x3e, 0x4c, 0x7d, 0xd5,
- 0x5c, 0x9f, 0x8b, 0xcf, 0x0d, 0xf9, 0x37, 0xa9, 0x3d, 0x2f, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0x83,
- 0x6e, 0x44, 0xda, 0xf2, 0x63, 0x9e, 0x2b, 0xfc, 0x18, 0xde, 0xab, 0x78, 0x46, 0x56, 0x69, 0x4d,
- 0xcc, 0x09, 0xd8, 0xbf, 0x5d, 0x86, 0x51, 0xbe, 0xbf, 0xd7, 0x9c, 0xce, 0x31, 0xcc, 0xc5, 0xcb,
- 0x30, 0xea, 0xb6, 0xdb, 0xdd, 0xc8, 0xb9, 0x2f, 0xce, 0xbd, 0x11, 0xce, 0x83, 0x56, 0x65, 0x21,
- 0x8e, 0xe1, 0x68, 0x15, 0x06, 0x58, 0x57, 0xf8, 0x57, 0xbe, 0x98, 0xfd, 0x95, 0xa2, 0xef, 0x73,
- 0x4b, 0x4e, 0xe4, 0x70, 0x91, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x77,
- 0x3d, 0x27, 0xd8, 0xa3, 0x65, 0x95, 0x32, 0x23, 0xf8, 0x6a, 0x31, 0xc1, 0x05, 0x85, 0xcf, 0xc9,
- 0xaa, 0x0f, 0x8b, 0x01, 0x58, 0x23, 0x3a, 0xfb, 0x05, 0x18, 0x55, 0xc8, 0x47, 0x91, 0x1c, 0x67,
- 0xbf, 0x04, 0x53, 0x89, 0xb6, 0x7a, 0x55, 0x1f, 0xd7, 0x05, 0xcf, 0x7f, 0xcc, 0x58, 0x86, 0xe8,
- 0xf5, 0xb2, 0xb7, 0x2b, 0xce, 0xa6, 0x47, 0x70, 0xb2, 0x95, 0xc1, 0xf2, 0xc5, 0xbc, 0xf6, 0x7f,
- 0x44, 0x9c, 0x13, 0x9f, 0x7d, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x30, 0x38, 0x62, 0xa9, 0x88, 0x23,
- 0x52, 0x7e, 0x77, 0x52, 0x75, 0xfe, 0x26, 0xd9, 0x53, 0x4c, 0xf5, 0x07, 0xd9, 0xfd, 0xf3, 0x7c,
- 0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0xca, 0x37, 0xc9, 0x1e, 0x9f, 0x0a, 0xfd, 0xeb, 0xca, 0x85,
- 0x5f, 0xf7, 0x2b, 0x16, 0x4c, 0xa8, 0xaf, 0x3b, 0x06, 0xbe, 0xb0, 0x60, 0xf2, 0x85, 0xf3, 0x85,
- 0x0b, 0x3c, 0x87, 0x23, 0xfc, 0x46, 0x09, 0xce, 0x2a, 0x1c, 0x7a, 0x89, 0xe2, 0x7f, 0xc4, 0xaa,
- 0xba, 0x02, 0xa3, 0x9e, 0x52, 0x27, 0x5a, 0xa6, 0x1e, 0x2f, 0x56, 0x26, 0xc6, 0x38, 0xf4, 0xc8,
- 0xf3, 0xe2, 0x43, 0x7b, 0x5c, 0xd7, 0xb3, 0x8b, 0xc3, 0x7d, 0x01, 0xca, 0x5d, 0xb7, 0x29, 0x0e,
- 0x98, 0xcf, 0xc9, 0xd1, 0xbe, 0xb3, 0xba, 0x74, 0xb8, 0x5f, 0x7d, 0x36, 0xcf, 0xe4, 0x44, 0x4f,
- 0xb6, 0x70, 0xee, 0xce, 0xea, 0x12, 0xa6, 0x95, 0xd1, 0x3c, 0x4c, 0x49, 0xab, 0xda, 0x5d, 0x2a,
- 0x97, 0xfa, 0x9e, 0x38, 0x87, 0x94, 0xb2, 0x1c, 0x9b, 0x60, 0x9c, 0xc4, 0x47, 0x4b, 0x30, 0xbd,
- 0xd3, 0xbd, 0x4f, 0x5a, 0x24, 0xe2, 0x1f, 0x7c, 0x93, 0x70, 0x55, 0xf2, 0x68, 0x7c, 0x85, 0xbd,
- 0x99, 0x80, 0xe3, 0x54, 0x0d, 0xfb, 0x4f, 0xd9, 0x79, 0x20, 0x46, 0x4f, 0x93, 0x6f, 0x7e, 0x90,
- 0xcb, 0xb9, 0x9f, 0x55, 0x71, 0x93, 0xec, 0x6d, 0xf8, 0x54, 0x0e, 0xc9, 0x5e, 0x15, 0xc6, 0x9a,
- 0x1f, 0x28, 0x5c, 0xf3, 0xbf, 0x56, 0x82, 0x53, 0x6a, 0x04, 0x0c, 0x69, 0xf9, 0xcf, 0xfa, 0x18,
- 0x5c, 0x85, 0xb1, 0x26, 0xd9, 0x74, 0xba, 0xad, 0x48, 0xd9, 0x35, 0x06, 0xb9, 0xa9, 0x6d, 0x29,
- 0x2e, 0xc6, 0x3a, 0xce, 0x11, 0x86, 0xed, 0x6f, 0x4d, 0xb2, 0x83, 0x38, 0x72, 0xe8, 0x1a, 0x57,
- 0xbb, 0xc6, 0xca, 0xdd, 0x35, 0xcf, 0xc1, 0xa0, 0xdb, 0xa6, 0x82, 0x59, 0xc9, 0x94, 0xb7, 0x56,
- 0x69, 0x21, 0xe6, 0x30, 0xf4, 0x02, 0x0c, 0x37, 0xfc, 0x76, 0xdb, 0xf1, 0x9a, 0xec, 0xc8, 0x1b,
- 0x5d, 0x18, 0xa3, 0xb2, 0xdb, 0x22, 0x2f, 0xc2, 0x12, 0x46, 0x85, 0x6f, 0x27, 0xd8, 0xe2, 0xca,
- 0x1e, 0x21, 0x7c, 0xcf, 0x07, 0x5b, 0x21, 0x66, 0xa5, 0xf4, 0xae, 0xfa, 0xc0, 0x0f, 0x76, 0x5c,
- 0x6f, 0x6b, 0xc9, 0x0d, 0xc4, 0x96, 0x50, 0x67, 0xe1, 0x3d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x81,
- 0xc1, 0x8e, 0x1f, 0x44, 0x61, 0x65, 0x88, 0x0d, 0xf7, 0xb3, 0x39, 0x8c, 0x88, 0x7f, 0x6d, 0xcd,
- 0x0f, 0xa2, 0xf8, 0x03, 0xe8, 0xbf, 0x10, 0xf3, 0xea, 0xe8, 0x16, 0x0c, 0x13, 0x6f, 0x77, 0x25,
- 0xf0, 0xdb, 0x95, 0x13, 0xf9, 0x94, 0x96, 0x39, 0x0a, 0x5f, 0x66, 0xb1, 0x8c, 0x2a, 0x8a, 0xb1,
- 0x24, 0x81, 0xbe, 0x08, 0x65, 0xe2, 0xed, 0x56, 0x86, 0x19, 0xa5, 0xd9, 0x1c, 0x4a, 0x77, 0x9d,
- 0x20, 0xe6, 0xf9, 0xcb, 0xde, 0x2e, 0xa6, 0x75, 0xd0, 0x57, 0x60, 0x54, 0x32, 0x8c, 0x50, 0x68,
- 0x51, 0x33, 0x17, 0xac, 0x64, 0x33, 0x98, 0x7c, 0xd4, 0x75, 0x03, 0xd2, 0x26, 0x5e, 0x14, 0xc6,
- 0x1c, 0x52, 0x42, 0x43, 0x1c, 0x53, 0x43, 0x0d, 0x18, 0x0f, 0x48, 0xe8, 0x3e, 0x22, 0x35, 0xbf,
- 0xe5, 0x36, 0xf6, 0x2a, 0x67, 0x58, 0xf7, 0x5e, 0x2a, 0x1c, 0x32, 0xac, 0x55, 0x88, 0xb5, 0xfc,
- 0x7a, 0x29, 0x36, 0x88, 0xa2, 0x0f, 0x60, 0x22, 0x20, 0x61, 0xe4, 0x04, 0x91, 0x68, 0xa5, 0xa2,
- 0xac, 0x72, 0x13, 0x58, 0x07, 0xf0, 0xeb, 0x44, 0xdc, 0x4c, 0x0c, 0xc1, 0x26, 0x05, 0x14, 0x01,
- 0x32, 0x0a, 0x70, 0xb7, 0x45, 0xc2, 0xca, 0xd9, 0x7c, 0x6b, 0x66, 0x92, 0x2c, 0xad, 0xb0, 0x30,
- 0x2b, 0x3a, 0x8f, 0x70, 0x8a, 0x16, 0xce, 0xa0, 0x8f, 0xbe, 0x22, 0x0d, 0x1d, 0x6b, 0x7e, 0xd7,
- 0x8b, 0xc2, 0xca, 0x28, 0x6b, 0x2f, 0xd3, 0x22, 0x7e, 0x37, 0xc6, 0x4b, 0x5a, 0x42, 0x78, 0x65,
- 0x6c, 0x90, 0x42, 0x5f, 0x87, 0x09, 0xfe, 0x9f, 0x1b, 0x72, 0xc3, 0xca, 0x29, 0x46, 0xfb, 0x62,
- 0x3e, 0x6d, 0x8e, 0xb8, 0x70, 0x4a, 0x10, 0x9f, 0xd0, 0x4b, 0x43, 0x6c, 0x52, 0x43, 0x18, 0x26,
- 0x5a, 0xee, 0x2e, 0xf1, 0x48, 0x18, 0xd6, 0x02, 0xff, 0x3e, 0x11, 0x7a, 0xe9, 0xb3, 0xd9, 0x86,
- 0x5f, 0xff, 0x3e, 0x11, 0x57, 0x4f, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x3b, 0x30, 0x19, 0x10, 0xa7,
- 0xe9, 0xc6, 0x44, 0xc7, 0x7a, 0x11, 0x65, 0xd7, 0x75, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x36,
- 0x8c, 0xb3, 0x81, 0xef, 0x76, 0x38, 0xd1, 0xd3, 0xbd, 0x88, 0x32, 0x37, 0x86, 0xba, 0x56, 0x05,
- 0x1b, 0x04, 0xd0, 0xfb, 0x30, 0xda, 0x72, 0x37, 0x49, 0x63, 0xaf, 0xd1, 0x22, 0x95, 0x71, 0x46,
- 0x2d, 0x93, 0x05, 0xdf, 0x92, 0x48, 0xfc, 0x56, 0xa0, 0xfe, 0xe2, 0xb8, 0x3a, 0xba, 0x0b, 0xa7,
- 0x23, 0x12, 0xb4, 0x5d, 0xcf, 0xa1, 0xac, 0x53, 0x5c, 0x44, 0x99, 0x3d, 0x7e, 0x82, 0xad, 0xe9,
- 0x0b, 0x62, 0x36, 0x4e, 0x6f, 0x64, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x21, 0x54, 0x32, 0x20, 0x7c,
- 0xb7, 0x9c, 0x64, 0x94, 0xdf, 0x11, 0x94, 0x2b, 0x1b, 0x39, 0x78, 0x87, 0x05, 0x30, 0x9c, 0x4b,
- 0x1d, 0xdd, 0x86, 0x29, 0xc6, 0xaf, 0x6b, 0xdd, 0x56, 0x4b, 0x34, 0x38, 0xc9, 0x1a, 0x7c, 0x41,
- 0x4a, 0x2f, 0xab, 0x26, 0xf8, 0x70, 0xbf, 0x0a, 0xf1, 0x3f, 0x9c, 0xac, 0x8d, 0xee, 0x33, 0xd3,
- 0x6f, 0x37, 0x70, 0xa3, 0x3d, 0xba, 0xe9, 0xc8, 0xc3, 0xa8, 0x32, 0x55, 0xa8, 0x06, 0xd3, 0x51,
- 0x95, 0x7d, 0x58, 0x2f, 0xc4, 0x49, 0x82, 0xf4, 0x00, 0x0a, 0xa3, 0xa6, 0xeb, 0x55, 0xa6, 0xf9,
- 0x2d, 0x4e, 0xf2, 0xef, 0x3a, 0x2d, 0xc4, 0x1c, 0xc6, 0xcc, 0xbe, 0xf4, 0xc7, 0x6d, 0x7a, 0xce,
- 0xcf, 0x30, 0xc4, 0xd8, 0xec, 0x2b, 0x01, 0x38, 0xc6, 0xa1, 0xa2, 0x77, 0x14, 0xed, 0x55, 0x10,
- 0x43, 0x55, 0x6c, 0x78, 0x63, 0xe3, 0x2b, 0x98, 0x96, 0xdb, 0xbf, 0x6b, 0xc1, 0x45, 0xc5, 0x46,
- 0x96, 0x1f, 0x46, 0xc4, 0x6b, 0x92, 0xa6, 0xce, 0x73, 0x49, 0x18, 0xa1, 0xb7, 0x61, 0xa2, 0x21,
- 0x71, 0x34, 0x13, 0xb5, 0xda, 0xa5, 0x8b, 0x3a, 0x10, 0x9b, 0xb8, 0xe8, 0x1a, 0xe3, 0xc6, 0x8c,
- 0x9e, 0xa6, 0x6c, 0xd2, 0x59, 0xac, 0x82, 0x61, 0x03, 0x13, 0xbd, 0x09, 0x63, 0x01, 0xef, 0x01,
- 0xab, 0x58, 0x36, 0x3d, 0x25, 0x70, 0x0c, 0xc2, 0x3a, 0x9e, 0x7d, 0x1f, 0x26, 0x55, 0x87, 0xd8,
- 0x34, 0xa3, 0x2a, 0x0c, 0x32, 0xf9, 0x59, 0xe8, 0xa1, 0x47, 0xe9, 0xa8, 0x32, 0xd9, 0x1a, 0xf3,
- 0x72, 0x36, 0xaa, 0xee, 0x23, 0xb2, 0xb0, 0x17, 0x11, 0xae, 0xd4, 0x29, 0x6b, 0xa3, 0x2a, 0x01,
- 0x38, 0xc6, 0xb1, 0xff, 0x7f, 0x7e, 0x0f, 0x89, 0x8f, 0xdb, 0x3e, 0x04, 0x8c, 0x57, 0x60, 0x84,
- 0x79, 0xd0, 0xf8, 0x01, 0x37, 0x73, 0x0f, 0xc6, 0x37, 0x8f, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x63,
- 0xcc, 0x59, 0x15, 0x2e, 0x1d, 0xa5, 0xc7, 0x9c, 0xd5, 0x33, 0x71, 0xd1, 0x35, 0x18, 0x61, 0xce,
- 0x62, 0x0d, 0xbf, 0x25, 0xc4, 0x76, 0x29, 0xe2, 0x8d, 0xd4, 0x44, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b,
- 0x6c, 0x74, 0x09, 0x86, 0x68, 0x17, 0x56, 0x6b, 0x42, 0x2e, 0x51, 0x2a, 0xd5, 0x1b, 0xac, 0x14,
- 0x0b, 0xa8, 0xfd, 0x9b, 0x16, 0x13, 0x4a, 0xd3, 0x87, 0x27, 0xba, 0x91, 0x98, 0x6f, 0x3e, 0x20,
- 0xcf, 0x67, 0xcd, 0xf7, 0x61, 0xf1, 0xfc, 0x7f, 0x35, 0x79, 0xc4, 0xf2, 0xa5, 0xf3, 0x86, 0x1c,
- 0x82, 0xe4, 0x31, 0xfb, 0x4c, 0xbc, 0x6e, 0x69, 0x7f, 0x8a, 0xce, 0x5a, 0xfb, 0xb7, 0xf8, 0x35,
- 0x39, 0x75, 0x7c, 0xa2, 0x25, 0x18, 0x72, 0xd8, 0x0d, 0x43, 0x74, 0xfc, 0x15, 0x39, 0x00, 0xf3,
- 0xac, 0xf4, 0x50, 0xd8, 0xab, 0x93, 0xf5, 0x38, 0x14, 0x8b, 0xba, 0xe8, 0x9b, 0x30, 0x4a, 0x1e,
- 0xba, 0xd1, 0xa2, 0xdf, 0x14, 0x0b, 0xca, 0xd4, 0x95, 0x16, 0x9e, 0xe0, 0xb7, 0xbd, 0x65, 0x59,
- 0x95, 0x33, 0x6d, 0xf5, 0x17, 0xc7, 0x44, 0xed, 0x9f, 0xb3, 0xa0, 0xda, 0xa3, 0x36, 0xba, 0x47,
- 0x85, 0x65, 0x12, 0x38, 0x91, 0x2f, 0xed, 0x9e, 0x6f, 0xcb, 0x65, 0x70, 0x5b, 0x94, 0x1f, 0xee,
- 0x57, 0x5f, 0xec, 0x41, 0x46, 0xa2, 0x62, 0x45, 0x0c, 0xd9, 0x30, 0xc4, 0xd4, 0x25, 0x5c, 0xfa,
- 0x1f, 0xe4, 0xc6, 0xcf, 0xbb, 0xac, 0x04, 0x0b, 0x88, 0xfd, 0x57, 0x4a, 0xda, 0x3e, 0xac, 0x47,
- 0x4e, 0x44, 0x50, 0x0d, 0x86, 0x1f, 0x38, 0x6e, 0xe4, 0x7a, 0x5b, 0xe2, 0x8a, 0x52, 0x2c, 0x93,
- 0xb1, 0x4a, 0xf7, 0x78, 0x05, 0x2e, 0x68, 0x8b, 0x3f, 0x58, 0x92, 0xa1, 0x14, 0x83, 0xae, 0xe7,
- 0x51, 0x8a, 0xa5, 0x7e, 0x29, 0x62, 0x5e, 0x81, 0x53, 0x14, 0x7f, 0xb0, 0x24, 0x83, 0x3e, 0x04,
- 0x90, 0xc7, 0x0a, 0x69, 0x0a, 0x35, 0xf7, 0x2b, 0xbd, 0x89, 0x6e, 0xa8, 0x3a, 0x5c, 0x8f, 0x1e,
- 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0xd2, 0x76, 0x8d, 0xde, 0x19, 0xf4, 0x35, 0xca, 0xd7, 0x9d, 0x20,
- 0x22, 0xcd, 0xf9, 0x48, 0x0c, 0xce, 0x67, 0xfb, 0xd3, 0x63, 0x6c, 0xb8, 0x6d, 0xa2, 0x9f, 0x01,
- 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x5e, 0x86, 0x4a, 0x5e, 0x77, 0x29, 0x5b, 0x92, 0xab, 0x4a,
- 0xd8, 0x1f, 0x14, 0x5b, 0x92, 0x4b, 0x00, 0x2b, 0x0c, 0xca, 0x1f, 0x42, 0x77, 0x4b, 0xaa, 0xa1,
- 0x06, 0x63, 0xfe, 0x50, 0x67, 0xa5, 0x58, 0x40, 0x29, 0x5e, 0x40, 0x9c, 0x50, 0xf8, 0x89, 0x6a,
- 0x7c, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0x42, 0x7c, 0xa0, 0x87, 0x42, 0xdc, 0x18, 0xa2, 0xc1,
- 0x27, 0x3b, 0x44, 0xe8, 0x1b, 0x00, 0x9b, 0xae, 0xe7, 0x86, 0xdb, 0x8c, 0xfa, 0xd0, 0x91, 0xa9,
- 0xab, 0xfb, 0xdb, 0x8a, 0xa2, 0x82, 0x35, 0x8a, 0xf4, 0x2c, 0x53, 0x2c, 0x7a, 0x75, 0x89, 0x79,
- 0xa9, 0x68, 0x67, 0x59, 0x7c, 0x5e, 0x2d, 0x61, 0x1d, 0xcf, 0xfe, 0x56, 0x72, 0xbd, 0x88, 0x1d,
- 0xa0, 0x8d, 0xaf, 0xd5, 0xef, 0xf8, 0x96, 0x8a, 0xc7, 0xd7, 0xfe, 0x17, 0xa3, 0x30, 0x65, 0x34,
- 0xd6, 0x0d, 0xfb, 0x38, 0xd5, 0xae, 0x53, 0xa9, 0xc5, 0x89, 0x88, 0xd8, 0x7f, 0x76, 0xef, 0xad,
- 0xa2, 0x4b, 0x36, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x1b, 0x30, 0xda, 0x72, 0x42, 0xa6, 0x5c, 0x27,
- 0x62, 0xdf, 0xf5, 0x43, 0x2c, 0xd6, 0x5d, 0x38, 0x61, 0xa4, 0x89, 0x8a, 0x9c, 0x76, 0x4c, 0x92,
- 0x8a, 0x57, 0x54, 0x28, 0x97, 0x8e, 0xc8, 0xaa, 0x13, 0x54, 0x72, 0xdf, 0xc3, 0x1c, 0x26, 0x84,
- 0x15, 0xba, 0x2a, 0x16, 0xe9, 0x15, 0x86, 0x2d, 0xb3, 0x41, 0x43, 0x58, 0x51, 0x30, 0x6c, 0x60,
- 0xc6, 0xea, 0x83, 0xa1, 0x02, 0xf5, 0xc1, 0x4b, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0x63,
- 0x95, 0x17, 0x63, 0x09, 0x4f, 0x2e, 0x98, 0x91, 0xfe, 0x16, 0x0c, 0x7a, 0x01, 0x86, 0xc5, 0xa2,
- 0x66, 0x1e, 0x42, 0x23, 0x9c, 0xcb, 0x89, 0x25, 0x8f, 0x25, 0x0c, 0xfd, 0xbc, 0x05, 0xc8, 0x69,
- 0xb5, 0xfc, 0x06, 0xe3, 0x50, 0xea, 0x1e, 0x0e, 0xec, 0x7e, 0xf6, 0x76, 0xcf, 0x61, 0xef, 0x86,
- 0x73, 0xf3, 0xa9, 0xda, 0x5c, 0xa9, 0xff, 0x96, 0xbc, 0x7e, 0xa6, 0x11, 0xf4, 0xe3, 0xfe, 0x96,
- 0x1b, 0x46, 0xdf, 0xfe, 0x57, 0x89, 0xe3, 0x3f, 0xa3, 0x4b, 0xe8, 0x8e, 0xae, 0x27, 0x18, 0x3b,
- 0xa2, 0x9e, 0x60, 0x22, 0x57, 0x47, 0xf0, 0xe7, 0x12, 0xb7, 0xde, 0x71, 0xf6, 0xe5, 0x2f, 0xf4,
- 0xb8, 0xf5, 0x0a, 0xcb, 0x4f, 0x3f, 0x77, 0xdf, 0x9a, 0x70, 0x59, 0x98, 0x60, 0x5d, 0x2e, 0xd6,
- 0xd7, 0xdc, 0x09, 0x49, 0xb0, 0x70, 0x56, 0x7a, 0x34, 0x1c, 0xea, 0xd2, 0x9d, 0xe6, 0xe2, 0xf0,
- 0xe3, 0x16, 0x54, 0xd2, 0x03, 0xc4, 0xbb, 0x54, 0x99, 0x64, 0xfd, 0xb7, 0x8b, 0x46, 0x46, 0x74,
- 0x5e, 0x7a, 0x66, 0x57, 0xe6, 0x73, 0x68, 0xe1, 0xdc, 0x56, 0xd0, 0x35, 0x80, 0x30, 0xf2, 0x3b,
- 0x9c, 0xd7, 0xb3, 0x1b, 0xd0, 0x28, 0xf3, 0x0d, 0x82, 0xba, 0x2a, 0x3d, 0x8c, 0xcf, 0x02, 0x0d,
- 0x77, 0xb6, 0x0b, 0x67, 0x72, 0x56, 0x4c, 0x86, 0x69, 0x66, 0x49, 0x37, 0xcd, 0xf4, 0x50, 0xe8,
- 0xcf, 0xc9, 0x39, 0x9d, 0xfb, 0xa0, 0xeb, 0x78, 0x91, 0x1b, 0xed, 0xe9, 0xa6, 0x1c, 0x0f, 0xcc,
- 0xa1, 0x44, 0x5f, 0x87, 0xc1, 0x96, 0xeb, 0x75, 0x1f, 0x8a, 0x33, 0xf6, 0x52, 0xf6, 0x9d, 0xd9,
- 0xeb, 0x3e, 0x34, 0x27, 0xa7, 0x4a, 0xb7, 0x32, 0x2b, 0x3f, 0xdc, 0xaf, 0xa2, 0x34, 0x02, 0xe6,
- 0x54, 0xed, 0xcf, 0xc2, 0xe4, 0x92, 0x43, 0xda, 0xbe, 0xb7, 0xec, 0x35, 0x3b, 0xbe, 0xeb, 0x45,
- 0xa8, 0x02, 0x03, 0x4c, 0x7c, 0xe7, 0x47, 0xeb, 0x00, 0x1d, 0x7c, 0xcc, 0x4a, 0xec, 0x2d, 0x38,
- 0xb5, 0xe4, 0x3f, 0xf0, 0x1e, 0x38, 0x41, 0x73, 0xbe, 0xb6, 0xaa, 0xa9, 0xb6, 0xd7, 0xa5, 0x6a,
- 0xd5, 0xca, 0x57, 0x5c, 0x69, 0x35, 0xf9, 0x22, 0x5c, 0x71, 0x5b, 0x24, 0xc7, 0x00, 0xf1, 0xd7,
- 0x4b, 0x46, 0x4b, 0x31, 0xbe, 0x32, 0x9f, 0x5b, 0xb9, 0x9e, 0x37, 0x1f, 0xc0, 0xc8, 0xa6, 0x4b,
- 0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x6c, 0xbc, 0x98, 0xef, 0x9b, 0xbb, 0x42, 0x31, 0x95, 0x9d, 0x9f,
- 0x29, 0x66, 0x57, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x96, 0x73, 0x26, 0xa1, 0x82, 0xdf,
- 0xbf, 0x54, 0xb4, 0x7c, 0x4d, 0xe2, 0xec, 0x9d, 0x02, 0x4e, 0x90, 0xc1, 0x29, 0xc2, 0xe8, 0x1c,
- 0x0c, 0xb4, 0xa9, 0x64, 0x33, 0xc0, 0x86, 0x9f, 0x69, 0x62, 0x99, 0x52, 0x99, 0x95, 0xda, 0x7f,
- 0xc3, 0x82, 0x33, 0xa9, 0x91, 0x11, 0xca, 0xf5, 0x27, 0x3c, 0x0b, 0x49, 0x65, 0x77, 0xa9, 0xb7,
- 0xb2, 0xdb, 0xfe, 0x2f, 0x2c, 0x38, 0xb9, 0xdc, 0xee, 0x44, 0x7b, 0x4b, 0xae, 0xe9, 0x26, 0xf3,
- 0x05, 0x18, 0x6a, 0x93, 0xa6, 0xdb, 0x6d, 0x8b, 0x99, 0xab, 0xca, 0xd3, 0x7f, 0x8d, 0x95, 0x52,
- 0x0e, 0x52, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, 0x26, 0x43, 0xb9, 0x8f, 0xc8,
- 0x2d, 0xb7, 0xed, 0x46, 0x8f, 0xb7, 0xbb, 0x84, 0x87, 0x8b, 0x24, 0x82, 0x63, 0x7a, 0xf6, 0xf7,
- 0x2d, 0x98, 0x92, 0xeb, 0x7e, 0xbe, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0x28, 0xb9, 0x1d, 0xd1,
- 0x4b, 0x10, 0xbd, 0x2c, 0xad, 0xd6, 0x70, 0xc9, 0xed, 0xc8, 0x0b, 0xb1, 0x17, 0x5f, 0xee, 0x8d,
- 0x0b, 0xb1, 0xc7, 0xde, 0x4c, 0x48, 0x0c, 0x74, 0x19, 0x46, 0x3c, 0xbf, 0xc9, 0xef, 0x94, 0xc2,
- 0xdd, 0x83, 0x62, 0xae, 0x8b, 0x32, 0xac, 0xa0, 0xa8, 0x06, 0xa3, 0xdc, 0x15, 0x3c, 0x5e, 0xb4,
- 0x7d, 0x39, 0x94, 0xb3, 0x2f, 0xdb, 0x90, 0x35, 0x71, 0x4c, 0xc4, 0xfe, 0xa7, 0x16, 0x8c, 0xcb,
- 0x2f, 0xeb, 0xf3, 0xb6, 0x4f, 0xb7, 0x56, 0x7c, 0xd3, 0x8f, 0xb7, 0x16, 0xbd, 0xad, 0x33, 0x88,
- 0x71, 0x49, 0x2f, 0x1f, 0xe9, 0x92, 0x7e, 0x15, 0xc6, 0x9c, 0x4e, 0xa7, 0x66, 0xde, 0xf0, 0xd9,
- 0x52, 0x9a, 0x8f, 0x8b, 0xb1, 0x8e, 0x63, 0xff, 0x6c, 0x09, 0x26, 0xe5, 0x17, 0xd4, 0xbb, 0xf7,
- 0x43, 0x12, 0xa1, 0x0d, 0x18, 0x75, 0xf8, 0x2c, 0x11, 0xb9, 0xc8, 0x9f, 0xcb, 0x56, 0xe1, 0x1b,
- 0x53, 0x1a, 0x0b, 0xd2, 0xf3, 0xb2, 0x36, 0x8e, 0x09, 0xa1, 0x16, 0xcc, 0x78, 0x7e, 0xc4, 0x84,
- 0x2a, 0x05, 0x2f, 0xf2, 0xaa, 0x48, 0x52, 0x3f, 0x2b, 0xa8, 0xcf, 0xac, 0x27, 0xa9, 0xe0, 0x34,
- 0x61, 0xb4, 0x2c, 0xcd, 0x22, 0xe5, 0x7c, 0xcd, 0xb2, 0x3e, 0x71, 0xd9, 0x56, 0x11, 0xfb, 0x9f,
- 0x58, 0x30, 0x2a, 0xd1, 0x8e, 0xc3, 0x81, 0x66, 0x0d, 0x86, 0x43, 0x36, 0x09, 0x72, 0x68, 0xec,
- 0xa2, 0x8e, 0xf3, 0xf9, 0x8a, 0x65, 0x45, 0xfe, 0x3f, 0xc4, 0x92, 0x06, 0xb3, 0x8a, 0xab, 0xee,
- 0x7f, 0x42, 0xac, 0xe2, 0xaa, 0x3f, 0x39, 0x87, 0xd2, 0xbf, 0x61, 0x7d, 0xd6, 0xcc, 0x4c, 0xf4,
- 0x4a, 0xd3, 0x09, 0xc8, 0xa6, 0xfb, 0x30, 0x79, 0xa5, 0xa9, 0xb1, 0x52, 0x2c, 0xa0, 0xe8, 0x43,
- 0x18, 0x6f, 0x48, 0x73, 0x68, 0xbc, 0xc3, 0x2f, 0x15, 0x9a, 0xe6, 0x95, 0x17, 0x07, 0x57, 0xac,
- 0x2f, 0x6a, 0xf5, 0xb1, 0x41, 0xcd, 0x74, 0x75, 0x2c, 0xf7, 0x72, 0x75, 0x8c, 0xe9, 0xe6, 0x3b,
- 0xfe, 0xfd, 0x9c, 0x05, 0x43, 0xdc, 0x0c, 0xd6, 0x9f, 0x15, 0x52, 0x73, 0x6a, 0x89, 0xc7, 0x8e,
- 0x29, 0x57, 0x84, 0x64, 0x83, 0xd6, 0x60, 0x94, 0xfd, 0x60, 0x66, 0xbc, 0x72, 0xfe, 0xc3, 0x48,
- 0xde, 0xaa, 0xde, 0xc1, 0xbb, 0xb2, 0x1a, 0x8e, 0x29, 0xd8, 0x7f, 0x54, 0xa6, 0xdc, 0x2d, 0x46,
- 0x35, 0x0e, 0x7d, 0xeb, 0xe9, 0x1d, 0xfa, 0xa5, 0xa7, 0x75, 0xe8, 0x6f, 0xc1, 0x54, 0x43, 0x73,
- 0x81, 0x89, 0x67, 0xf2, 0x72, 0xe1, 0x22, 0xd1, 0xbc, 0x65, 0xb8, 0xca, 0x7e, 0xd1, 0x24, 0x82,
- 0x93, 0x54, 0xd1, 0xd7, 0x60, 0x9c, 0xcf, 0xb3, 0x68, 0x85, 0x7b, 0x8b, 0xbe, 0x90, 0xbf, 0x5e,
- 0xf4, 0x26, 0xb8, 0x89, 0x47, 0xab, 0x8e, 0x0d, 0x62, 0xa8, 0x0e, 0xb0, 0xe9, 0xb6, 0x88, 0x20,
- 0x5d, 0xe0, 0xd8, 0xbd, 0xc2, 0xb1, 0x14, 0xe1, 0x49, 0xae, 0x87, 0x90, 0x55, 0xb1, 0x46, 0xc6,
- 0xfe, 0x77, 0x16, 0xa0, 0xe5, 0xce, 0x36, 0x69, 0x93, 0xc0, 0x69, 0xc5, 0xe6, 0xf1, 0x9f, 0xb4,
- 0xa0, 0x42, 0x52, 0xc5, 0x8b, 0x7e, 0xbb, 0x2d, 0x34, 0x0c, 0x39, 0x4a, 0xb0, 0xe5, 0x9c, 0x3a,
- 0xf1, 0x2d, 0x23, 0x0f, 0x03, 0xe7, 0xb6, 0x87, 0xd6, 0xe0, 0x04, 0x3f, 0x7a, 0x0d, 0xbb, 0x82,
- 0xd8, 0x11, 0xcf, 0x08, 0xc2, 0x27, 0x36, 0xd2, 0x28, 0x38, 0xab, 0x9e, 0xfd, 0x0f, 0x26, 0x21,
- 0xb7, 0x17, 0x9f, 0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0xc0, 0xa7, 0x7e, 0x01, 0x9f,
- 0xfa, 0x05, 0x7c, 0xea, 0x17, 0xf0, 0xa9, 0x5f, 0x80, 0xe6, 0x17, 0xf0, 0x57, 0x2d, 0x38, 0xa5,
- 0x0e, 0x4d, 0x43, 0xf7, 0xf0, 0xa3, 0x70, 0x82, 0x6f, 0xb7, 0xc5, 0x96, 0xe3, 0xb6, 0x37, 0x48,
- 0xbb, 0xd3, 0x72, 0x22, 0xe9, 0x73, 0x78, 0x35, 0x73, 0xe5, 0x26, 0x1e, 0x36, 0x19, 0x15, 0xf9,
- 0x0b, 0xd1, 0x0c, 0x00, 0xce, 0x6a, 0xc6, 0xfe, 0xf5, 0x11, 0x18, 0x5c, 0xde, 0x25, 0x5e, 0x74,
- 0x0c, 0xb7, 0xb4, 0x06, 0x4c, 0xba, 0xde, 0xae, 0xdf, 0xda, 0x25, 0x4d, 0x0e, 0x3f, 0x8a, 0x32,
- 0xe1, 0xb4, 0x20, 0x3d, 0xb9, 0x6a, 0x90, 0xc0, 0x09, 0x92, 0x4f, 0xc3, 0x50, 0x76, 0x1d, 0x86,
- 0xf8, 0x91, 0x27, 0x84, 0xc6, 0x4c, 0x9e, 0xcd, 0x06, 0x51, 0x1c, 0xe4, 0xb1, 0x11, 0x8f, 0x1f,
- 0xa9, 0xa2, 0x3a, 0xfa, 0x16, 0x4c, 0x6e, 0xba, 0x41, 0x18, 0x6d, 0xb8, 0x6d, 0x7a, 0x3e, 0xb4,
- 0x3b, 0x8f, 0x61, 0x18, 0x53, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, 0x0b, 0x26, 0x5a,
- 0x8e, 0xde, 0xd4, 0xf0, 0x91, 0x9b, 0x52, 0xa7, 0xc3, 0x2d, 0x9d, 0x10, 0x36, 0xe9, 0xd2, 0xed,
- 0xd4, 0x60, 0xb6, 0x9d, 0x11, 0xa6, 0x99, 0x51, 0xdb, 0x89, 0x1b, 0x75, 0x38, 0x8c, 0x8a, 0x85,
- 0xec, 0x79, 0xd0, 0xa8, 0x29, 0x16, 0x6a, 0x8f, 0x80, 0xbe, 0x09, 0xa3, 0x84, 0x0e, 0x21, 0x25,
- 0x2c, 0x0e, 0x98, 0x2b, 0xfd, 0xf5, 0x75, 0xcd, 0x6d, 0x04, 0xbe, 0x69, 0x92, 0x5c, 0x96, 0x94,
- 0x70, 0x4c, 0x14, 0x2d, 0xc2, 0x50, 0x48, 0x02, 0x57, 0x99, 0x3d, 0x0a, 0xa6, 0x91, 0xa1, 0x71,
- 0x2b, 0x3c, 0xff, 0x8d, 0x45, 0x55, 0xba, 0xbc, 0x84, 0x3b, 0xc3, 0xb8, 0xb9, 0xbc, 0x12, 0x0e,
- 0x0b, 0xef, 0xc3, 0x70, 0x40, 0x5a, 0xcc, 0xe6, 0x3d, 0xd1, 0xff, 0x22, 0xe7, 0x26, 0x74, 0x5e,
- 0x0f, 0x4b, 0x02, 0xe8, 0x26, 0x95, 0x57, 0xa8, 0x58, 0xe9, 0x7a, 0x5b, 0xea, 0xd1, 0x8c, 0x60,
- 0xb4, 0x4a, 0x7c, 0xc7, 0x31, 0x86, 0x7c, 0x7d, 0x8e, 0x33, 0xaa, 0xa1, 0xeb, 0x30, 0xa3, 0x4a,
- 0x57, 0xbd, 0x30, 0x72, 0x28, 0x83, 0xe3, 0x96, 0x07, 0xa5, 0x2a, 0xc2, 0x49, 0x04, 0x9c, 0xae,
- 0x63, 0xff, 0xa2, 0x05, 0x7c, 0x9c, 0x8f, 0x41, 0x41, 0xf2, 0xae, 0xa9, 0x20, 0x39, 0x9b, 0x3b,
- 0x73, 0x39, 0xca, 0x91, 0x5f, 0xb4, 0x60, 0x4c, 0x9b, 0xd9, 0x78, 0xcd, 0x5a, 0x05, 0x6b, 0xb6,
- 0x0b, 0xd3, 0x74, 0xa5, 0xdf, 0xbe, 0x1f, 0x92, 0x60, 0x97, 0x34, 0xd9, 0xc2, 0x2c, 0x3d, 0xde,
- 0xc2, 0x54, 0x0e, 0xfa, 0xb7, 0x12, 0x04, 0x71, 0xaa, 0x09, 0xfb, 0x9b, 0xb2, 0xab, 0xea, 0x3d,
- 0x43, 0x43, 0xcd, 0x79, 0xe2, 0x3d, 0x83, 0x9a, 0x55, 0x1c, 0xe3, 0xd0, 0xad, 0xb6, 0xed, 0x87,
- 0x51, 0xf2, 0x3d, 0xc3, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0xaf, 0x03, 0x2c, 0x3f, 0x24, 0x0d,
- 0xbe, 0x62, 0xf5, 0xab, 0x96, 0x95, 0x7f, 0xd5, 0xb2, 0x7f, 0xcf, 0x82, 0xc9, 0x95, 0x45, 0xe3,
- 0xe4, 0x9a, 0x03, 0xe0, 0xf7, 0xc3, 0x7b, 0xf7, 0xd6, 0xa5, 0x2f, 0x18, 0x77, 0xd6, 0x50, 0xa5,
- 0x58, 0xc3, 0x40, 0x67, 0xa1, 0xdc, 0xea, 0x7a, 0x42, 0x83, 0x3b, 0x4c, 0x8f, 0xc7, 0x5b, 0x5d,
- 0x0f, 0xd3, 0x32, 0xed, 0xe5, 0x69, 0xb9, 0xef, 0x97, 0xa7, 0x3d, 0x03, 0x60, 0xa1, 0x2a, 0x0c,
- 0x3e, 0x78, 0xe0, 0x36, 0x79, 0x5c, 0x0f, 0xe1, 0xa7, 0x76, 0xef, 0xde, 0xea, 0x52, 0x88, 0x79,
- 0xb9, 0xfd, 0xcb, 0x16, 0x4c, 0x25, 0x6e, 0xfb, 0xf4, 0xd6, 0xb8, 0xab, 0xa2, 0x2a, 0x25, 0x83,
- 0xc7, 0x68, 0xf1, 0x96, 0x34, 0xac, 0x3e, 0x5e, 0x5c, 0x8b, 0x17, 0x3b, 0xe5, 0x3e, 0x5e, 0xec,
- 0x14, 0xbb, 0xe1, 0x7f, 0xaf, 0x0c, 0xb3, 0x2b, 0x2d, 0xf2, 0xf0, 0x63, 0x86, 0x63, 0xe9, 0xf7,
- 0xa9, 0xef, 0xd1, 0xd4, 0x77, 0x47, 0x7d, 0xce, 0xdd, 0x7b, 0x0a, 0x37, 0x61, 0x98, 0x7f, 0xba,
- 0x0c, 0xce, 0x92, 0x69, 0x4c, 0xcf, 0x1f, 0x90, 0x39, 0x3e, 0x84, 0xc2, 0x98, 0xae, 0xce, 0x78,
- 0x51, 0x8a, 0x25, 0xf1, 0xd9, 0xb7, 0x60, 0x5c, 0xc7, 0x3c, 0x52, 0x60, 0x85, 0xbf, 0x50, 0x86,
- 0x69, 0xda, 0x83, 0xa7, 0x3a, 0x11, 0x77, 0xd2, 0x13, 0xf1, 0xa4, 0x1f, 0xd7, 0xf7, 0x9e, 0x8d,
- 0x0f, 0x93, 0xb3, 0x71, 0x35, 0x6f, 0x36, 0x8e, 0x7b, 0x0e, 0xfe, 0xa2, 0x05, 0x27, 0x56, 0x5a,
- 0x7e, 0x63, 0x27, 0xf1, 0x00, 0xfe, 0x4d, 0x18, 0xa3, 0x27, 0x48, 0x68, 0xc4, 0x82, 0x32, 0xa2,
- 0x83, 0x09, 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0xac, 0x2e, 0x65, 0x05, 0x15, 0x13, 0x20,
- 0xac, 0xe3, 0xd9, 0xff, 0xdc, 0x82, 0xf3, 0xd7, 0x17, 0x97, 0xe3, 0xa5, 0x98, 0x8a, 0x6b, 0x76,
- 0x09, 0x86, 0x3a, 0x4d, 0xad, 0x2b, 0xb1, 0x52, 0x7e, 0x89, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x21,
- 0x04, 0xef, 0x00, 0x5c, 0xc7, 0xb5, 0x45, 0x71, 0x54, 0x48, 0x1b, 0x9c, 0x95, 0x6b, 0x83, 0x7b,
- 0x01, 0x86, 0xe9, 0x51, 0xe6, 0x36, 0x64, 0xbf, 0xb9, 0xbb, 0x0c, 0x2f, 0xc2, 0x12, 0x66, 0xff,
- 0x82, 0x05, 0x27, 0xae, 0xbb, 0x11, 0x95, 0x33, 0x92, 0x81, 0xbb, 0xa8, 0xa0, 0x11, 0xba, 0x91,
- 0x1f, 0xec, 0x25, 0x79, 0x2f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x0f, 0xda, 0x75, 0xd9, 0x93, 0xba,
- 0x92, 0x69, 0xf5, 0xc4, 0xa2, 0x1c, 0x2b, 0x0c, 0x3a, 0x5e, 0x4d, 0x37, 0x60, 0x9c, 0x5e, 0x72,
- 0x63, 0x35, 0x5e, 0x4b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x8f, 0x2d, 0xa8, 0x5e, 0xe7, 0x81, 0x01,
- 0x36, 0xc3, 0x1c, 0xa6, 0xfb, 0x3a, 0x8c, 0x12, 0x69, 0x9e, 0x49, 0xfa, 0x72, 0x2b, 0xbb, 0x0d,
- 0x8f, 0x1f, 0xa6, 0xf0, 0xfa, 0x38, 0x33, 0x8e, 0x16, 0x66, 0x61, 0x05, 0x10, 0xd1, 0xdb, 0xd2,
- 0x03, 0xaa, 0xb1, 0xc8, 0x4c, 0xcb, 0x29, 0x28, 0xce, 0xa8, 0x61, 0xff, 0x0d, 0x0b, 0x4e, 0xa9,
- 0x0f, 0xfe, 0xc4, 0x7d, 0xa6, 0xfd, 0xab, 0x25, 0x98, 0xb8, 0xb1, 0xb1, 0x51, 0xbb, 0x4e, 0x22,
- 0x6d, 0x55, 0x16, 0x3b, 0x5d, 0x60, 0xcd, 0x76, 0x5c, 0x74, 0xad, 0xed, 0x46, 0x6e, 0x6b, 0x8e,
- 0x87, 0x09, 0x9d, 0x5b, 0xf5, 0xa2, 0xdb, 0x41, 0x3d, 0x0a, 0x5c, 0x6f, 0x2b, 0x73, 0xa5, 0x4b,
- 0x31, 0xab, 0x9c, 0x27, 0x66, 0xa1, 0xd7, 0x61, 0x88, 0xc5, 0x29, 0x95, 0x93, 0xf0, 0x8c, 0xba,
- 0x15, 0xb2, 0xd2, 0xc3, 0xfd, 0xea, 0xe8, 0x1d, 0xbc, 0xca, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0,
- 0xd8, 0x76, 0x14, 0x75, 0x6e, 0x10, 0xa7, 0x49, 0x02, 0xc9, 0x65, 0x2f, 0x64, 0x71, 0x59, 0x3a,
- 0x08, 0x1c, 0x2d, 0x66, 0x4c, 0x71, 0x59, 0x88, 0x75, 0x3a, 0x76, 0x1d, 0x20, 0x86, 0x3d, 0x21,
- 0xb3, 0x99, 0xbd, 0x01, 0xa3, 0xf4, 0x73, 0xe7, 0x5b, 0xae, 0x53, 0xec, 0x98, 0xf0, 0x32, 0x8c,
- 0x4a, 0xb7, 0x83, 0x50, 0x44, 0x11, 0x62, 0x27, 0x92, 0xf4, 0x4a, 0x08, 0x71, 0x0c, 0xb7, 0x9f,
- 0x07, 0xe1, 0x1b, 0x5f, 0x44, 0xd2, 0xde, 0x84, 0x93, 0xcc, 0xc9, 0xdf, 0x89, 0xb6, 0x8d, 0x35,
- 0xda, 0x7b, 0x31, 0xbc, 0x22, 0xae, 0xa2, 0x25, 0xe5, 0x6d, 0x25, 0xa3, 0x54, 0x8c, 0x4b, 0x8a,
- 0xf1, 0xb5, 0xd4, 0xfe, 0xa3, 0x01, 0x78, 0x66, 0xb5, 0x9e, 0x1f, 0xfe, 0xee, 0x1a, 0x8c, 0x73,
- 0x09, 0x97, 0x2e, 0x0d, 0xa7, 0x25, 0xda, 0x55, 0x4a, 0xdb, 0x0d, 0x0d, 0x86, 0x0d, 0x4c, 0x2a,
- 0x11, 0xba, 0x1f, 0x79, 0xc9, 0x37, 0xdc, 0xab, 0x1f, 0xac, 0x63, 0x5a, 0x4e, 0xc1, 0x54, 0x58,
- 0xe6, 0x2c, 0x5d, 0x81, 0x95, 0xc0, 0xfc, 0x2e, 0x4c, 0xba, 0x61, 0x23, 0x74, 0x57, 0x3d, 0xba,
- 0x4f, 0xb5, 0x9d, 0xae, 0xd4, 0x24, 0xb4, 0xd3, 0x0a, 0x8a, 0x13, 0xd8, 0xda, 0xf9, 0x32, 0xd8,
- 0xb7, 0xc0, 0xdd, 0x33, 0xf8, 0x0e, 0x65, 0xff, 0x1d, 0xf6, 0x75, 0x21, 0xb3, 0x55, 0x08, 0xf6,
- 0xcf, 0x3f, 0x38, 0xc4, 0x12, 0x46, 0xef, 0xa0, 0x8d, 0x6d, 0xa7, 0x33, 0xdf, 0x8d, 0xb6, 0x97,
- 0xdc, 0xb0, 0xe1, 0xef, 0x92, 0x60, 0x8f, 0xa9, 0x0f, 0x46, 0xe2, 0x3b, 0xa8, 0x02, 0x2c, 0xde,
- 0x98, 0xaf, 0x51, 0x4c, 0x9c, 0xae, 0x83, 0xe6, 0x61, 0x4a, 0x16, 0xd6, 0x49, 0xc8, 0x8e, 0x80,
- 0x31, 0x46, 0x46, 0xbd, 0xaa, 0x16, 0xc5, 0x8a, 0x48, 0x12, 0xdf, 0x14, 0x70, 0xe1, 0x49, 0x08,
- 0xb8, 0x5f, 0x80, 0x09, 0xd7, 0x73, 0x23, 0xd7, 0x89, 0x7c, 0x6e, 0x68, 0xe3, 0x9a, 0x02, 0xa6,
- 0x13, 0x5f, 0xd5, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0x8f, 0x01, 0x98, 0x61, 0xd3, 0xf6, 0xe9, 0x0a,
- 0xfb, 0x61, 0x5a, 0x61, 0x77, 0xd2, 0x2b, 0xec, 0x49, 0x48, 0xee, 0x8f, 0xbd, 0xcc, 0xbe, 0x63,
- 0xc1, 0x0c, 0x53, 0xcb, 0x1b, 0xcb, 0xec, 0x0a, 0x8c, 0x06, 0xc6, 0x83, 0xf7, 0x51, 0xdd, 0xfa,
- 0x27, 0xdf, 0xae, 0xc7, 0x38, 0xe8, 0x3d, 0x80, 0x4e, 0xac, 0xf6, 0x2f, 0x19, 0x51, 0x8a, 0x21,
- 0x57, 0xe3, 0xaf, 0xd5, 0xb1, 0xbf, 0x05, 0xa3, 0xea, 0x45, 0xbb, 0xbc, 0x20, 0x5b, 0x39, 0x17,
- 0xe4, 0xde, 0x62, 0x84, 0xf4, 0x4c, 0x2c, 0x67, 0x7a, 0x26, 0xfe, 0x6b, 0x0b, 0x62, 0xa3, 0x0c,
- 0xfa, 0x00, 0x46, 0x3b, 0x3e, 0x73, 0x64, 0x0f, 0xe4, 0xeb, 0x90, 0xe7, 0x0b, 0xad, 0x3a, 0x3c,
- 0x14, 0x69, 0xc0, 0xa7, 0xa3, 0x26, 0xab, 0xe2, 0x98, 0x0a, 0xba, 0x09, 0xc3, 0x9d, 0x80, 0xd4,
- 0x23, 0x16, 0x27, 0xaf, 0x7f, 0x82, 0x7c, 0xf9, 0xf2, 0x8a, 0x58, 0x52, 0x48, 0xf8, 0x05, 0x97,
- 0xfb, 0xf7, 0x0b, 0xb6, 0xff, 0x7e, 0x09, 0xa6, 0x93, 0x8d, 0xa0, 0x77, 0x60, 0x80, 0x3c, 0x24,
- 0x0d, 0xf1, 0xa5, 0x99, 0xd2, 0x44, 0xac, 0x10, 0xe2, 0x43, 0x47, 0xff, 0x63, 0x56, 0x0b, 0xdd,
- 0x80, 0x61, 0x2a, 0x4a, 0x5c, 0x57, 0xd1, 0x64, 0x9f, 0xcd, 0x13, 0x47, 0x94, 0x4c, 0xc6, 0x3f,
- 0x4b, 0x14, 0x61, 0x59, 0x9d, 0x39, 0x12, 0x36, 0x3a, 0x75, 0x7a, 0x4b, 0x8b, 0x8a, 0x94, 0x09,
- 0x1b, 0x8b, 0x35, 0x8e, 0x24, 0xa8, 0x71, 0x47, 0x42, 0x59, 0x88, 0x63, 0x22, 0xe8, 0x3d, 0x18,
- 0x0c, 0x5b, 0x84, 0x74, 0x84, 0xa7, 0x48, 0xa6, 0x4a, 0xb7, 0x4e, 0x11, 0x04, 0x25, 0xa6, 0x02,
- 0x62, 0x05, 0x98, 0x57, 0xb4, 0x7f, 0xcd, 0x02, 0xe0, 0x9e, 0x97, 0x8e, 0xb7, 0x45, 0x8e, 0xc1,
- 0x0a, 0xb2, 0x04, 0x03, 0x61, 0x87, 0x34, 0x8a, 0xde, 0x77, 0xc4, 0xfd, 0xa9, 0x77, 0x48, 0x23,
- 0x5e, 0xed, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0x9f, 0x00, 0x98, 0x8c, 0xd1, 0x56, 0x23, 0xd2, 0x46,
- 0xaf, 0x1a, 0x21, 0xb8, 0xce, 0x26, 0x42, 0x70, 0x8d, 0x32, 0x6c, 0x4d, 0xe1, 0xfe, 0x2d, 0x28,
- 0xb7, 0x9d, 0x87, 0x42, 0xa3, 0xfa, 0x72, 0x71, 0x37, 0x28, 0xfd, 0xb9, 0x35, 0xe7, 0x21, 0xbf,
- 0xc1, 0xbf, 0x2c, 0x77, 0xe7, 0x9a, 0xf3, 0xb0, 0xe7, 0x1b, 0x04, 0xda, 0x08, 0x6b, 0xcb, 0xf5,
- 0x84, 0x53, 0x61, 0x5f, 0x6d, 0xb9, 0x5e, 0xb2, 0x2d, 0xd7, 0xeb, 0xa3, 0x2d, 0xd7, 0x43, 0x8f,
- 0x60, 0x58, 0xf8, 0xfc, 0x8a, 0xd8, 0xa0, 0x57, 0xfa, 0x68, 0x4f, 0xb8, 0x0c, 0xf3, 0x36, 0xaf,
- 0x48, 0x0d, 0x85, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x35, 0x0b, 0x26, 0xc5, 0x6f, 0xf1,
- 0x9c, 0x56, 0x48, 0xf0, 0x9f, 0xef, 0xbf, 0x0f, 0xa2, 0x22, 0xef, 0xca, 0xe7, 0xe5, 0x61, 0x6b,
- 0x02, 0x7b, 0xf6, 0x28, 0xd1, 0x0b, 0xf4, 0xf7, 0x2d, 0x38, 0xd9, 0x76, 0x1e, 0xf2, 0x16, 0x79,
- 0x19, 0x76, 0x22, 0xd7, 0x17, 0x6e, 0x2e, 0xef, 0xf4, 0x37, 0xfd, 0xa9, 0xea, 0xbc, 0x93, 0xd2,
- 0xba, 0x7c, 0x32, 0x0b, 0xa5, 0x67, 0x57, 0x33, 0xfb, 0x35, 0xbb, 0x09, 0x23, 0x72, 0xbd, 0x3d,
- 0xcd, 0x07, 0x0d, 0xac, 0x1d, 0xb1, 0xd6, 0x9e, 0x6a, 0x3b, 0xdf, 0x82, 0x71, 0x7d, 0x8d, 0x3d,
- 0xd5, 0xb6, 0x3e, 0x82, 0x13, 0x19, 0x6b, 0xe9, 0xa9, 0x36, 0xf9, 0x00, 0xce, 0xe6, 0xae, 0x8f,
- 0xa7, 0xfa, 0x20, 0xe5, 0x57, 0x2d, 0x9d, 0x0f, 0x1e, 0x83, 0x29, 0x6a, 0xd1, 0x34, 0x45, 0x5d,
- 0x28, 0xde, 0x39, 0x39, 0xf6, 0xa8, 0x0f, 0xf5, 0x4e, 0x53, 0xae, 0x8e, 0xde, 0x87, 0xa1, 0x16,
- 0x2d, 0x91, 0x9e, 0xe3, 0x76, 0xef, 0x1d, 0x19, 0x4b, 0xd4, 0xac, 0x3c, 0xc4, 0x82, 0x82, 0xfd,
- 0x33, 0x16, 0x64, 0x3c, 0xa9, 0xa1, 0x12, 0x56, 0xd7, 0x6d, 0xb2, 0x21, 0x29, 0xc7, 0x12, 0x96,
- 0x8a, 0x50, 0x75, 0x1e, 0xca, 0x5b, 0x6e, 0x53, 0xbc, 0xd6, 0x57, 0xe0, 0xeb, 0x14, 0xbc, 0xe5,
- 0x36, 0xd1, 0x0a, 0xa0, 0xb0, 0xdb, 0xe9, 0xb4, 0x98, 0x67, 0x98, 0xd3, 0xba, 0x1e, 0xf8, 0xdd,
- 0x0e, 0x77, 0x13, 0x2f, 0x73, 0xf5, 0x52, 0x3d, 0x05, 0xc5, 0x19, 0x35, 0xec, 0x7f, 0x64, 0xc1,
- 0xc0, 0x31, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xd5, 0x5c, 0xd2, 0x22, 0xa5, 0xcc, 0x1c, 0x76, 0x1e,
- 0xb0, 0x70, 0x0d, 0x21, 0x13, 0x38, 0x32, 0x67, 0x6d, 0xdf, 0x82, 0x13, 0xb7, 0x7c, 0xa7, 0xb9,
- 0xe0, 0xb4, 0x1c, 0xaf, 0x41, 0x82, 0x55, 0x6f, 0xeb, 0x48, 0x6f, 0x32, 0x4a, 0x3d, 0xdf, 0x64,
- 0x5c, 0x83, 0x21, 0xb7, 0xa3, 0xe5, 0xa4, 0xb8, 0x48, 0x67, 0x77, 0xb5, 0x26, 0xd2, 0x51, 0x20,
- 0xa3, 0x71, 0x56, 0x8a, 0x05, 0x3e, 0x5d, 0x96, 0xdc, 0x6f, 0x71, 0x20, 0x7f, 0x59, 0xd2, 0x5b,
- 0x52, 0x32, 0xd6, 0xa2, 0xe1, 0xb6, 0xbf, 0x0d, 0x46, 0x13, 0xe2, 0x91, 0x1a, 0x86, 0x61, 0x97,
- 0x7f, 0xa9, 0x58, 0x9b, 0x2f, 0x66, 0xdf, 0x5e, 0x52, 0x03, 0xa3, 0xbd, 0xc6, 0xe4, 0x05, 0x58,
- 0x12, 0xb2, 0xaf, 0x41, 0x66, 0x6c, 0xac, 0xde, 0x9a, 0x29, 0xfb, 0x2b, 0x30, 0xc3, 0x6a, 0x1e,
- 0x51, 0xeb, 0x63, 0x27, 0xf4, 0xe9, 0x19, 0xe1, 0xc5, 0xed, 0xff, 0xc5, 0x02, 0xb4, 0xe6, 0x37,
- 0xdd, 0xcd, 0x3d, 0x41, 0x9c, 0x7f, 0xff, 0x47, 0x50, 0xe5, 0xd7, 0xea, 0x64, 0x08, 0xee, 0xc5,
- 0x96, 0x13, 0x86, 0x9a, 0x2e, 0xff, 0x45, 0xd1, 0x6e, 0x75, 0xa3, 0x18, 0x1d, 0xf7, 0xa2, 0x87,
- 0x3e, 0x48, 0x44, 0x44, 0xfd, 0x62, 0x2a, 0x22, 0xea, 0x8b, 0x99, 0x4e, 0x40, 0xe9, 0xde, 0xcb,
- 0x48, 0xa9, 0xf6, 0x77, 0x2d, 0x98, 0x5a, 0x4f, 0x84, 0x94, 0xbe, 0xc4, 0x3c, 0x22, 0x32, 0x6c,
- 0x54, 0x75, 0x56, 0x8a, 0x05, 0xf4, 0x89, 0xeb, 0x70, 0xff, 0xd4, 0x82, 0x38, 0x16, 0xdf, 0x31,
- 0x88, 0xdc, 0x8b, 0x86, 0xc8, 0x9d, 0x79, 0x7d, 0x51, 0xdd, 0xc9, 0x93, 0xb8, 0xd1, 0x4d, 0x35,
- 0x27, 0x05, 0x37, 0x97, 0x98, 0x0c, 0xdf, 0x67, 0x93, 0xe6, 0xc4, 0xa9, 0xd9, 0xf8, 0xfd, 0x12,
- 0x20, 0x85, 0xdb, 0x77, 0x14, 0xdd, 0x74, 0x8d, 0x27, 0x13, 0x45, 0x77, 0x17, 0x10, 0xf3, 0xe9,
- 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0xa1, 0xb5, 0x3e, 0x9a, 0xc3, 0x90, 0x72, 0x89, 0xbd, 0x95,
- 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0x7c, 0xb5, 0x06, 0xfb, 0xf5, 0xd5, 0x1a, 0xea, 0xf1, 0xe8, 0xfe,
- 0x57, 0x2c, 0x98, 0x50, 0xc3, 0xf4, 0x09, 0x79, 0xba, 0xa3, 0xfa, 0x93, 0x73, 0xae, 0xd4, 0xb4,
- 0x2e, 0x33, 0x61, 0xe0, 0x47, 0x58, 0xf0, 0x04, 0xa7, 0xe5, 0x3e, 0x22, 0x2a, 0xd8, 0x7b, 0x55,
- 0x04, 0x43, 0x10, 0xa5, 0x87, 0xfb, 0xd5, 0x09, 0xf5, 0x8f, 0xfb, 0x23, 0xc4, 0x55, 0xec, 0xbf,
- 0x4d, 0x37, 0xbb, 0xb9, 0x14, 0xd1, 0x9b, 0x30, 0xd8, 0xd9, 0x76, 0x42, 0x92, 0x78, 0xe2, 0x38,
- 0x58, 0xa3, 0x85, 0x87, 0xfb, 0xd5, 0x49, 0x55, 0x81, 0x95, 0x60, 0x8e, 0xdd, 0x7f, 0x6c, 0xe2,
- 0xf4, 0xe2, 0xec, 0x19, 0x9b, 0xf8, 0xdf, 0x59, 0x30, 0xb0, 0x4e, 0x4f, 0xaf, 0xa7, 0xcf, 0x02,
- 0xde, 0x35, 0x58, 0xc0, 0xb9, 0xbc, 0xb4, 0x67, 0xb9, 0xbb, 0x7f, 0x25, 0xb1, 0xfb, 0x2f, 0xe4,
- 0x52, 0x28, 0xde, 0xf8, 0x6d, 0x18, 0x63, 0xc9, 0xd4, 0xc4, 0x73, 0xce, 0xd7, 0x8d, 0x0d, 0x5f,
- 0x4d, 0x6c, 0xf8, 0x29, 0x0d, 0x55, 0xdb, 0xe9, 0x2f, 0xc1, 0xb0, 0x78, 0x1f, 0x98, 0x8c, 0x41,
- 0x21, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x57, 0x06, 0x23, 0x79, 0x1b, 0xfa, 0x27, 0x16, 0xcc, 0x05,
- 0xdc, 0xc5, 0xbf, 0xb9, 0xd4, 0x0d, 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x6e, 0xcb, 0xf5,
- 0xb6, 0x56, 0xb7, 0x3c, 0x5f, 0x15, 0x2f, 0x3f, 0x24, 0x8d, 0xae, 0x8a, 0xdb, 0x53, 0x90, 0x29,
- 0x4e, 0x3d, 0x93, 0x79, 0xed, 0x60, 0xbf, 0x3a, 0x87, 0x8f, 0x44, 0x1b, 0x1f, 0xb1, 0x2f, 0xe8,
- 0x9f, 0x5b, 0x70, 0x85, 0x27, 0x11, 0xeb, 0xbf, 0xff, 0x05, 0x1a, 0x8e, 0x9a, 0x24, 0x15, 0x13,
- 0xd9, 0x20, 0x41, 0x7b, 0xe1, 0x0b, 0x62, 0x40, 0xaf, 0xd4, 0x8e, 0xd6, 0x16, 0x3e, 0x6a, 0xe7,
- 0xec, 0xff, 0xa6, 0x0c, 0x13, 0x22, 0x86, 0xad, 0x38, 0x03, 0xde, 0x34, 0x96, 0xc4, 0xb3, 0x89,
- 0x25, 0x31, 0x63, 0x20, 0x3f, 0x19, 0xf6, 0x1f, 0xc2, 0x0c, 0x65, 0xce, 0x37, 0x88, 0x13, 0x44,
- 0xf7, 0x89, 0xc3, 0x5d, 0x30, 0xcb, 0x47, 0xe6, 0xfe, 0x4a, 0xb1, 0x7e, 0x2b, 0x49, 0x0c, 0xa7,
- 0xe9, 0xff, 0x30, 0x9d, 0x39, 0x1e, 0x4c, 0xa7, 0xc2, 0x10, 0x7f, 0x15, 0x46, 0xd5, 0xe3, 0x36,
- 0xc1, 0x74, 0x8a, 0xa3, 0x79, 0x27, 0x29, 0x70, 0xa5, 0x67, 0xfc, 0xb0, 0x32, 0x26, 0x67, 0xff,
- 0x72, 0xc9, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x38, 0x21, 0xcb, 0x30, 0xd0, 0x2c, 0xd2, 0x68,
- 0xa7, 0x9a, 0x61, 0x7e, 0x66, 0xf3, 0xa2, 0x26, 0x56, 0x34, 0xd0, 0x0d, 0xee, 0xe8, 0xba, 0x4b,
- 0x8a, 0xd4, 0xd9, 0x29, 0x6a, 0x20, 0x5d, 0x61, 0x77, 0x09, 0x16, 0xf5, 0xd1, 0xd7, 0xb9, 0x27,
- 0xf2, 0x4d, 0xcf, 0x7f, 0xe0, 0x5d, 0xf7, 0x7d, 0x19, 0x04, 0xaa, 0x3f, 0x82, 0x33, 0xd2, 0xff,
- 0x58, 0x55, 0xc7, 0x26, 0xb5, 0xfe, 0xe2, 0xfa, 0xff, 0x28, 0xb0, 0xa4, 0x49, 0x66, 0x2c, 0x89,
- 0x10, 0x11, 0x98, 0x12, 0x01, 0x92, 0x65, 0x99, 0x18, 0xbb, 0xcc, 0xeb, 0xb7, 0x59, 0x3b, 0xb6,
- 0x00, 0xdd, 0x34, 0x49, 0xe0, 0x24, 0x4d, 0x7b, 0x9b, 0x33, 0xe1, 0x15, 0xe2, 0x44, 0xdd, 0x80,
- 0x84, 0xe8, 0xcb, 0x50, 0x49, 0xdf, 0x8c, 0x85, 0x21, 0xc5, 0x62, 0xd2, 0xf3, 0xb9, 0x83, 0xfd,
- 0x6a, 0xa5, 0x9e, 0x83, 0x83, 0x73, 0x6b, 0xdb, 0x3f, 0x6f, 0x01, 0x7b, 0xc1, 0x7f, 0x0c, 0x92,
- 0xcf, 0x97, 0x4c, 0xc9, 0xa7, 0x92, 0x37, 0x9d, 0x39, 0x42, 0xcf, 0x1b, 0x7c, 0x0d, 0xd7, 0x02,
- 0xff, 0xe1, 0x9e, 0xf0, 0xfa, 0xea, 0x7d, 0x8d, 0xb3, 0xbf, 0x67, 0x01, 0xcb, 0x30, 0x86, 0xf9,
- 0xad, 0x5d, 0x1a, 0x38, 0x7a, 0x3b, 0x34, 0x7c, 0x19, 0x46, 0x36, 0xc5, 0xf0, 0x67, 0x28, 0x9d,
- 0x8c, 0x0e, 0x9b, 0xb4, 0xe5, 0xa4, 0x89, 0x97, 0xb8, 0xe2, 0x1f, 0x56, 0xd4, 0xec, 0xff, 0xd2,
- 0x82, 0xd9, 0xfc, 0x6a, 0xe8, 0x0e, 0x9c, 0x09, 0x48, 0xa3, 0x1b, 0x84, 0x74, 0x4b, 0x88, 0x0b,
- 0x90, 0x78, 0x01, 0xc6, 0xa7, 0xfa, 0x99, 0x83, 0xfd, 0xea, 0x19, 0x9c, 0x8d, 0x82, 0xf3, 0xea,
- 0xa2, 0xb7, 0x60, 0xb2, 0x1b, 0x72, 0xc9, 0x8f, 0x09, 0x5d, 0xa1, 0x08, 0x63, 0xcf, 0x1e, 0x49,
- 0xdd, 0x31, 0x20, 0x38, 0x81, 0x69, 0xff, 0x79, 0xbe, 0x1c, 0x95, 0xc7, 0x6b, 0x1b, 0x66, 0x3c,
- 0xed, 0x3f, 0x3d, 0x01, 0xe5, 0x55, 0xff, 0xf9, 0x5e, 0xa7, 0x3e, 0x3b, 0x2e, 0xb5, 0x18, 0x03,
- 0x09, 0x32, 0x38, 0x4d, 0xd9, 0xfe, 0x9b, 0x16, 0x9c, 0xd1, 0x11, 0xb5, 0x17, 0x87, 0xbd, 0xac,
- 0x80, 0x4b, 0x5a, 0x00, 0x3e, 0x7e, 0xcc, 0x5d, 0xce, 0x08, 0xc0, 0x77, 0x52, 0xa7, 0x5e, 0x18,
- 0x6d, 0x8f, 0xbf, 0x2d, 0xcd, 0x8a, 0xb6, 0xf7, 0x47, 0x16, 0x5f, 0x9f, 0x7a, 0xd7, 0xd1, 0x47,
- 0x30, 0xdd, 0x76, 0xa2, 0xc6, 0xf6, 0xf2, 0xc3, 0x4e, 0xc0, 0x8d, 0xbb, 0x72, 0x9c, 0x5e, 0xee,
- 0x35, 0x4e, 0xda, 0x47, 0xc6, 0xde, 0xe0, 0x6b, 0x09, 0x62, 0x38, 0x45, 0x1e, 0xdd, 0x87, 0x31,
- 0x56, 0xc6, 0xde, 0x62, 0x87, 0x45, 0xb2, 0x4c, 0x5e, 0x6b, 0xca, 0x39, 0x68, 0x2d, 0xa6, 0x83,
- 0x75, 0xa2, 0xf6, 0x2f, 0x95, 0x39, 0xd3, 0x60, 0x77, 0x8f, 0x97, 0x60, 0xb8, 0xe3, 0x37, 0x17,
- 0x57, 0x97, 0xb0, 0x98, 0x05, 0x75, 0xee, 0xd5, 0x78, 0x31, 0x96, 0x70, 0x74, 0x19, 0x46, 0xc4,
- 0x4f, 0x69, 0x8c, 0x67, 0x7b, 0x44, 0xe0, 0x85, 0x58, 0x41, 0xd1, 0x6b, 0x00, 0x9d, 0xc0, 0xdf,
- 0x75, 0x9b, 0x2c, 0xf6, 0x56, 0xd9, 0xf4, 0xeb, 0xab, 0x29, 0x08, 0xd6, 0xb0, 0xd0, 0xdb, 0x30,
- 0xd1, 0xf5, 0x42, 0x2e, 0x3f, 0x69, 0xc9, 0x38, 0x94, 0xc7, 0xd9, 0x1d, 0x1d, 0x88, 0x4d, 0x5c,
- 0x34, 0x0f, 0x43, 0x91, 0xc3, 0xfc, 0xd4, 0x06, 0xf3, 0x5f, 0x0c, 0x6c, 0x50, 0x0c, 0x3d, 0xed,
- 0x25, 0xad, 0x80, 0x45, 0x45, 0xf4, 0x55, 0x19, 0x16, 0x81, 0x9f, 0x44, 0xe2, 0xa9, 0x4e, 0x7f,
- 0xa7, 0x96, 0x16, 0x14, 0x41, 0x3c, 0x01, 0x32, 0x68, 0xa1, 0xb7, 0x00, 0xc8, 0xc3, 0x88, 0x04,
- 0x9e, 0xd3, 0x52, 0xde, 0xa5, 0x4a, 0x90, 0x59, 0xf2, 0xd7, 0xfd, 0xe8, 0x4e, 0x48, 0x96, 0x15,
- 0x06, 0xd6, 0xb0, 0xed, 0x9f, 0x18, 0x03, 0x88, 0x2f, 0x1a, 0xe8, 0x11, 0x8c, 0x34, 0x9c, 0x8e,
- 0xd3, 0xe0, 0x39, 0x9d, 0xcb, 0x79, 0x0f, 0xcb, 0xe3, 0x1a, 0x73, 0x8b, 0x02, 0x9d, 0x1b, 0x6f,
- 0x64, 0x3e, 0x83, 0x11, 0x59, 0xdc, 0xd3, 0x60, 0xa3, 0xda, 0x43, 0xdf, 0xb1, 0x60, 0x4c, 0xc4,
- 0xb6, 0x62, 0x33, 0x54, 0xca, 0xb7, 0xb7, 0x69, 0xed, 0xcf, 0xc7, 0x35, 0x78, 0x17, 0x5e, 0x97,
- 0x2b, 0x54, 0x83, 0xf4, 0xec, 0x85, 0xde, 0x30, 0xfa, 0x9c, 0xbc, 0xdb, 0x96, 0x8d, 0xa1, 0x54,
- 0x77, 0xdb, 0x51, 0x76, 0xd4, 0xe8, 0xd7, 0xda, 0x3b, 0xc6, 0xb5, 0x76, 0x20, 0xff, 0x89, 0xb6,
- 0x21, 0x6f, 0xf7, 0xba, 0xd1, 0xa2, 0x9a, 0x1e, 0x03, 0x66, 0x30, 0xff, 0x85, 0xaf, 0x76, 0xb1,
- 0xeb, 0x11, 0xff, 0xe5, 0x5b, 0x30, 0xd5, 0x34, 0xa5, 0x16, 0xb1, 0x12, 0x5f, 0xcc, 0xa3, 0x9b,
- 0x10, 0x72, 0x62, 0x39, 0x25, 0x01, 0xc0, 0x49, 0xc2, 0xa8, 0xc6, 0x43, 0x02, 0xad, 0x7a, 0x9b,
- 0xbe, 0x78, 0x2e, 0x66, 0xe7, 0xce, 0xe5, 0x5e, 0x18, 0x91, 0x36, 0xc5, 0x8c, 0x85, 0x84, 0x75,
- 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x7d, 0x18, 0x62, 0x4f, 0x3c, 0xc3, 0xca, 0x48, 0xbe, 0x59, 0xc3,
- 0x8c, 0x2e, 0x1c, 0x6f, 0x48, 0xf6, 0x37, 0xc4, 0x82, 0x02, 0xba, 0x21, 0x1f, 0x50, 0x87, 0xab,
- 0xde, 0x9d, 0x90, 0xb0, 0x07, 0xd4, 0xa3, 0x0b, 0xcf, 0xc7, 0x6f, 0xa3, 0x79, 0x79, 0x66, 0x72,
- 0x6c, 0xa3, 0x26, 0x15, 0xfb, 0xc4, 0x7f, 0x99, 0x73, 0x5b, 0x44, 0xea, 0xcb, 0xec, 0x9e, 0x99,
- 0x97, 0x3b, 0x1e, 0xce, 0xbb, 0x26, 0x09, 0x9c, 0xa4, 0x49, 0x45, 0x68, 0xbe, 0xeb, 0xc5, 0x83,
- 0xb3, 0x5e, 0xbc, 0x83, 0x6b, 0x0e, 0xd8, 0x69, 0xc4, 0x4b, 0xb0, 0xa8, 0x8f, 0x5c, 0x98, 0x0a,
- 0x0c, 0xf1, 0x42, 0x06, 0xd8, 0xbb, 0xd4, 0x9f, 0x10, 0xa3, 0x65, 0x19, 0x31, 0xc9, 0xe0, 0x24,
- 0x5d, 0xf4, 0xbe, 0x26, 0x28, 0x4d, 0x14, 0xdf, 0xfc, 0x7b, 0x89, 0x46, 0xb3, 0x3b, 0x30, 0x61,
- 0x30, 0x9b, 0xa7, 0x6a, 0x82, 0xf4, 0x60, 0x3a, 0xc9, 0x59, 0x9e, 0xaa, 0xe5, 0xf1, 0x2d, 0x98,
- 0x64, 0x1b, 0xe1, 0x81, 0xd3, 0x11, 0xac, 0xf8, 0xb2, 0xc1, 0x8a, 0xad, 0xcb, 0x65, 0x3e, 0x30,
- 0x72, 0x08, 0x62, 0xc6, 0x69, 0xff, 0x9d, 0x41, 0x51, 0x59, 0xed, 0x22, 0x74, 0x05, 0x46, 0x45,
- 0x07, 0x54, 0xaa, 0x3e, 0xc5, 0x18, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x43, 0x23, 0xab, 0xae,
- 0xbd, 0x50, 0x88, 0x33, 0x34, 0x2a, 0x08, 0xd6, 0xb0, 0xe8, 0xe5, 0xf7, 0xbe, 0xef, 0x47, 0xea,
- 0x0c, 0x56, 0x5b, 0x6d, 0x81, 0x95, 0x62, 0x01, 0xa5, 0x67, 0xef, 0x0e, 0x09, 0x3c, 0xd2, 0x32,
- 0x73, 0xd5, 0xa8, 0xb3, 0xf7, 0xa6, 0x0e, 0xc4, 0x26, 0x2e, 0x95, 0x20, 0xfc, 0x90, 0xed, 0x5d,
- 0x71, 0xc5, 0x8e, 0x5f, 0x7c, 0xd4, 0x79, 0x90, 0x0f, 0x09, 0x47, 0x5f, 0x81, 0x33, 0x2a, 0xd8,
- 0xa6, 0x58, 0x99, 0xb2, 0xc5, 0x21, 0x43, 0x23, 0x76, 0x66, 0x31, 0x1b, 0x0d, 0xe7, 0xd5, 0x47,
- 0xef, 0xc2, 0xa4, 0xb8, 0x86, 0x49, 0x8a, 0xc3, 0xa6, 0xfb, 0xe2, 0x4d, 0x03, 0x8a, 0x13, 0xd8,
- 0x32, 0xdb, 0x0e, 0xbb, 0x9f, 0x48, 0x0a, 0x23, 0xe9, 0x6c, 0x3b, 0x3a, 0x1c, 0xa7, 0x6a, 0xa0,
- 0x79, 0x98, 0xe2, 0x62, 0xa7, 0xeb, 0x6d, 0xf1, 0x39, 0x11, 0x4f, 0x60, 0xd5, 0x86, 0xbc, 0x6d,
- 0x82, 0x71, 0x12, 0x1f, 0x5d, 0x83, 0x71, 0x27, 0x68, 0x6c, 0xbb, 0x11, 0x69, 0xd0, 0x5d, 0xc5,
- 0x3c, 0x08, 0x35, 0xff, 0xcf, 0x79, 0x0d, 0x86, 0x0d, 0x4c, 0xf4, 0x1e, 0x0c, 0x84, 0x0f, 0x9c,
- 0x8e, 0xe0, 0x3e, 0xf9, 0xac, 0x5c, 0xad, 0x60, 0xee, 0xfa, 0x45, 0xff, 0x63, 0x56, 0xd3, 0x7e,
- 0x04, 0x27, 0x32, 0x82, 0x12, 0xd1, 0xa5, 0xe7, 0x74, 0x5c, 0x39, 0x2a, 0x89, 0x67, 0x1a, 0xf3,
- 0xb5, 0x55, 0x39, 0x1e, 0x1a, 0x16, 0x5d, 0xdf, 0x2c, 0x78, 0x51, 0x2d, 0x36, 0x24, 0xa9, 0xf5,
- 0xbd, 0x22, 0x01, 0x38, 0xc6, 0xb1, 0xff, 0xa4, 0x04, 0x53, 0x19, 0xe6, 0x41, 0x96, 0x1b, 0x3f,
- 0x71, 0xcf, 0x8b, 0x53, 0xe1, 0x9b, 0xe9, 0x9f, 0x4a, 0x47, 0x48, 0xff, 0x54, 0xee, 0x95, 0xfe,
- 0x69, 0xe0, 0xe3, 0xa4, 0x7f, 0x32, 0x47, 0x6c, 0xb0, 0xaf, 0x11, 0xcb, 0x48, 0x19, 0x35, 0x74,
- 0xc4, 0x94, 0x51, 0xc6, 0xa0, 0x0f, 0xf7, 0x31, 0xe8, 0xff, 0x69, 0x09, 0xa6, 0x93, 0x96, 0xc5,
- 0x63, 0xd0, 0xce, 0xbf, 0x6f, 0x68, 0xe7, 0x2f, 0xf7, 0x13, 0xf4, 0x20, 0x57, 0x53, 0x8f, 0x13,
- 0x9a, 0xfa, 0xcf, 0xf6, 0x45, 0xad, 0x58, 0x6b, 0xff, 0xb7, 0x4a, 0x70, 0x2a, 0xd3, 0xe0, 0x7a,
- 0x0c, 0x63, 0x73, 0xdb, 0x18, 0x9b, 0x57, 0xfb, 0x0e, 0x08, 0x91, 0x3b, 0x40, 0xf7, 0x12, 0x03,
- 0x74, 0xa5, 0x7f, 0x92, 0xc5, 0xa3, 0xf4, 0xfd, 0x32, 0x5c, 0xc8, 0xac, 0x17, 0x2b, 0xb7, 0x57,
- 0x0c, 0xe5, 0xf6, 0x6b, 0x09, 0xe5, 0xb6, 0x5d, 0x5c, 0xfb, 0xc9, 0x68, 0xbb, 0x45, 0x60, 0x04,
- 0x16, 0xde, 0xe5, 0x31, 0x35, 0xdd, 0x46, 0x60, 0x04, 0x45, 0x08, 0x9b, 0x74, 0x7f, 0x98, 0x34,
- 0xdc, 0xff, 0x83, 0x05, 0x67, 0x33, 0xe7, 0xe6, 0x18, 0xf4, 0x8c, 0xeb, 0xa6, 0x9e, 0xf1, 0xa5,
- 0xbe, 0x57, 0x6b, 0x8e, 0xe2, 0xf1, 0xbb, 0x43, 0x39, 0xdf, 0xc2, 0xd4, 0x1f, 0xb7, 0x61, 0xcc,
- 0x69, 0x34, 0x48, 0x18, 0xae, 0xb1, 0x54, 0x13, 0xdc, 0xf6, 0xfa, 0x2a, 0xbb, 0x9c, 0xc6, 0xc5,
- 0x87, 0xfb, 0xd5, 0xd9, 0x24, 0x89, 0x18, 0x8c, 0x75, 0x0a, 0xe8, 0xeb, 0x30, 0x12, 0xca, 0x24,
- 0xbf, 0x03, 0x8f, 0x9f, 0xe4, 0x97, 0x49, 0x92, 0x4a, 0xbd, 0xa3, 0x48, 0xa2, 0x3f, 0xa7, 0x87,
- 0xf7, 0x2a, 0x50, 0x6c, 0xf2, 0x4e, 0x3e, 0x46, 0x90, 0x2f, 0xf3, 0x39, 0x7c, 0xb9, 0xaf, 0xe7,
- 0xf0, 0xef, 0xc1, 0x74, 0xc8, 0xc3, 0xe5, 0xc6, 0x2e, 0x32, 0x7c, 0x2d, 0xb2, 0x88, 0x83, 0xf5,
- 0x04, 0x0c, 0xa7, 0xb0, 0xd1, 0x8a, 0x6c, 0x95, 0x39, 0x43, 0xf1, 0xe5, 0x79, 0x29, 0x6e, 0x51,
- 0x38, 0x44, 0x9d, 0x4c, 0x4e, 0x02, 0x1b, 0x7e, 0xad, 0x26, 0xfa, 0x3a, 0x00, 0x5d, 0x44, 0x42,
- 0x85, 0x33, 0x9c, 0xcf, 0x42, 0x29, 0x6f, 0x69, 0x66, 0xbe, 0xc0, 0x60, 0x11, 0x0d, 0x96, 0x14,
- 0x11, 0xac, 0x11, 0x44, 0x0e, 0x4c, 0xc4, 0xff, 0x30, 0xd9, 0x2c, 0x0a, 0xb0, 0xc6, 0x5a, 0x48,
- 0x12, 0x67, 0xe6, 0x8d, 0x25, 0x9d, 0x04, 0x36, 0x29, 0xa2, 0xaf, 0xc1, 0xd9, 0xdd, 0x5c, 0xbf,
- 0x23, 0x2e, 0x4b, 0x9e, 0x3f, 0xd8, 0xaf, 0x9e, 0xcd, 0xf7, 0x36, 0xca, 0xaf, 0x6f, 0xff, 0x8f,
- 0x00, 0xcf, 0x14, 0x70, 0x7a, 0x34, 0x6f, 0xfa, 0x0c, 0xbc, 0x9c, 0xd4, 0xab, 0xcc, 0x66, 0x56,
- 0x36, 0x14, 0x2d, 0x89, 0x0d, 0x55, 0xfa, 0xd8, 0x1b, 0xea, 0xa7, 0x2c, 0xed, 0x9a, 0xc5, 0x3d,
- 0xca, 0xbf, 0x74, 0xc4, 0x13, 0xec, 0x09, 0xaa, 0xc0, 0x36, 0x33, 0xf4, 0x48, 0xaf, 0xf5, 0xdd,
- 0x9d, 0xfe, 0x15, 0x4b, 0xbf, 0x9a, 0x9d, 0x60, 0x80, 0xab, 0x98, 0xae, 0x1f, 0xf5, 0xfb, 0x8f,
- 0x2b, 0xd9, 0xc0, 0xef, 0x5b, 0x70, 0x36, 0x55, 0xcc, 0xfb, 0x40, 0x42, 0x11, 0xce, 0x70, 0xfd,
- 0x63, 0x77, 0x5e, 0x12, 0xe4, 0xdf, 0x70, 0x43, 0x7c, 0xc3, 0xd9, 0x5c, 0xbc, 0x64, 0xd7, 0x7f,
- 0xf2, 0x5f, 0x55, 0x4f, 0xb0, 0x06, 0x4c, 0x44, 0x9c, 0xdf, 0x75, 0xd4, 0x81, 0x8b, 0x8d, 0x6e,
- 0x10, 0xc4, 0x8b, 0x35, 0x63, 0x73, 0xf2, 0xdb, 0xe2, 0xf3, 0x07, 0xfb, 0xd5, 0x8b, 0x8b, 0x3d,
- 0x70, 0x71, 0x4f, 0x6a, 0xc8, 0x03, 0xd4, 0x4e, 0x79, 0xf7, 0x31, 0x06, 0x90, 0xa3, 0x05, 0x4a,
- 0xfb, 0x02, 0x72, 0x3f, 0xdd, 0x0c, 0x1f, 0xc1, 0x0c, 0xca, 0xc7, 0xab, 0xbb, 0xf9, 0xc1, 0x64,
- 0x33, 0x98, 0xbd, 0x05, 0x17, 0x8a, 0x17, 0xd3, 0x91, 0x42, 0x50, 0xfc, 0x9e, 0x05, 0xe7, 0x0b,
- 0x43, 0xb3, 0xfd, 0x19, 0xbc, 0x2c, 0xd8, 0xdf, 0xb6, 0xe0, 0xd9, 0xcc, 0x1a, 0xc9, 0xc7, 0x83,
- 0x0d, 0x5a, 0xa8, 0x39, 0xc3, 0xc6, 0x41, 0x8a, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xbf, 0x68, 0xa9,
- 0xa7, 0xbf, 0xe8, 0x3f, 0xb5, 0x20, 0x75, 0xd4, 0x1f, 0x83, 0xe4, 0xb9, 0x6a, 0x4a, 0x9e, 0xcf,
- 0xf7, 0x33, 0x9a, 0x39, 0x42, 0xe7, 0xbf, 0x9d, 0x82, 0xd3, 0x39, 0x2f, 0xc8, 0x77, 0x61, 0x66,
- 0xab, 0x41, 0xcc, 0x90, 0x21, 0x45, 0xd1, 0xff, 0x0a, 0xe3, 0x8b, 0x2c, 0x9c, 0x3a, 0xd8, 0xaf,
- 0xce, 0xa4, 0x50, 0x70, 0xba, 0x09, 0xf4, 0x6d, 0x0b, 0x4e, 0x3a, 0x0f, 0xc2, 0x65, 0x7a, 0x83,
- 0x70, 0x1b, 0x0b, 0x2d, 0xbf, 0xb1, 0x43, 0x05, 0x33, 0xb9, 0xad, 0xde, 0xc8, 0x54, 0x85, 0xdf,
- 0xab, 0xa7, 0xf0, 0x8d, 0xe6, 0x2b, 0x07, 0xfb, 0xd5, 0x93, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84,
- 0x45, 0x0e, 0x3f, 0x27, 0xda, 0x2e, 0x0a, 0x6a, 0x93, 0xf5, 0xd4, 0x9f, 0x8b, 0xc4, 0x12, 0x82,
- 0x15, 0x1d, 0xf4, 0x4d, 0x18, 0xdd, 0x92, 0xf1, 0x2b, 0x32, 0x44, 0xee, 0x78, 0x20, 0x8b, 0xa3,
- 0x7a, 0x70, 0x07, 0x1c, 0x85, 0x84, 0x63, 0xa2, 0xe8, 0x5d, 0x28, 0x7b, 0x9b, 0x61, 0x51, 0x08,
- 0xe9, 0x84, 0xa7, 0x35, 0x8f, 0x76, 0xb5, 0xbe, 0x52, 0xc7, 0xb4, 0x22, 0xba, 0x01, 0xe5, 0xe0,
- 0x7e, 0x53, 0xd8, 0x71, 0x32, 0x37, 0x29, 0x5e, 0x58, 0xca, 0xe9, 0x15, 0xa3, 0x84, 0x17, 0x96,
- 0x30, 0x25, 0x81, 0x6a, 0x30, 0xc8, 0x9e, 0x5d, 0x0b, 0xd1, 0x36, 0xf3, 0x2a, 0x5f, 0x10, 0xbe,
- 0x80, 0xbf, 0x87, 0x64, 0x08, 0x98, 0x13, 0x42, 0x1b, 0x30, 0xd4, 0x70, 0xbd, 0x26, 0x09, 0x84,
- 0x2c, 0xfb, 0xb9, 0x4c, 0x8b, 0x0d, 0xc3, 0xc8, 0xa1, 0xc9, 0x0d, 0x18, 0x0c, 0x03, 0x0b, 0x5a,
- 0x8c, 0x2a, 0xe9, 0x6c, 0x6f, 0xca, 0x13, 0x2b, 0x9b, 0x2a, 0xe9, 0x6c, 0xaf, 0xd4, 0x0b, 0xa9,
- 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xb7, 0xa0, 0xb4, 0xd9, 0x10, 0x4f, 0xaa, 0x33, 0xd5, 0x9b, 0x66,
- 0xc0, 0xb2, 0x85, 0xa1, 0x83, 0xfd, 0x6a, 0x69, 0x65, 0x11, 0x97, 0x36, 0x1b, 0x68, 0x1d, 0x86,
- 0x37, 0x79, 0xbc, 0x20, 0xa1, 0x1f, 0x7d, 0x31, 0x3b, 0x94, 0x51, 0x2a, 0xa4, 0x10, 0x7f, 0xdb,
- 0x2a, 0x00, 0x58, 0x12, 0x61, 0x09, 0xcf, 0x54, 0xdc, 0x23, 0x11, 0x29, 0x76, 0xee, 0x68, 0xb1,
- 0xaa, 0x44, 0xa0, 0x71, 0x45, 0x05, 0x6b, 0x14, 0xe9, 0xaa, 0x76, 0x1e, 0x75, 0x03, 0x96, 0x11,
- 0x45, 0x18, 0x66, 0x32, 0x57, 0xf5, 0xbc, 0x44, 0x2a, 0x5a, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1,
- 0x0e, 0x4c, 0xec, 0x86, 0x9d, 0x6d, 0x22, 0xb7, 0x34, 0x8b, 0x30, 0x98, 0x23, 0xcd, 0xde, 0x15,
- 0x88, 0x6e, 0x10, 0x75, 0x9d, 0x56, 0x8a, 0x0b, 0xb1, 0x6b, 0xcd, 0x5d, 0x9d, 0x18, 0x36, 0x69,
- 0xd3, 0xe1, 0xff, 0xa8, 0xeb, 0xdf, 0xdf, 0x8b, 0x88, 0x08, 0xf0, 0x9a, 0x39, 0xfc, 0x1f, 0x70,
- 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x5d, 0x31, 0x3c, 0x8c, 0x7b, 0x4e, 0xe7, 0x07,
- 0xc2, 0x9f, 0x97, 0x48, 0x39, 0x83, 0xc2, 0xb8, 0x65, 0x4c, 0x8a, 0x71, 0xc9, 0xce, 0xb6, 0x1f,
- 0xf9, 0x5e, 0x82, 0x43, 0xcf, 0xe4, 0x73, 0xc9, 0x5a, 0x06, 0x7e, 0x9a, 0x4b, 0x66, 0x61, 0xe1,
- 0xcc, 0xb6, 0x50, 0x13, 0x26, 0x3b, 0x7e, 0x10, 0x3d, 0xf0, 0x03, 0xb9, 0xbe, 0x50, 0x81, 0xa2,
- 0xd4, 0xc0, 0x14, 0x2d, 0x32, 0xb7, 0x20, 0x13, 0x82, 0x13, 0x34, 0xd1, 0x97, 0x61, 0x38, 0x6c,
- 0x38, 0x2d, 0xb2, 0x7a, 0xbb, 0x72, 0x22, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0xc3,
- 0x3d, 0x71, 0x14, 0x2c, 0xc9, 0xa1, 0x15, 0x18, 0x64, 0x39, 0xef, 0x59, 0x34, 0xe2, 0x9c, 0x78,
- 0xfe, 0xa9, 0x47, 0x3d, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x34, 0x05, 0x7e,
- 0x58, 0x39, 0x95, 0xbf, 0x07, 0x84, 0x82, 0xe1, 0x76, 0xbd, 0x68, 0x0f, 0x28, 0x24, 0x1c, 0x13,
- 0xa5, 0x9c, 0x99, 0x72, 0xd3, 0xd3, 0x05, 0x0e, 0x9b, 0xb9, 0xbc, 0x94, 0x71, 0x66, 0xca, 0x49,
- 0x29, 0x09, 0xfb, 0x37, 0x47, 0xd2, 0x32, 0x0b, 0xd3, 0x30, 0xfd, 0xc7, 0x56, 0xca, 0x63, 0xe3,
- 0xf3, 0xfd, 0x2a, 0xbc, 0x9f, 0xe0, 0xc5, 0xf5, 0xdb, 0x16, 0x9c, 0xee, 0x64, 0x7e, 0x88, 0x10,
- 0x00, 0xfa, 0xd3, 0x9b, 0xf3, 0x4f, 0x57, 0x91, 0xab, 0xb3, 0xe1, 0x38, 0xa7, 0xa5, 0xa4, 0x72,
- 0xa0, 0xfc, 0xb1, 0x95, 0x03, 0x6b, 0x30, 0xd2, 0xe0, 0x37, 0x39, 0x99, 0x3c, 0xa2, 0xaf, 0xb8,
- 0xab, 0xdc, 0x4e, 0x2b, 0x2a, 0x62, 0x45, 0x02, 0xfd, 0xb4, 0x05, 0xe7, 0x93, 0x5d, 0xc7, 0x84,
- 0x81, 0x85, 0xbb, 0x26, 0x57, 0x6b, 0xad, 0x88, 0xef, 0x4f, 0xc9, 0xff, 0x06, 0xf2, 0x61, 0x2f,
- 0x04, 0x5c, 0xdc, 0x18, 0x5a, 0xca, 0xd0, 0xab, 0x0d, 0x99, 0x36, 0xc9, 0x3e, 0x74, 0x6b, 0x6f,
- 0xc0, 0x78, 0xdb, 0xef, 0x7a, 0x91, 0xf0, 0xba, 0x14, 0xae, 0x5b, 0xcc, 0x65, 0x69, 0x4d, 0x2b,
- 0xc7, 0x06, 0x56, 0x42, 0x23, 0x37, 0xf2, 0xd8, 0x1a, 0xb9, 0x0f, 0x61, 0xdc, 0xd3, 0x1e, 0x24,
- 0x14, 0xdd, 0x60, 0x85, 0x76, 0x51, 0xc3, 0xe6, 0xbd, 0xd4, 0x4b, 0xb0, 0x41, 0xad, 0x58, 0x5b,
- 0x06, 0x1f, 0x4f, 0x5b, 0x76, 0xac, 0x57, 0x62, 0xfb, 0xef, 0x95, 0x32, 0x6e, 0x0c, 0x5c, 0x2b,
- 0xf7, 0x8e, 0xa9, 0x95, 0xbb, 0x94, 0xd4, 0xca, 0xa5, 0x4c, 0x55, 0x86, 0x42, 0xae, 0xff, 0x0c,
- 0xa6, 0x7d, 0xc7, 0xd2, 0xfe, 0x0b, 0x16, 0x9c, 0x61, 0xb6, 0x0f, 0xda, 0xc0, 0xc7, 0xb6, 0x77,
- 0x30, 0x87, 0xd8, 0x5b, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x0b, 0x2e, 0xf6, 0x3a, 0x77, 0x99,
- 0x7f, 0x71, 0x53, 0xb9, 0x57, 0xc4, 0xfe, 0xc5, 0xcd, 0xd5, 0x25, 0xcc, 0x20, 0xfd, 0x86, 0x5d,
- 0xb4, 0xff, 0x4f, 0x0b, 0xca, 0x35, 0xbf, 0x79, 0x0c, 0x37, 0xfa, 0x2f, 0x19, 0x37, 0xfa, 0x67,
- 0xb2, 0x4f, 0xfc, 0x66, 0xae, 0xb1, 0x6f, 0x39, 0x61, 0xec, 0x3b, 0x9f, 0x47, 0xa0, 0xd8, 0xb4,
- 0xf7, 0xb7, 0xcb, 0x30, 0x56, 0xf3, 0x9b, 0x6a, 0x9f, 0xfd, 0x77, 0x8f, 0xf3, 0x8c, 0x28, 0x37,
- 0x67, 0x99, 0x46, 0x99, 0xf9, 0x13, 0xcb, 0xa8, 0x17, 0x7f, 0xc6, 0x5e, 0x13, 0xdd, 0x23, 0xee,
- 0xd6, 0x76, 0x44, 0x9a, 0xc9, 0xcf, 0x39, 0xbe, 0xd7, 0x44, 0x7f, 0x58, 0x86, 0xa9, 0x44, 0xeb,
- 0xa8, 0x05, 0x13, 0x2d, 0xdd, 0x94, 0x24, 0xd6, 0xe9, 0x63, 0x59, 0xa1, 0xc4, 0x6b, 0x0c, 0xad,
- 0x08, 0x9b, 0xc4, 0xd1, 0x1c, 0x80, 0xa7, 0xfb, 0xa4, 0xab, 0x98, 0xd0, 0x9a, 0x3f, 0xba, 0x86,
- 0x81, 0xde, 0x84, 0xb1, 0xc8, 0xef, 0xf8, 0x2d, 0x7f, 0x6b, 0xef, 0xa6, 0x8a, 0x8f, 0xac, 0x5c,
- 0x96, 0x37, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x43, 0x98, 0x51, 0x44, 0xea, 0x4f, 0xc0, 0xbc, 0xc6,
- 0xd4, 0x26, 0xeb, 0x49, 0x8a, 0x38, 0xdd, 0x08, 0x7a, 0x0b, 0x26, 0x99, 0xef, 0x34, 0xab, 0x7f,
- 0x93, 0xec, 0xc9, 0xe0, 0xd2, 0x4c, 0xc2, 0x5e, 0x33, 0x20, 0x38, 0x81, 0x89, 0x16, 0x61, 0xa6,
- 0xed, 0x86, 0x89, 0xea, 0x43, 0xac, 0x3a, 0xeb, 0xc0, 0x5a, 0x12, 0x88, 0xd3, 0xf8, 0xf6, 0x2f,
- 0x88, 0x39, 0xf6, 0x22, 0xf7, 0xd3, 0xed, 0xf8, 0xc9, 0xde, 0x8e, 0xdf, 0xb7, 0x60, 0x9a, 0xb6,
- 0xce, 0x1c, 0x42, 0xa5, 0x20, 0xa5, 0xd2, 0x8f, 0x58, 0x05, 0xe9, 0x47, 0x2e, 0x51, 0xb6, 0xdd,
- 0xf4, 0xbb, 0x91, 0xd0, 0x8e, 0x6a, 0x7c, 0x99, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x08, 0xc4,
- 0xab, 0x7b, 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x99, 0x9d, 0x64, 0x20, 0x3b, 0x3b, 0x09, 0x0f,
- 0x32, 0x2f, 0xfc, 0xe8, 0x84, 0x48, 0xab, 0x05, 0x99, 0x97, 0x0e, 0x76, 0x31, 0x8e, 0xfd, 0xd7,
- 0xca, 0x50, 0xa9, 0xf9, 0xcd, 0x45, 0x12, 0x44, 0xee, 0xa6, 0xdb, 0x70, 0x22, 0xa2, 0xe5, 0xdb,
- 0x7d, 0x0d, 0x80, 0x3d, 0x22, 0x0b, 0xb2, 0x22, 0xa8, 0xd7, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x2a,
- 0xd9, 0x21, 0x7b, 0xda, 0xc9, 0xab, 0xa4, 0x92, 0x9b, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0xc5, 0x42,
- 0x19, 0x2d, 0x3f, 0xec, 0xb8, 0x01, 0xcf, 0x4c, 0x4e, 0x1a, 0xbe, 0xd7, 0x0c, 0x45, 0xe0, 0xb7,
- 0x8a, 0x08, 0x44, 0x94, 0x82, 0xe3, 0xcc, 0x5a, 0xa8, 0x06, 0x27, 0x1b, 0x01, 0x69, 0x12, 0x2f,
- 0x72, 0x9d, 0xd6, 0x42, 0xd7, 0x6b, 0xb6, 0x78, 0x4a, 0x9e, 0x01, 0x23, 0x83, 0xe8, 0xc9, 0xc5,
- 0x0c, 0x1c, 0x9c, 0x59, 0x53, 0x7c, 0x0a, 0x23, 0x32, 0x98, 0xfa, 0x14, 0x56, 0x4f, 0xc2, 0x59,
- 0xe3, 0xf1, 0x10, 0x2e, 0x6e, 0x3b, 0xae, 0xc7, 0xea, 0x0d, 0x25, 0x1a, 0xcf, 0xc0, 0xc1, 0x99,
- 0x35, 0xed, 0x3f, 0x2d, 0xc3, 0x38, 0x9d, 0x18, 0xe5, 0x71, 0xf3, 0x86, 0xe1, 0x71, 0x73, 0x31,
- 0xe1, 0x71, 0x33, 0xad, 0xe3, 0x6a, 0xfe, 0x35, 0xef, 0x03, 0xf2, 0x45, 0x52, 0x82, 0xeb, 0xc4,
- 0x23, 0x7c, 0xc8, 0x98, 0x92, 0xb1, 0x1c, 0xfb, 0xa3, 0xdc, 0x4e, 0x61, 0xe0, 0x8c, 0x5a, 0x9f,
- 0xfa, 0xea, 0x1c, 0xaf, 0xaf, 0xce, 0x6f, 0x59, 0x6c, 0x05, 0x2c, 0xad, 0xd7, 0xb9, 0x13, 0x39,
- 0xba, 0x0a, 0x63, 0xec, 0x18, 0x63, 0xb1, 0x3c, 0xa4, 0x4b, 0x0b, 0xcb, 0x6e, 0xbb, 0x1e, 0x17,
- 0x63, 0x1d, 0x07, 0x5d, 0x86, 0x91, 0x90, 0x38, 0x41, 0x63, 0x5b, 0x9d, 0xe1, 0xc2, 0xff, 0x84,
- 0x97, 0x61, 0x05, 0x45, 0x1f, 0xc4, 0x11, 0xe1, 0xcb, 0xf9, 0x1e, 0xe9, 0x7a, 0x7f, 0x38, 0x1f,
- 0xcc, 0x0f, 0x03, 0x6f, 0xdf, 0x03, 0x94, 0xc6, 0xef, 0xe3, 0x89, 0x5f, 0xd5, 0x8c, 0x59, 0x3c,
- 0x9a, 0x8a, 0x57, 0xfc, 0xef, 0x2d, 0x98, 0xac, 0xf9, 0x4d, 0xca, 0x9f, 0x7f, 0x98, 0x98, 0xb1,
- 0x9e, 0xc1, 0x63, 0xa8, 0x20, 0x83, 0xc7, 0x81, 0x05, 0x17, 0xd8, 0xe7, 0x47, 0xc4, 0x6b, 0xc6,
- 0x06, 0x4f, 0xdd, 0xdf, 0xe3, 0x01, 0x4c, 0x05, 0x3c, 0x7c, 0xd7, 0x9a, 0xd3, 0xe9, 0xb8, 0xde,
- 0x96, 0x7c, 0xdf, 0xf6, 0x46, 0xe1, 0xbb, 0x8d, 0x24, 0x49, 0x11, 0x02, 0x4c, 0x77, 0x54, 0x35,
- 0x88, 0xe2, 0x64, 0x2b, 0x3c, 0x2b, 0x8d, 0xd6, 0x1f, 0x2d, 0x41, 0xa5, 0x96, 0x95, 0x26, 0x81,
- 0x80, 0xd3, 0x75, 0xec, 0xe7, 0x60, 0xb0, 0xe6, 0x37, 0x7b, 0x04, 0x8f, 0xfe, 0x3b, 0x16, 0x0c,
- 0xd7, 0xfc, 0xe6, 0x31, 0x98, 0x10, 0xdf, 0x31, 0x4d, 0x88, 0x67, 0x72, 0x36, 0x47, 0x8e, 0xd5,
- 0xf0, 0x9f, 0x0d, 0xc0, 0x04, 0xed, 0xa7, 0xbf, 0x25, 0xd7, 0xab, 0xb1, 0x36, 0xac, 0x3e, 0xd6,
- 0x06, 0xbd, 0xd0, 0xfa, 0xad, 0x96, 0xff, 0x20, 0xb9, 0x76, 0x57, 0x58, 0x29, 0x16, 0x50, 0xf4,
- 0x0a, 0x8c, 0x74, 0x02, 0xb2, 0xeb, 0xfa, 0xe2, 0xa6, 0xa8, 0x19, 0x64, 0x6b, 0xa2, 0x1c, 0x2b,
- 0x0c, 0xf4, 0x06, 0x8c, 0x87, 0xae, 0x47, 0xa5, 0x62, 0x7e, 0xf4, 0x0e, 0xb0, 0x83, 0x81, 0xe7,
- 0xd2, 0xd3, 0xca, 0xb1, 0x81, 0x85, 0xee, 0xc1, 0x28, 0xfb, 0xcf, 0x78, 0xeb, 0xe0, 0x91, 0x79,
- 0xab, 0x48, 0x94, 0x2e, 0x08, 0xe0, 0x98, 0x16, 0x15, 0x38, 0x22, 0x99, 0x8f, 0x2a, 0x14, 0x41,
- 0x84, 0x95, 0xc0, 0xa1, 0x32, 0x55, 0x85, 0x58, 0xc3, 0x42, 0x2f, 0xc3, 0x68, 0xe4, 0xb8, 0xad,
- 0x5b, 0xae, 0xc7, 0x3c, 0x51, 0x68, 0xff, 0x45, 0xbe, 0x72, 0x51, 0x88, 0x63, 0x38, 0xbd, 0xd5,
- 0xb0, 0xd8, 0x6a, 0x0b, 0x7b, 0x91, 0xc8, 0xa2, 0x59, 0xe6, 0xb7, 0x9a, 0x5b, 0xaa, 0x14, 0x6b,
- 0x18, 0x68, 0x1b, 0xce, 0xb9, 0x1e, 0xcb, 0x3b, 0x47, 0xea, 0x3b, 0x6e, 0x67, 0xe3, 0x56, 0xfd,
- 0x2e, 0x09, 0xdc, 0xcd, 0xbd, 0x05, 0xa7, 0xb1, 0x43, 0xbc, 0x26, 0x53, 0x7a, 0x8d, 0x2c, 0x3c,
- 0x2f, 0xba, 0x78, 0x6e, 0xb5, 0x00, 0x17, 0x17, 0x52, 0x42, 0x36, 0xe5, 0x39, 0x01, 0x71, 0xda,
- 0x42, 0xbb, 0xc5, 0x73, 0x56, 0xb1, 0x12, 0x2c, 0x20, 0xf6, 0xeb, 0x6c, 0x4f, 0xdc, 0xae, 0xa3,
- 0xcf, 0x1a, 0x3c, 0xf4, 0xb4, 0xce, 0x43, 0x0f, 0xf7, 0xab, 0x43, 0xb7, 0xeb, 0x5a, 0x9c, 0xad,
- 0x6b, 0x70, 0xaa, 0xe6, 0x37, 0x6b, 0x7e, 0x10, 0xad, 0xf8, 0xc1, 0x03, 0x27, 0x68, 0xca, 0x25,
- 0x58, 0x95, 0x91, 0xc6, 0x28, 0x67, 0x18, 0xe4, 0x6c, 0xd6, 0x88, 0x22, 0xf6, 0x3a, 0xbb, 0x9f,
- 0x1c, 0xf1, 0x61, 0x77, 0x83, 0x49, 0xca, 0x2a, 0xbb, 0xe3, 0x75, 0x27, 0x22, 0xe8, 0x36, 0x4c,
- 0x34, 0x74, 0xd9, 0x44, 0x54, 0x7f, 0x49, 0x9e, 0xe8, 0x86, 0xe0, 0x92, 0x29, 0xcc, 0x98, 0xf5,
- 0xed, 0xdf, 0xb7, 0x44, 0x2b, 0x1a, 0xd7, 0xe8, 0xe3, 0x60, 0x59, 0xcc, 0x62, 0x4e, 0xfc, 0xa6,
- 0x7a, 0xaa, 0x5f, 0xc6, 0x84, 0xbe, 0x06, 0x67, 0x8d, 0x42, 0xe9, 0x14, 0xa2, 0xe5, 0xdf, 0x67,
- 0x9a, 0x49, 0x9c, 0x87, 0x84, 0xf3, 0xeb, 0xdb, 0x3f, 0x06, 0xa7, 0x93, 0xdf, 0x25, 0x38, 0xfa,
- 0x63, 0x7e, 0x5d, 0xe9, 0x68, 0x5f, 0x67, 0xbf, 0x09, 0x33, 0x35, 0x5f, 0x8b, 0xa2, 0xc2, 0xe6,
- 0xaf, 0x77, 0x30, 0xb7, 0x5f, 0x1e, 0x61, 0x67, 0x7d, 0x22, 0x65, 0x23, 0xfa, 0x06, 0x4c, 0x86,
- 0x84, 0x45, 0x30, 0x94, 0x3a, 0xea, 0x82, 0xa8, 0x0c, 0xf5, 0x65, 0x1d, 0x93, 0xdf, 0xc3, 0xcd,
- 0x32, 0x9c, 0xa0, 0x86, 0xda, 0x30, 0xf9, 0xc0, 0xf5, 0x9a, 0xfe, 0x83, 0x50, 0xd2, 0x1f, 0xc9,
- 0x37, 0x78, 0xdd, 0xe3, 0x98, 0x89, 0x3e, 0x1a, 0xcd, 0xdd, 0x33, 0x88, 0xe1, 0x04, 0x71, 0xca,
- 0x6a, 0x82, 0xae, 0x37, 0x1f, 0xde, 0x09, 0x49, 0x20, 0xe2, 0x2b, 0x32, 0x56, 0x83, 0x65, 0x21,
- 0x8e, 0xe1, 0x94, 0xd5, 0xb0, 0x3f, 0x2c, 0xac, 0x03, 0xe3, 0x65, 0x82, 0xd5, 0x60, 0x55, 0x8a,
- 0x35, 0x0c, 0xca, 0x8a, 0xd9, 0xbf, 0x75, 0xdf, 0xc3, 0xbe, 0x1f, 0x49, 0xe6, 0xcd, 0xb2, 0xea,
- 0x6a, 0xe5, 0xd8, 0xc0, 0xca, 0x89, 0xe6, 0x38, 0x70, 0xd4, 0x68, 0x8e, 0x28, 0x2a, 0x88, 0x64,
- 0xc1, 0xe3, 0x91, 0x5f, 0x2b, 0x8a, 0x64, 0x71, 0xf8, 0x58, 0x51, 0x2e, 0xa8, 0xc0, 0xb3, 0x29,
- 0x06, 0x68, 0x90, 0x87, 0xab, 0x64, 0x26, 0xf9, 0x3a, 0x1f, 0x1d, 0x09, 0x43, 0xcb, 0x30, 0x1c,
- 0xee, 0x85, 0x8d, 0xa8, 0x15, 0x16, 0x65, 0x4e, 0xae, 0x33, 0x94, 0x58, 0x1e, 0xe5, 0xff, 0x43,
- 0x2c, 0xeb, 0xa2, 0x06, 0x9c, 0x10, 0x14, 0x17, 0xb7, 0x1d, 0x4f, 0x65, 0x56, 0xe5, 0xbe, 0xb7,
- 0x57, 0x0f, 0xf6, 0xab, 0x27, 0x44, 0xcb, 0x3a, 0xf8, 0x70, 0xbf, 0x4a, 0xb7, 0x64, 0x06, 0x04,
- 0x67, 0x51, 0xe3, 0x4b, 0xbe, 0xd1, 0xf0, 0xdb, 0x9d, 0x5a, 0xe0, 0x6f, 0xba, 0x2d, 0x52, 0xe4,
- 0xd6, 0x50, 0x37, 0x30, 0xc5, 0x92, 0x37, 0xca, 0x70, 0x82, 0x1a, 0xba, 0x0f, 0x53, 0x4e, 0xa7,
- 0x33, 0x1f, 0xb4, 0xfd, 0x40, 0x36, 0x30, 0x96, 0x6f, 0x1f, 0x9b, 0x37, 0x51, 0x79, 0x62, 0xd5,
- 0x44, 0x21, 0x4e, 0x12, 0xa4, 0x03, 0x25, 0x36, 0x9a, 0x31, 0x50, 0x13, 0xf1, 0x40, 0x89, 0x7d,
- 0x99, 0x31, 0x50, 0x19, 0x10, 0x9c, 0x45, 0xcd, 0xfe, 0xf3, 0xec, 0x76, 0xc3, 0xa2, 0x9d, 0xb3,
- 0x47, 0x6e, 0x6d, 0x98, 0xe8, 0x30, 0xb6, 0x2f, 0x92, 0x1e, 0x0a, 0x56, 0xf1, 0x46, 0x9f, 0x6a,
- 0xf8, 0x07, 0x2c, 0xab, 0xb3, 0xe1, 0x8e, 0x5d, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xaf, 0x67,
- 0x99, 0xe8, 0x58, 0xe7, 0xba, 0xf5, 0x61, 0xf1, 0xe4, 0x57, 0x48, 0xc9, 0xb3, 0xf9, 0x56, 0xac,
- 0x78, 0x7d, 0x89, 0x67, 0xc3, 0x58, 0xd6, 0x45, 0x5f, 0x87, 0x49, 0xd7, 0x73, 0xe3, 0x24, 0xeb,
- 0x61, 0xe5, 0x64, 0x7e, 0x2c, 0x39, 0x85, 0xa5, 0x27, 0x44, 0xd5, 0x2b, 0xe3, 0x04, 0x31, 0xf4,
- 0x01, 0xf3, 0x50, 0x96, 0xa4, 0x4b, 0xfd, 0x90, 0xd6, 0x9d, 0x91, 0x25, 0x59, 0x8d, 0x08, 0xea,
- 0xc2, 0x89, 0x74, 0xb2, 0xf9, 0xb0, 0x62, 0xe7, 0x5f, 0x00, 0xd3, 0xf9, 0xe2, 0xe3, 0xcc, 0x95,
- 0x69, 0x58, 0x88, 0xb3, 0xe8, 0xa3, 0x5b, 0xc9, 0x54, 0xe0, 0x65, 0xc3, 0xfe, 0x95, 0x4a, 0x07,
- 0x3e, 0x51, 0x98, 0x05, 0x7c, 0x0b, 0xce, 0x6b, 0x79, 0x8d, 0xaf, 0x07, 0x0e, 0xf3, 0x90, 0x73,
- 0xd9, 0x69, 0xa4, 0x09, 0xb5, 0xcf, 0x1e, 0xec, 0x57, 0xcf, 0x6f, 0x14, 0x21, 0xe2, 0x62, 0x3a,
- 0xe8, 0x36, 0x9c, 0xe2, 0x91, 0x90, 0x96, 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x92, 0x9a, 0x39, 0xef,
- 0x3a, 0x7b, 0xb0, 0x5f, 0x3d, 0x35, 0x9f, 0x85, 0x80, 0xb3, 0xeb, 0xa1, 0x77, 0x60, 0xb4, 0xe9,
- 0x49, 0x2e, 0x3b, 0x64, 0xa4, 0x8e, 0x1e, 0x5d, 0x5a, 0xaf, 0xab, 0xef, 0x8f, 0xff, 0xe0, 0xb8,
- 0x02, 0xda, 0xe2, 0x06, 0x58, 0xa5, 0x35, 0x1f, 0x4e, 0x05, 0xc8, 0x4d, 0x1a, 0x96, 0x8c, 0xd0,
- 0x22, 0xdc, 0xf3, 0x40, 0x3d, 0x3f, 0x35, 0xa2, 0x8e, 0x18, 0x84, 0xd1, 0xfb, 0x80, 0x44, 0xbe,
- 0xaf, 0xf9, 0x06, 0xcb, 0xa8, 0xa9, 0x79, 0x45, 0x2b, 0x3d, 0x49, 0x3d, 0x85, 0x81, 0x33, 0x6a,
- 0xa1, 0x1b, 0x94, 0x3d, 0xea, 0xa5, 0x82, 0xfd, 0x4a, 0x7d, 0x56, 0x65, 0x89, 0x74, 0x02, 0xc2,
- 0x1c, 0x79, 0x4d, 0x8a, 0x38, 0x51, 0x0f, 0x35, 0xe1, 0x9c, 0xd3, 0x8d, 0x7c, 0x66, 0xdb, 0x36,
- 0x51, 0x37, 0xfc, 0x1d, 0xe2, 0x31, 0xb7, 0x92, 0x11, 0x16, 0x78, 0xf7, 0xdc, 0x7c, 0x01, 0x1e,
- 0x2e, 0xa4, 0x42, 0xaf, 0x53, 0x74, 0x2c, 0x34, 0xb3, 0xb3, 0x11, 0x25, 0x81, 0xfb, 0x62, 0x48,
- 0x0c, 0xf4, 0x26, 0x8c, 0x6d, 0xfb, 0x61, 0xb4, 0x4e, 0xa2, 0x07, 0x7e, 0xb0, 0x23, 0x12, 0x8c,
- 0xc4, 0x49, 0x9d, 0x62, 0x10, 0xd6, 0xf1, 0xd0, 0x4b, 0x30, 0xcc, 0x9c, 0x1e, 0x57, 0x97, 0xd8,
- 0x59, 0x3b, 0x12, 0xf3, 0x98, 0x1b, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0x5d, 0xad, 0x2d, 0x32, 0x76,
- 0x9c, 0x40, 0x5d, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xdc, 0x76, 0x02, 0x52, 0x0b, 0xfc,
- 0x06, 0x09, 0xb5, 0x54, 0x62, 0xcf, 0xf0, 0xf4, 0x29, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70, 0x76,
- 0x3d, 0x44, 0xd2, 0x39, 0xbd, 0x27, 0xf3, 0x8d, 0xfe, 0x69, 0x71, 0xb0, 0xcf, 0xb4, 0xde, 0x1e,
- 0x4c, 0xab, 0x6c, 0xe2, 0x3c, 0x61, 0x4a, 0x58, 0x99, 0xca, 0xcf, 0xe9, 0x9f, 0xf9, 0xd6, 0x47,
- 0xb9, 0x51, 0xac, 0x26, 0x28, 0xe1, 0x14, 0x6d, 0x23, 0xb2, 0xf3, 0x74, 0xcf, 0xc8, 0xce, 0x57,
- 0x60, 0x34, 0xec, 0xde, 0x6f, 0xfa, 0x6d, 0xc7, 0xf5, 0x98, 0xef, 0x98, 0x76, 0x71, 0xaf, 0x4b,
- 0x00, 0x8e, 0x71, 0xd0, 0x0a, 0x8c, 0x38, 0xd2, 0x47, 0x02, 0xe5, 0x07, 0xad, 0x54, 0x9e, 0x11,
- 0x3c, 0x8e, 0x9b, 0xf4, 0x8a, 0x50, 0x75, 0xd1, 0xdb, 0x30, 0x21, 0x02, 0xe3, 0x08, 0x7d, 0xfc,
- 0x09, 0xf3, 0x29, 0x7f, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xdd, 0x81, 0xb1, 0xc8, 0x6f, 0x09, 0x45,
- 0x6e, 0x58, 0x39, 0x9d, 0x1f, 0x5b, 0x7a, 0x43, 0xa1, 0xe9, 0xd6, 0x3b, 0x55, 0x15, 0xeb, 0x74,
- 0xd0, 0x06, 0x5f, 0xef, 0x2c, 0x71, 0x18, 0x09, 0x2b, 0x67, 0xf2, 0xcf, 0x24, 0x95, 0x5f, 0xcc,
- 0xdc, 0x0e, 0xa2, 0x26, 0xd6, 0xc9, 0xa0, 0xeb, 0x30, 0xd3, 0x09, 0x5c, 0x9f, 0xad, 0x09, 0xe5,
- 0xf3, 0x51, 0x31, 0x75, 0x48, 0xb5, 0x24, 0x02, 0x4e, 0xd7, 0x61, 0x71, 0x8d, 0x44, 0x61, 0xe5,
- 0x2c, 0x4f, 0x75, 0xc8, 0xf5, 0x20, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x9c, 0x98, 0xeb, 0x29,
- 0x2b, 0xb3, 0xf9, 0xd1, 0x32, 0x74, 0x7d, 0x26, 0x97, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x4d,
- 0x98, 0x0c, 0xf4, 0x1b, 0x70, 0x58, 0x39, 0x57, 0xe0, 0x79, 0x9e, 0xb8, 0x2e, 0xc7, 0x02, 0x81,
- 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x1e, 0x4c, 0x8b, 0xa0, 0x1f, 0xf1, 0x30, 0x9d, 0x8f, 0x5f,
- 0xe7, 0xe1, 0x04, 0x0c, 0xa7, 0xb0, 0x79, 0xaa, 0x41, 0xe7, 0x7e, 0x8b, 0x08, 0xd6, 0x77, 0xcb,
- 0xf5, 0x76, 0xc2, 0xca, 0x05, 0xc6, 0x1f, 0x44, 0xaa, 0xc1, 0x24, 0x14, 0x67, 0xd4, 0x40, 0x1b,
- 0x30, 0xdd, 0x09, 0x08, 0x69, 0xb3, 0x7b, 0x92, 0x38, 0xcf, 0xaa, 0x3c, 0xac, 0x17, 0xed, 0x49,
- 0x2d, 0x01, 0x3b, 0xcc, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x01, 0x8c, 0xf8, 0xbb, 0x24, 0xd8, 0x26,
- 0x4e, 0xb3, 0x72, 0xb1, 0xe0, 0xcd, 0xa8, 0x38, 0xdc, 0x6e, 0x0b, 0xdc, 0x84, 0x4b, 0x9d, 0x2c,
- 0xee, 0xed, 0x52, 0x27, 0x1b, 0x43, 0xff, 0x89, 0x05, 0x67, 0xa5, 0x91, 0xba, 0xde, 0xa1, 0xa3,
- 0xbe, 0xe8, 0x7b, 0x61, 0x14, 0xf0, 0x40, 0x54, 0xcf, 0xe6, 0x07, 0x67, 0xda, 0xc8, 0xa9, 0xa4,
- 0x4c, 0x25, 0x67, 0xf3, 0x30, 0x42, 0x9c, 0xdf, 0x22, 0xbd, 0xd9, 0x87, 0x24, 0x92, 0xcc, 0x68,
- 0x3e, 0x5c, 0xf9, 0x60, 0x69, 0xbd, 0xf2, 0x1c, 0x8f, 0xa2, 0x45, 0x37, 0x43, 0x3d, 0x09, 0xc4,
- 0x69, 0x7c, 0x74, 0x15, 0x4a, 0x7e, 0x58, 0x79, 0x9e, 0xad, 0xed, 0xb3, 0x39, 0xe3, 0x78, 0xbb,
- 0xce, 0x5d, 0xab, 0x6f, 0xd7, 0x71, 0xc9, 0x0f, 0x65, 0xba, 0x3f, 0x7a, 0x9d, 0x0d, 0x2b, 0x2f,
- 0x70, 0xc5, 0xba, 0x4c, 0xf7, 0xc7, 0x0a, 0x71, 0x0c, 0x47, 0xdb, 0x30, 0x15, 0x1a, 0x6a, 0x83,
- 0xb0, 0x72, 0x89, 0x8d, 0xd4, 0x0b, 0x79, 0x93, 0x66, 0x60, 0x6b, 0x79, 0xb8, 0x4c, 0x2a, 0x38,
- 0x49, 0x96, 0xef, 0x2e, 0x4d, 0x71, 0x11, 0x56, 0x5e, 0xec, 0xb1, 0xbb, 0x34, 0x64, 0x7d, 0x77,
- 0xe9, 0x34, 0x70, 0x82, 0x26, 0xba, 0xa3, 0x3f, 0xc8, 0xbd, 0x9c, 0xef, 0xa6, 0x9b, 0xf9, 0x14,
- 0x77, 0x22, 0xf7, 0x19, 0xee, 0x7b, 0x30, 0x2d, 0xcf, 0x12, 0xba, 0x32, 0x03, 0xb7, 0x49, 0x2a,
- 0x2f, 0xc5, 0x9b, 0xf6, 0x46, 0x02, 0x86, 0x53, 0xd8, 0xb3, 0x3f, 0x02, 0x33, 0x29, 0x39, 0xee,
- 0x28, 0xef, 0x9b, 0x66, 0x77, 0x60, 0xc2, 0xd8, 0x2b, 0x4f, 0xd7, 0xfd, 0x6d, 0x0c, 0x46, 0x95,
- 0x5b, 0x52, 0x8e, 0x39, 0x72, 0xe6, 0xb1, 0xcc, 0x91, 0x57, 0x4c, 0xef, 0xb9, 0xb3, 0x49, 0xef,
- 0xb9, 0x91, 0x9a, 0xdf, 0x34, 0x1c, 0xe6, 0x36, 0x32, 0x22, 0x60, 0xe7, 0x71, 0xf9, 0xfe, 0x1f,
- 0x74, 0x6a, 0x16, 0xbd, 0x72, 0xdf, 0x6e, 0x78, 0x03, 0x85, 0x46, 0xc2, 0xeb, 0x30, 0xe3, 0xf9,
- 0xec, 0x22, 0x42, 0x9a, 0x52, 0xca, 0x64, 0xc2, 0xe4, 0xa8, 0x1e, 0xa1, 0x31, 0x81, 0x80, 0xd3,
- 0x75, 0x68, 0x83, 0x5c, 0x1a, 0x4c, 0x5a, 0x25, 0xb9, 0xb0, 0x88, 0x05, 0x94, 0x5e, 0x80, 0xf9,
- 0xaf, 0xb0, 0x32, 0x9d, 0x7f, 0x01, 0xe6, 0x95, 0x92, 0x12, 0x67, 0x28, 0x25, 0x4e, 0x66, 0x84,
- 0xeb, 0xf8, 0xcd, 0xd5, 0x9a, 0xb8, 0xcb, 0x68, 0xb9, 0x29, 0x9a, 0xab, 0x35, 0xcc, 0x61, 0x68,
- 0x1e, 0x86, 0xd8, 0x0f, 0x19, 0xf9, 0x2a, 0x8f, 0x17, 0xad, 0xd6, 0xb4, 0x9c, 0xca, 0xac, 0x02,
- 0x16, 0x15, 0x99, 0xfd, 0x81, 0x5e, 0x00, 0x99, 0xfd, 0x61, 0xf8, 0x31, 0xed, 0x0f, 0x92, 0x00,
- 0x8e, 0x69, 0xa1, 0x87, 0x70, 0xca, 0xb8, 0x74, 0xab, 0x17, 0xae, 0x90, 0xef, 0x64, 0x93, 0x40,
- 0x5e, 0x38, 0x2f, 0x3a, 0x7d, 0x6a, 0x35, 0x8b, 0x12, 0xce, 0x6e, 0x00, 0xb5, 0x60, 0xa6, 0x91,
- 0x6a, 0x75, 0xa4, 0xff, 0x56, 0xd5, 0xba, 0x48, 0xb7, 0x98, 0x26, 0x8c, 0xde, 0x86, 0x91, 0x8f,
- 0x7c, 0xee, 0x10, 0x2b, 0xee, 0x5f, 0x32, 0x3e, 0xd3, 0xc8, 0x07, 0xb7, 0xeb, 0xac, 0xfc, 0x70,
- 0xbf, 0x3a, 0x56, 0xf3, 0x9b, 0xf2, 0x2f, 0x56, 0x15, 0xd0, 0x5f, 0xb2, 0x60, 0x36, 0x7d, 0xab,
- 0x57, 0x9d, 0x9e, 0xe8, 0xbf, 0xd3, 0xb6, 0x68, 0x74, 0x76, 0x39, 0x97, 0x1c, 0x2e, 0x68, 0x0a,
- 0x7d, 0x91, 0xee, 0xa7, 0xd0, 0x7d, 0xc4, 0x5f, 0xb8, 0x68, 0x0e, 0x09, 0x98, 0x95, 0x1e, 0xee,
- 0x57, 0xa7, 0x38, 0xfb, 0x77, 0x1f, 0xa9, 0x2c, 0x1a, 0xbc, 0x02, 0xfa, 0x31, 0x38, 0x15, 0xa4,
- 0xb5, 0xec, 0x44, 0xde, 0x34, 0x3e, 0xdb, 0xcf, 0x51, 0x92, 0x9c, 0x70, 0x9c, 0x45, 0x10, 0x67,
- 0xb7, 0x83, 0xfe, 0xaa, 0x05, 0xcf, 0x90, 0x7c, 0x0b, 0xae, 0xb8, 0x2a, 0xbc, 0x96, 0xd3, 0x8f,
- 0x02, 0xdb, 0x2f, 0x4b, 0x30, 0xf0, 0x4c, 0x01, 0x02, 0x2e, 0x6a, 0xd7, 0xfe, 0xc7, 0x16, 0xb3,
- 0xfa, 0x08, 0x54, 0x12, 0x76, 0x5b, 0xd1, 0x31, 0x38, 0xc7, 0x2e, 0x1b, 0xae, 0x25, 0x8f, 0xed,
- 0xdd, 0xfa, 0xdf, 0x5a, 0xcc, 0xbb, 0xf5, 0x18, 0xdf, 0xe9, 0x7e, 0x00, 0x23, 0x91, 0x68, 0x4d,
- 0x74, 0x3d, 0xcf, 0x13, 0x4f, 0x76, 0x8a, 0x79, 0xf8, 0xaa, 0x1b, 0xa6, 0x2c, 0xc5, 0x8a, 0x8c,
- 0xfd, 0x5f, 0xf1, 0x19, 0x90, 0x90, 0x63, 0x30, 0x6e, 0x2f, 0x99, 0xc6, 0xed, 0x6a, 0x8f, 0x2f,
- 0xc8, 0x31, 0x72, 0xff, 0x03, 0xb3, 0xdf, 0x4c, 0xb3, 0xfa, 0x49, 0x77, 0xab, 0xb6, 0xbf, 0x6b,
- 0x01, 0xc4, 0xe9, 0x94, 0xfa, 0x48, 0x8c, 0x7f, 0x8d, 0xde, 0x29, 0xfd, 0xc8, 0x6f, 0xf8, 0x2d,
- 0x61, 0x5c, 0x3b, 0x17, 0xdb, 0xd7, 0x79, 0xf9, 0xa1, 0xf6, 0x1b, 0x2b, 0x6c, 0x54, 0x95, 0xf1,
- 0xcd, 0xcb, 0xb1, 0x5b, 0x8b, 0x11, 0xdb, 0xfc, 0x67, 0x2c, 0x38, 0x99, 0xf5, 0xe8, 0x0b, 0xbd,
- 0x02, 0x23, 0x5c, 0xc7, 0xac, 0x5c, 0xde, 0xd5, 0x6c, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xf4, 0xeb,
- 0xfa, 0x7e, 0xc4, 0x54, 0x3f, 0xb7, 0x61, 0xa2, 0x16, 0x10, 0x4d, 0xee, 0x79, 0x37, 0xce, 0x42,
- 0x36, 0xba, 0xf0, 0xca, 0x91, 0x23, 0xa9, 0xd9, 0xbf, 0x54, 0x82, 0x93, 0xdc, 0x71, 0x73, 0x7e,
- 0xd7, 0x77, 0x9b, 0x35, 0xbf, 0x29, 0x9e, 0xea, 0x7f, 0x15, 0xc6, 0x3b, 0x9a, 0x61, 0xa0, 0x28,
- 0x6d, 0x85, 0x6e, 0x40, 0x88, 0x55, 0x99, 0x7a, 0x29, 0x36, 0x68, 0xa1, 0x26, 0x8c, 0x93, 0x5d,
- 0xb7, 0xa1, 0x1c, 0xc3, 0x4a, 0x47, 0x16, 0x1e, 0x54, 0x2b, 0xcb, 0x1a, 0x1d, 0x6c, 0x50, 0xed,
- 0xfb, 0xb9, 0x85, 0x26, 0x3a, 0x0e, 0xf4, 0x70, 0x06, 0xfb, 0x59, 0x0b, 0xce, 0xe4, 0x24, 0xb9,
- 0xa0, 0xcd, 0x3d, 0x60, 0x2e, 0xb2, 0x62, 0xd9, 0xaa, 0xe6, 0xb8, 0xe3, 0x2c, 0x16, 0x50, 0xf4,
- 0x65, 0x80, 0x4e, 0x9c, 0x1a, 0xb8, 0x47, 0x36, 0x00, 0x23, 0x2e, 0xb8, 0x16, 0xe2, 0x59, 0x65,
- 0x10, 0xd6, 0x68, 0xd9, 0x3f, 0x33, 0x00, 0x83, 0xcc, 0x07, 0x0f, 0xd5, 0x60, 0x78, 0x9b, 0x47,
- 0x20, 0x2d, 0x9c, 0x37, 0x8a, 0x2b, 0x43, 0x9a, 0xc6, 0xf3, 0xa6, 0x95, 0x62, 0x49, 0x06, 0xad,
- 0xc1, 0x09, 0x9e, 0xf6, 0xb8, 0xb5, 0x44, 0x5a, 0xce, 0x9e, 0xd4, 0xb9, 0x97, 0xd8, 0xa7, 0x2a,
- 0xdb, 0xc3, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc2, 0x64, 0xe4, 0xb6, 0x89, 0xdf, 0x8d,
- 0x4c, 0x77, 0x53, 0x75, 0x2d, 0xdc, 0x30, 0xa0, 0x38, 0x81, 0x8d, 0xde, 0x86, 0x89, 0x4e, 0xca,
- 0xba, 0x30, 0x18, 0xab, 0xe1, 0x4c, 0x8b, 0x82, 0x89, 0xcb, 0xde, 0x7d, 0x75, 0xd9, 0x2b, 0xb7,
- 0x8d, 0xed, 0x80, 0x84, 0xdb, 0x7e, 0xab, 0xc9, 0x24, 0xf3, 0x41, 0xed, 0xdd, 0x57, 0x02, 0x8e,
- 0x53, 0x35, 0x28, 0x95, 0x4d, 0xc7, 0x6d, 0x75, 0x03, 0x12, 0x53, 0x19, 0x32, 0xa9, 0xac, 0x24,
- 0xe0, 0x38, 0x55, 0xa3, 0xb7, 0xd9, 0x64, 0xf8, 0xc9, 0x98, 0x4d, 0xec, 0xbf, 0x5b, 0x02, 0x63,
- 0x6a, 0x7f, 0x88, 0xb3, 0x18, 0xbf, 0x03, 0x03, 0x5b, 0x41, 0xa7, 0x21, 0xfc, 0x4d, 0x33, 0xbf,
- 0xec, 0x3a, 0xae, 0x2d, 0xea, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0x53, 0xc2, 0xfb,
- 0x5a, 0x06, 0x29, 0x56, 0xcf, 0x2b, 0x87, 0xa5, 0x26, 0xa2, 0x20, 0x9c, 0xbf, 0x78, 0x23, 0xa6,
- 0xfc, 0xb7, 0x35, 0x53, 0xb8, 0xd0, 0x43, 0x48, 0x2a, 0xe8, 0x2a, 0x8c, 0x89, 0xc4, 0xb2, 0xec,
- 0x15, 0x20, 0xdf, 0x4c, 0xcc, 0x95, 0x74, 0x29, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0x97, 0x4b, 0x70,
- 0x22, 0xe3, 0x19, 0x37, 0x3f, 0x46, 0xb6, 0xdc, 0x30, 0x0a, 0xf6, 0x92, 0x87, 0x13, 0x16, 0xe5,
- 0x58, 0x61, 0x50, 0x5e, 0xc5, 0x0f, 0xaa, 0xe4, 0xe1, 0x24, 0x9e, 0x49, 0x0a, 0xe8, 0xd1, 0x0e,
- 0x27, 0x7a, 0x6c, 0x77, 0x43, 0x22, 0x33, 0x87, 0xa8, 0x63, 0x9b, 0xb9, 0x64, 0x30, 0x08, 0xbd,
- 0x9a, 0x6e, 0x29, 0x3f, 0x03, 0xed, 0x6a, 0xca, 0x3d, 0x0d, 0x38, 0x8c, 0x76, 0x2e, 0x22, 0x9e,
- 0xe3, 0x45, 0xe2, 0x02, 0x1b, 0x47, 0x94, 0x67, 0xa5, 0x58, 0x40, 0xed, 0xef, 0x95, 0xe1, 0x6c,
- 0x6e, 0x60, 0x07, 0xda, 0xf5, 0xb6, 0xef, 0xb9, 0x91, 0xaf, 0x7c, 0x74, 0x79, 0x14, 0x79, 0xd2,
- 0xd9, 0x5e, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x4b, 0x30, 0xc8, 0x2c, 0x12, 0xc9, 0xa4, 0x92, 0x78,
- 0x61, 0x89, 0xc7, 0xd8, 0xe5, 0x60, 0xed, 0x54, 0x2f, 0x17, 0x9e, 0xea, 0xcf, 0x51, 0x09, 0xc6,
- 0x6f, 0x25, 0x0f, 0x14, 0xda, 0x5d, 0xdf, 0x6f, 0x61, 0x06, 0x44, 0x2f, 0x88, 0xf1, 0x4a, 0x38,
- 0xa5, 0x62, 0xa7, 0xe9, 0x87, 0xda, 0xa0, 0x71, 0x07, 0xf8, 0xc0, 0xf5, 0xb6, 0x92, 0xce, 0xca,
- 0x37, 0x79, 0x31, 0x96, 0x70, 0xba, 0x97, 0xe2, 0xdc, 0xf8, 0xc3, 0xf9, 0x7b, 0x49, 0x65, 0xc0,
- 0xef, 0x99, 0x16, 0x5f, 0x5f, 0x01, 0x23, 0x3d, 0xc5, 0x93, 0x9f, 0x2a, 0xc3, 0x14, 0x5e, 0x58,
- 0xfa, 0x74, 0x22, 0xee, 0xa4, 0x27, 0xa2, 0x7f, 0xb3, 0xd9, 0x93, 0x9a, 0x8d, 0x7f, 0x68, 0xc1,
- 0x14, 0x4b, 0x6f, 0x2b, 0xa2, 0x32, 0xb9, 0xbe, 0x77, 0x0c, 0x57, 0x81, 0xe7, 0x60, 0x30, 0xa0,
- 0x8d, 0x8a, 0x19, 0x54, 0x7b, 0x9c, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x07, 0x03, 0xac, 0x0b, 0x74,
- 0xf2, 0xc6, 0x39, 0x0b, 0x5e, 0x72, 0x22, 0x07, 0xb3, 0x52, 0x16, 0x1f, 0x16, 0x93, 0x4e, 0xcb,
- 0xe5, 0x9d, 0x8e, 0xfd, 0x45, 0x3e, 0x19, 0x21, 0x9f, 0x32, 0xbb, 0xf6, 0xf1, 0xe2, 0xc3, 0x66,
- 0x93, 0x2c, 0xbe, 0x66, 0xff, 0x71, 0x09, 0x2e, 0x64, 0xd6, 0xeb, 0x3b, 0x3e, 0x6c, 0x71, 0xed,
- 0xa7, 0x99, 0x0c, 0xb3, 0x7c, 0x8c, 0x4f, 0x41, 0x06, 0xfa, 0x95, 0xfe, 0x07, 0xfb, 0x08, 0xdb,
- 0x9a, 0x39, 0x64, 0x9f, 0x90, 0xb0, 0xad, 0x99, 0x7d, 0xcb, 0x51, 0x13, 0xfc, 0x69, 0x29, 0xe7,
- 0x5b, 0x98, 0xc2, 0xe0, 0x32, 0xe5, 0x33, 0x0c, 0x18, 0xca, 0x4b, 0x38, 0xe7, 0x31, 0xbc, 0x0c,
- 0x2b, 0x28, 0x9a, 0x87, 0xa9, 0xb6, 0xeb, 0x51, 0xe6, 0xb3, 0x67, 0x8a, 0xe2, 0xca, 0x90, 0xb4,
- 0x66, 0x82, 0x71, 0x12, 0x1f, 0xb9, 0x5a, 0x48, 0x57, 0xfe, 0x75, 0x6f, 0x1f, 0x69, 0xd7, 0xcd,
- 0x99, 0xbe, 0x34, 0x6a, 0x14, 0x33, 0xc2, 0xbb, 0xae, 0x69, 0x7a, 0xa2, 0x72, 0xff, 0x7a, 0xa2,
- 0xf1, 0x6c, 0x1d, 0xd1, 0xec, 0xdb, 0x30, 0xf1, 0xd8, 0xf6, 0x1f, 0xfb, 0xfb, 0x65, 0x78, 0xa6,
- 0x60, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, 0xc6, 0xeb, 0x53, 0xf3, 0x50, 0x83, 0x93, 0x9b, 0xdd,
- 0x56, 0x6b, 0x8f, 0x3d, 0x6c, 0x25, 0x4d, 0x89, 0x21, 0x64, 0x4a, 0xf5, 0xf4, 0x6d, 0x25, 0x03,
- 0x07, 0x67, 0xd6, 0xa4, 0x57, 0x2c, 0x7a, 0x92, 0xec, 0x29, 0x52, 0x89, 0x2b, 0x16, 0xd6, 0x81,
- 0xd8, 0xc4, 0x45, 0xd7, 0x61, 0xc6, 0xd9, 0x75, 0x5c, 0x9e, 0x4c, 0x48, 0x12, 0xe0, 0x77, 0x2c,
- 0xa5, 0x23, 0x9f, 0x4f, 0x22, 0xe0, 0x74, 0x9d, 0x1c, 0x53, 0x55, 0xf9, 0xb1, 0x4c, 0x55, 0x66,
- 0x70, 0xd1, 0xa1, 0xfc, 0xe0, 0xa2, 0xc5, 0x7c, 0xb1, 0x67, 0x1e, 0xd6, 0x0f, 0x61, 0xe2, 0xa8,
- 0x3e, 0xf1, 0x2f, 0xc1, 0xb0, 0x78, 0xc3, 0x93, 0x7c, 0xaf, 0x29, 0xf3, 0xff, 0x4b, 0xb8, 0xfd,
- 0xbf, 0x5a, 0xa0, 0x74, 0xdc, 0x66, 0x1e, 0x81, 0xb7, 0x99, 0x83, 0x3f, 0xd7, 0xce, 0x6b, 0x6f,
- 0x45, 0x4f, 0x69, 0x0e, 0xfe, 0x31, 0x10, 0x9b, 0xb8, 0x7c, 0xb9, 0x85, 0x71, 0xc4, 0x1a, 0xe3,
- 0x02, 0x21, 0x6c, 0xab, 0x0a, 0x03, 0x7d, 0x05, 0x86, 0x9b, 0xee, 0xae, 0x1b, 0x0a, 0x3d, 0xda,
- 0x91, 0x6d, 0x93, 0xf1, 0xf7, 0x2d, 0x71, 0x32, 0x58, 0xd2, 0xb3, 0xff, 0x8a, 0x05, 0xca, 0x28,
- 0x7c, 0x83, 0x38, 0xad, 0x68, 0x1b, 0xbd, 0x07, 0x20, 0x29, 0x28, 0xdd, 0x9b, 0x74, 0x55, 0x03,
- 0xac, 0x20, 0x87, 0xc6, 0x3f, 0xac, 0xd5, 0x41, 0xef, 0xc2, 0xd0, 0x36, 0xa3, 0x25, 0xbe, 0xed,
- 0x92, 0x32, 0xc1, 0xb1, 0xd2, 0xc3, 0xfd, 0xea, 0x49, 0xb3, 0x4d, 0x79, 0x8a, 0xf1, 0x5a, 0xf6,
- 0x4f, 0x95, 0xe2, 0x39, 0xfd, 0xa0, 0xeb, 0x47, 0xce, 0x31, 0x48, 0x22, 0xd7, 0x0d, 0x49, 0xe4,
- 0x85, 0x22, 0xab, 0x37, 0xeb, 0x52, 0xae, 0x04, 0x72, 0x3b, 0x21, 0x81, 0xbc, 0xd8, 0x9b, 0x54,
- 0xb1, 0xe4, 0xf1, 0x5f, 0x5b, 0x30, 0x63, 0xe0, 0x1f, 0xc3, 0x01, 0xb8, 0x62, 0x1e, 0x80, 0xcf,
- 0xf6, 0xfc, 0x86, 0x9c, 0x83, 0xef, 0x27, 0xca, 0x89, 0xbe, 0xb3, 0x03, 0xef, 0x23, 0x18, 0xd8,
- 0x76, 0x82, 0xa6, 0xb8, 0xd7, 0x5f, 0xe9, 0x6b, 0xac, 0xe7, 0x6e, 0x38, 0x81, 0x70, 0x73, 0x79,
- 0x45, 0x8e, 0x3a, 0x2d, 0xea, 0xe9, 0xe2, 0xc2, 0x9a, 0x42, 0xd7, 0x60, 0x28, 0x6c, 0xf8, 0x1d,
- 0xf5, 0x24, 0xf4, 0x22, 0x1b, 0x68, 0x56, 0x72, 0xb8, 0x5f, 0x45, 0x66, 0x73, 0xb4, 0x18, 0x0b,
- 0x7c, 0xf4, 0x55, 0x98, 0x60, 0xbf, 0x94, 0xcf, 0x69, 0x39, 0x5f, 0x03, 0x53, 0xd7, 0x11, 0xb9,
- 0x43, 0xb6, 0x51, 0x84, 0x4d, 0x52, 0xb3, 0x5b, 0x30, 0xaa, 0x3e, 0xeb, 0xa9, 0x7a, 0x24, 0xfc,
- 0x8b, 0x32, 0x9c, 0xc8, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x6a, 0x9f, 0x4b, 0xf5, 0x63, 0xce,
- 0x45, 0xc8, 0x2e, 0x80, 0x4d, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x4e, 0x48, 0x92, 0x8d, 0xd2, 0xa2,
- 0xde, 0x8d, 0xd2, 0xc6, 0x8e, 0x6d, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x4f, 0x75, 0x4e, 0x7f, 0x6b,
- 0x00, 0x4e, 0x66, 0x39, 0xe2, 0xa0, 0x1f, 0x85, 0x21, 0xf6, 0x9c, 0xaf, 0xf0, 0xfd, 0x6b, 0x56,
- 0xcd, 0x39, 0xf6, 0x22, 0x50, 0x84, 0xa2, 0x9e, 0x93, 0xec, 0x88, 0x17, 0xf6, 0x1c, 0x66, 0xd1,
- 0x26, 0x0b, 0x11, 0x27, 0x4e, 0x4f, 0xc9, 0x3e, 0x3e, 0xdf, 0x77, 0x07, 0xc4, 0xf9, 0x1b, 0x26,
- 0xfc, 0xd9, 0x64, 0x71, 0x6f, 0x7f, 0x36, 0xd9, 0x32, 0x5a, 0x85, 0xa1, 0x06, 0x77, 0x94, 0x2a,
- 0xf7, 0x66, 0x61, 0xdc, 0x4b, 0x4a, 0x31, 0x60, 0xe1, 0x1d, 0x25, 0x08, 0xcc, 0xba, 0x30, 0xa6,
- 0x0d, 0xcc, 0x53, 0x5d, 0x3c, 0x3b, 0xf4, 0xe0, 0xd3, 0x86, 0xe0, 0xa9, 0x2e, 0xa0, 0xbf, 0xae,
- 0x9d, 0xfd, 0x82, 0x1f, 0x7c, 0xce, 0x90, 0x9d, 0xce, 0x25, 0x1e, 0x59, 0x26, 0xf6, 0x15, 0x93,
- 0xa5, 0xea, 0x66, 0x0e, 0x87, 0xdc, 0x44, 0x74, 0xe6, 0x81, 0x5f, 0x9c, 0xb7, 0xc1, 0xfe, 0x59,
- 0x0b, 0x12, 0xcf, 0xe0, 0x94, 0xba, 0xd3, 0xca, 0x55, 0x77, 0x5e, 0x84, 0x81, 0xc0, 0x6f, 0x49,
- 0x79, 0x4a, 0x61, 0x60, 0xbf, 0x45, 0x30, 0x83, 0x50, 0x8c, 0x28, 0x56, 0x62, 0x8d, 0xeb, 0x17,
- 0x74, 0x71, 0xf5, 0x7e, 0x0e, 0x06, 0x5b, 0x64, 0x97, 0xb4, 0x92, 0xf9, 0x98, 0x6f, 0xd1, 0x42,
- 0xcc, 0x61, 0xf6, 0x3f, 0x1c, 0x80, 0xf3, 0x85, 0x91, 0x24, 0xa9, 0x80, 0xb9, 0xe5, 0x44, 0xe4,
- 0x81, 0xb3, 0x97, 0xcc, 0x43, 0x7a, 0x9d, 0x17, 0x63, 0x09, 0x67, 0xef, 0xee, 0x79, 0x6e, 0xad,
- 0x84, 0x72, 0x58, 0xa4, 0xd4, 0x12, 0x50, 0x53, 0xd9, 0x58, 0x7e, 0x12, 0xca, 0xc6, 0xd7, 0x00,
- 0xc2, 0xb0, 0xc5, 0xbd, 0x5d, 0x9b, 0xe2, 0x41, 0x7f, 0x1c, 0xe9, 0xa4, 0x7e, 0x4b, 0x40, 0xb0,
- 0x86, 0x85, 0x96, 0x60, 0xba, 0x13, 0xf8, 0x11, 0xd7, 0xb5, 0x2f, 0x71, 0x87, 0xf0, 0x41, 0x33,
- 0x88, 0x5f, 0x2d, 0x01, 0xc7, 0xa9, 0x1a, 0xe8, 0x4d, 0x18, 0x13, 0x81, 0xfd, 0x6a, 0xbe, 0xdf,
- 0x12, 0xea, 0x3d, 0xe5, 0x23, 0x5d, 0x8f, 0x41, 0x58, 0xc7, 0xd3, 0xaa, 0x31, 0x05, 0xfe, 0x70,
- 0x66, 0x35, 0xae, 0xc4, 0xd7, 0xf0, 0x12, 0x49, 0x40, 0x46, 0xfa, 0x4a, 0x02, 0x12, 0x2b, 0x3c,
- 0x47, 0xfb, 0xb6, 0x27, 0x43, 0x4f, 0x15, 0xe1, 0xaf, 0x0c, 0xc0, 0x09, 0xb1, 0x70, 0x9e, 0xf6,
- 0x72, 0xb9, 0x93, 0x5e, 0x2e, 0x4f, 0x42, 0x25, 0xfa, 0xe9, 0x9a, 0x39, 0xee, 0x35, 0xf3, 0xd3,
- 0x16, 0x98, 0x32, 0x24, 0xfa, 0x8f, 0x72, 0x13, 0x39, 0xbf, 0x99, 0x2b, 0x93, 0xc6, 0x19, 0x02,
- 0x3e, 0x5e, 0x4a, 0x67, 0xfb, 0x7f, 0xb6, 0xe0, 0xd9, 0x9e, 0x14, 0xd1, 0x32, 0x8c, 0x32, 0x41,
- 0x57, 0xbb, 0x17, 0xbf, 0xa8, 0x1e, 0x8c, 0x48, 0x40, 0x8e, 0xdc, 0x1d, 0xd7, 0x44, 0xcb, 0xa9,
- 0x8c, 0xd9, 0x2f, 0x65, 0x64, 0xcc, 0x3e, 0x65, 0x0c, 0xcf, 0x63, 0xa6, 0xcc, 0xfe, 0x49, 0x7a,
- 0xe2, 0x98, 0xaf, 0x4e, 0x3f, 0x6f, 0xa8, 0x73, 0xed, 0x84, 0x3a, 0x17, 0x99, 0xd8, 0xda, 0x19,
- 0xf2, 0x1e, 0x4c, 0xb3, 0x88, 0xbf, 0xec, 0xf9, 0x92, 0x78, 0xae, 0x5a, 0x8a, 0xbd, 0x9d, 0x6f,
- 0x25, 0x60, 0x38, 0x85, 0x6d, 0xff, 0x9b, 0x32, 0x0c, 0xf1, 0xed, 0x77, 0x0c, 0x17, 0xdf, 0x97,
- 0x61, 0xd4, 0x6d, 0xb7, 0xbb, 0x3c, 0x09, 0xf2, 0x60, 0xec, 0xf0, 0xbe, 0x2a, 0x0b, 0x71, 0x0c,
- 0x47, 0x2b, 0xc2, 0x92, 0x50, 0x90, 0x54, 0x80, 0x77, 0x7c, 0x6e, 0xc9, 0x89, 0x1c, 0x2e, 0xc5,
- 0xa9, 0x73, 0x36, 0xb6, 0x39, 0xa0, 0x6f, 0x00, 0x84, 0x51, 0xe0, 0x7a, 0x5b, 0xb4, 0x4c, 0x64,
- 0x9e, 0xf9, 0x6c, 0x01, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x63, 0x9e, 0xa3, 0x00, 0x58, 0xa3, 0x88,
- 0xe6, 0x8c, 0x93, 0x7e, 0x36, 0x31, 0x77, 0xc0, 0xa9, 0xc6, 0x73, 0x36, 0xfb, 0x05, 0x18, 0x55,
- 0xc4, 0x7b, 0xe9, 0x15, 0xc7, 0x75, 0x81, 0xed, 0x4b, 0x30, 0x95, 0xe8, 0xdb, 0x91, 0xd4, 0x92,
- 0xbf, 0x6e, 0xc1, 0x14, 0xef, 0xcc, 0xb2, 0xb7, 0x2b, 0x4e, 0x83, 0x47, 0x70, 0xb2, 0x95, 0xc1,
- 0x95, 0xc5, 0xf4, 0xf7, 0xcf, 0xc5, 0x95, 0x1a, 0x32, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9,
- 0x8e, 0xa3, 0x5c, 0xd7, 0x69, 0x89, 0x98, 0x2b, 0xe3, 0x7c, 0xb7, 0xf1, 0x32, 0xac, 0xa0, 0xf6,
- 0x1f, 0x58, 0x30, 0xc3, 0x7b, 0x7e, 0x93, 0xec, 0x29, 0xde, 0xf4, 0x83, 0xec, 0xbb, 0x48, 0xbf,
- 0x5f, 0xca, 0x49, 0xbf, 0xaf, 0x7f, 0x5a, 0xb9, 0xf0, 0xd3, 0x7e, 0xc9, 0x02, 0xb1, 0x42, 0x8e,
- 0x41, 0xd3, 0xf2, 0x23, 0xa6, 0xa6, 0x65, 0x36, 0x7f, 0x13, 0xe4, 0xa8, 0x58, 0xfe, 0xbd, 0x05,
- 0xd3, 0x1c, 0x41, 0x8b, 0x62, 0xf7, 0x83, 0x9c, 0x87, 0x05, 0xf3, 0x8b, 0x32, 0xdd, 0x5a, 0x6f,
- 0x92, 0xbd, 0x0d, 0xbf, 0xe6, 0x44, 0xdb, 0xd9, 0x1f, 0x65, 0x4c, 0xd6, 0x40, 0xe1, 0x64, 0x35,
- 0xe5, 0x06, 0x32, 0x12, 0xad, 0xf6, 0x50, 0x00, 0x1f, 0x35, 0xd1, 0xaa, 0xfd, 0x47, 0x16, 0x20,
- 0xde, 0x8c, 0x21, 0xb8, 0x51, 0x71, 0x88, 0x95, 0x66, 0x06, 0x0b, 0x54, 0x10, 0xac, 0x61, 0x3d,
- 0x91, 0xe1, 0x49, 0xb8, 0xb2, 0x94, 0x7b, 0xbb, 0xb2, 0x1c, 0x61, 0x44, 0x7f, 0x69, 0x18, 0x92,
- 0x0f, 0x56, 0xd1, 0x5d, 0x18, 0x6f, 0x38, 0x1d, 0xe7, 0xbe, 0xdb, 0x72, 0x23, 0x97, 0x84, 0x45,
- 0x7e, 0x6e, 0x8b, 0x1a, 0x9e, 0x70, 0x3e, 0xd0, 0x4a, 0xb0, 0x41, 0x07, 0xcd, 0x01, 0x74, 0x02,
- 0x77, 0xd7, 0x6d, 0x91, 0x2d, 0xa6, 0x10, 0x62, 0x51, 0x9e, 0xb8, 0xd3, 0x9d, 0x2c, 0xc5, 0x1a,
- 0x46, 0x46, 0x70, 0x95, 0xf2, 0x53, 0x0e, 0xae, 0x02, 0xc7, 0x16, 0x5c, 0x65, 0xe0, 0x48, 0xc1,
- 0x55, 0x46, 0x8e, 0x1c, 0x5c, 0x65, 0xb0, 0xaf, 0xe0, 0x2a, 0x18, 0x4e, 0x4b, 0xd9, 0x93, 0xfe,
- 0x5f, 0x71, 0x5b, 0x44, 0x5c, 0x38, 0x78, 0x68, 0xaa, 0xd9, 0x83, 0xfd, 0xea, 0x69, 0x9c, 0x89,
- 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x43, 0xc5, 0x69, 0xb5, 0xfc, 0x07, 0x6a, 0x52, 0x97, 0xc3, 0x86,
- 0xd3, 0x8a, 0xc3, 0x32, 0x8e, 0x2c, 0x9c, 0x3b, 0xd8, 0xaf, 0x56, 0xe6, 0x73, 0x70, 0x70, 0x6e,
- 0x6d, 0xf4, 0x0e, 0x8c, 0x76, 0x02, 0xbf, 0xb1, 0xa6, 0xbd, 0xaa, 0xbf, 0x40, 0x07, 0xb0, 0x26,
- 0x0b, 0x0f, 0xf7, 0xab, 0x13, 0xea, 0x0f, 0x3b, 0xf0, 0xe3, 0x0a, 0x19, 0x71, 0x4b, 0xc6, 0x9e,
- 0x76, 0xdc, 0x92, 0xf1, 0x27, 0x1c, 0xb7, 0xc4, 0xde, 0x81, 0x13, 0x75, 0x12, 0xb8, 0x4e, 0xcb,
- 0x7d, 0x44, 0x65, 0x72, 0xc9, 0x03, 0x37, 0x60, 0x34, 0x48, 0x70, 0xfd, 0xbe, 0x92, 0x09, 0x68,
- 0x7a, 0x19, 0xc9, 0xe5, 0x63, 0x42, 0xf6, 0xff, 0x6b, 0xc1, 0xb0, 0x78, 0x04, 0x7b, 0x0c, 0x92,
- 0xe9, 0xbc, 0x61, 0x92, 0xa9, 0x66, 0x4f, 0x0a, 0xeb, 0x4c, 0xae, 0x31, 0x66, 0x35, 0x61, 0x8c,
- 0x79, 0xb6, 0x88, 0x48, 0xb1, 0x19, 0xe6, 0x3f, 0x2b, 0xd3, 0x1b, 0x82, 0x11, 0x8e, 0xe1, 0xe9,
- 0x0f, 0xc1, 0x3a, 0x0c, 0x87, 0x22, 0x1c, 0x40, 0x29, 0xff, 0x8d, 0x51, 0x72, 0x12, 0x63, 0x1f,
- 0x48, 0x11, 0x00, 0x40, 0x12, 0xc9, 0x8c, 0x33, 0x50, 0x7e, 0x8a, 0x71, 0x06, 0x7a, 0x05, 0xac,
- 0x18, 0x78, 0x12, 0x01, 0x2b, 0xec, 0xdf, 0x60, 0xa7, 0xb3, 0x5e, 0x7e, 0x0c, 0x82, 0xdb, 0x75,
- 0xf3, 0x1c, 0xb7, 0x0b, 0x56, 0x96, 0xe8, 0x54, 0x8e, 0x00, 0xf7, 0x6b, 0x16, 0x9c, 0xcf, 0xf8,
- 0x2a, 0x4d, 0x9a, 0x7b, 0x05, 0x46, 0x9c, 0x6e, 0xd3, 0x55, 0x7b, 0x59, 0xb3, 0x16, 0xcf, 0x8b,
- 0x72, 0xac, 0x30, 0xd0, 0x22, 0xcc, 0x90, 0x54, 0x7c, 0x61, 0x1e, 0xb9, 0x8b, 0xbd, 0x9c, 0x4e,
- 0x07, 0x17, 0x4e, 0xe3, 0xab, 0xa0, 0x77, 0xe5, 0xdc, 0xa0, 0x77, 0x7f, 0xcf, 0x82, 0x31, 0xf5,
- 0x20, 0xfe, 0xa9, 0x8f, 0xf6, 0x7b, 0xe6, 0x68, 0x3f, 0x53, 0x30, 0xda, 0x39, 0xc3, 0xfc, 0x7b,
- 0x25, 0xd5, 0xdf, 0x9a, 0x1f, 0x44, 0x7d, 0x48, 0x89, 0x8f, 0xff, 0xec, 0xe5, 0x2a, 0x8c, 0x39,
- 0x9d, 0x8e, 0x04, 0x48, 0xff, 0x45, 0x96, 0x1a, 0x26, 0x2e, 0xc6, 0x3a, 0x8e, 0x7a, 0x85, 0x53,
- 0xce, 0x7d, 0x85, 0xd3, 0x04, 0x88, 0x9c, 0x60, 0x8b, 0x44, 0xb4, 0x4c, 0xb8, 0x5b, 0xe7, 0xf3,
- 0x9b, 0x6e, 0xe4, 0xb6, 0xe6, 0x5c, 0x2f, 0x0a, 0xa3, 0x60, 0x6e, 0xd5, 0x8b, 0x6e, 0x07, 0xfc,
- 0x9a, 0xaa, 0x85, 0x96, 0x54, 0xb4, 0xb0, 0x46, 0x57, 0x06, 0x7f, 0x61, 0x6d, 0x0c, 0x9a, 0x8e,
- 0x30, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0b, 0xec, 0xf4, 0x61, 0x63, 0x7a, 0xb4, 0x90, 0x89,
- 0x7f, 0x3c, 0xae, 0x66, 0x83, 0x99, 0x84, 0x97, 0xf4, 0xc0, 0x8c, 0xc5, 0xcc, 0x9e, 0x36, 0xac,
- 0xbf, 0xb3, 0x8d, 0xa3, 0x37, 0xa2, 0xaf, 0xa5, 0x9c, 0x9b, 0x5e, 0xed, 0x71, 0x6a, 0x1c, 0xc1,
- 0x9d, 0x89, 0xe5, 0x89, 0x64, 0x59, 0xf4, 0x56, 0x6b, 0x62, 0x5f, 0x68, 0x79, 0x22, 0x05, 0x00,
- 0xc7, 0x38, 0x54, 0x60, 0x53, 0x7f, 0xc2, 0x0a, 0x8a, 0xd3, 0x09, 0x28, 0xec, 0x10, 0x6b, 0x18,
- 0xe8, 0x8a, 0x50, 0x5a, 0x70, 0xdb, 0xc3, 0x33, 0x09, 0xa5, 0x85, 0x1c, 0x2e, 0x4d, 0xd3, 0x74,
- 0x15, 0xc6, 0xc8, 0xc3, 0x88, 0x04, 0x9e, 0xd3, 0xa2, 0x2d, 0x0c, 0xc6, 0xc1, 0x91, 0x97, 0xe3,
- 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0a, 0xb9, 0x2e, 0x4f, 0x25, 0xb1, 0xe1, 0x3a, 0xd1, 0xcf,
- 0xaa, 0x50, 0x04, 0x26, 0xf8, 0x90, 0x15, 0x71, 0xee, 0x24, 0x03, 0xb4, 0x24, 0x49, 0xa0, 0x77,
- 0x61, 0xb2, 0xe5, 0x3b, 0xcd, 0x05, 0xa7, 0xe5, 0x78, 0x0d, 0x36, 0x3e, 0x23, 0x46, 0x94, 0xce,
- 0xc9, 0x5b, 0x06, 0x14, 0x27, 0xb0, 0xa9, 0x80, 0xa8, 0x97, 0x88, 0xc4, 0x4b, 0x8e, 0xb7, 0x45,
- 0xc2, 0xca, 0x28, 0xfb, 0x2a, 0x26, 0x20, 0xde, 0xca, 0xc1, 0xc1, 0xb9, 0xb5, 0xd1, 0x35, 0x18,
- 0x97, 0x9f, 0xaf, 0xc5, 0x33, 0x8a, 0x1f, 0x34, 0x69, 0x30, 0x6c, 0x60, 0xa2, 0x10, 0x4e, 0xc9,
- 0xff, 0x1b, 0x81, 0xb3, 0xb9, 0xe9, 0x36, 0x44, 0x90, 0x0f, 0xfe, 0x28, 0xfd, 0x4b, 0xf2, 0x05,
- 0xec, 0x72, 0x16, 0xd2, 0xe1, 0x7e, 0xf5, 0x9c, 0x18, 0xb5, 0x4c, 0x38, 0xce, 0xa6, 0x8d, 0xd6,
- 0xe0, 0x04, 0xf7, 0x81, 0x59, 0xdc, 0x26, 0x8d, 0x1d, 0xb9, 0xe1, 0x98, 0xd4, 0xa8, 0x3d, 0xfc,
- 0xb9, 0x91, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x87, 0x50, 0xe9, 0x74, 0xef, 0xb7, 0xdc, 0x70, 0x7b,
- 0xdd, 0x8f, 0x98, 0x0b, 0xd9, 0x7c, 0xb3, 0x19, 0x90, 0x90, 0xbf, 0x59, 0x66, 0x47, 0xaf, 0x8c,
- 0x41, 0x55, 0xcb, 0xc1, 0xc3, 0xb9, 0x14, 0xd0, 0x23, 0x38, 0x95, 0x58, 0x08, 0x22, 0x98, 0xcc,
- 0x64, 0x7e, 0x0a, 0xbb, 0x7a, 0x56, 0x05, 0x11, 0x97, 0x29, 0x0b, 0x84, 0xb3, 0x9b, 0x40, 0x6f,
- 0x01, 0xb8, 0x9d, 0x15, 0xa7, 0xed, 0xb6, 0xe8, 0x75, 0xf4, 0x04, 0x5b, 0x23, 0xf4, 0x6a, 0x02,
- 0xab, 0x35, 0x59, 0x4a, 0x79, 0xb3, 0xf8, 0xb7, 0x87, 0x35, 0x6c, 0x74, 0x0b, 0x26, 0xc5, 0xbf,
- 0x3d, 0x31, 0xa5, 0x33, 0x2a, 0xdb, 0xf1, 0xa4, 0xac, 0xa1, 0xe6, 0x31, 0x51, 0x82, 0x13, 0x75,
- 0xd1, 0x16, 0x9c, 0x97, 0xa9, 0x96, 0xf5, 0xf5, 0x29, 0xe7, 0x20, 0x64, 0x79, 0xe3, 0x46, 0xf8,
- 0x9b, 0xa2, 0xf9, 0x22, 0x44, 0x5c, 0x4c, 0x87, 0x9e, 0xeb, 0xfa, 0x32, 0xe7, 0x2f, 0xd9, 0x4f,
- 0xc5, 0xb1, 0x4e, 0x6f, 0x25, 0x81, 0x38, 0x8d, 0x8f, 0x7c, 0x38, 0xe5, 0x7a, 0x59, 0xab, 0xfa,
- 0x34, 0x23, 0xf4, 0x45, 0xfe, 0x88, 0xbf, 0x78, 0x45, 0x67, 0xc2, 0x71, 0x36, 0x5d, 0xb4, 0x0a,
- 0x27, 0x22, 0x5e, 0xb0, 0xe4, 0x86, 0x3c, 0x2d, 0x15, 0xbd, 0xf6, 0x9d, 0x61, 0xcd, 0x9d, 0xa1,
- 0xab, 0x79, 0x23, 0x0d, 0xc6, 0x59, 0x75, 0x3e, 0x9e, 0x03, 0xe8, 0xef, 0x5b, 0xb4, 0xb6, 0x26,
- 0xe8, 0xa3, 0x6f, 0xc2, 0xb8, 0x3e, 0x3e, 0x42, 0x68, 0xb9, 0x94, 0x2d, 0x07, 0x6b, 0xec, 0x85,
- 0x5f, 0x13, 0x14, 0x0b, 0xd1, 0x61, 0xd8, 0xa0, 0x88, 0x1a, 0x19, 0xc1, 0x37, 0xae, 0xf4, 0x27,
- 0x14, 0xf5, 0xef, 0xff, 0x48, 0x20, 0x7b, 0xe7, 0xa0, 0x5b, 0x30, 0xd2, 0x68, 0xb9, 0xc4, 0x8b,
- 0x56, 0x6b, 0x45, 0x21, 0x68, 0x17, 0x05, 0x8e, 0xd8, 0x8a, 0x22, 0x9b, 0x1c, 0x2f, 0xc3, 0x8a,
- 0x82, 0x7d, 0x0d, 0xc6, 0xea, 0x2d, 0x42, 0x3a, 0xfc, 0x1d, 0x17, 0x7a, 0x89, 0x5d, 0x4c, 0x98,
- 0x68, 0x69, 0x31, 0xd1, 0x52, 0xbf, 0x73, 0x30, 0xa1, 0x52, 0xc2, 0xed, 0xdf, 0x2e, 0x41, 0xb5,
- 0x47, 0x52, 0xc3, 0x84, 0xbd, 0xcd, 0xea, 0xcb, 0xde, 0x36, 0x0f, 0x53, 0xf1, 0x3f, 0x5d, 0x95,
- 0xa7, 0x9c, 0xa1, 0xef, 0x9a, 0x60, 0x9c, 0xc4, 0xef, 0xfb, 0x5d, 0x8b, 0x6e, 0xb2, 0x1b, 0xe8,
- 0xf9, 0x32, 0xcb, 0x30, 0xd5, 0x0f, 0xf6, 0x7f, 0xf7, 0xce, 0x35, 0xbb, 0xda, 0xbf, 0x51, 0x82,
- 0x53, 0x6a, 0x08, 0x7f, 0x78, 0x07, 0xee, 0x4e, 0x7a, 0xe0, 0x9e, 0x80, 0xd1, 0xda, 0xbe, 0x0d,
- 0x43, 0x3c, 0x2e, 0x6e, 0x1f, 0x32, 0xff, 0x73, 0x66, 0x1e, 0x06, 0x25, 0x66, 0x1a, 0xb9, 0x18,
- 0xfe, 0x92, 0x05, 0x53, 0x89, 0x07, 0x92, 0x08, 0x6b, 0xaf, 0xe8, 0x1f, 0x47, 0x2e, 0xcf, 0x92,
- 0xf8, 0x2f, 0xc2, 0xc0, 0xb6, 0xaf, 0x9c, 0x94, 0x15, 0xc6, 0x0d, 0x3f, 0x8c, 0x30, 0x83, 0xd8,
- 0xff, 0xd2, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0x69, 0xfd, 0xb0, 0x72, 0xac, 0x1f, 0xfd, 0x7c,
- 0x17, 0x7a, 0x13, 0x86, 0xc8, 0xe6, 0x26, 0x69, 0x44, 0x62, 0x56, 0x65, 0x94, 0x8f, 0xa1, 0x65,
- 0x56, 0x4a, 0x85, 0x50, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa3, 0x91, 0xdb, 0x26,
- 0xf3, 0xcd, 0xa6, 0xf0, 0x09, 0x78, 0x8c, 0xd0, 0x34, 0x1b, 0x92, 0x00, 0x8e, 0x69, 0xd9, 0xdf,
- 0x2b, 0x01, 0xc4, 0x71, 0xf8, 0x7a, 0x7d, 0xe2, 0x42, 0xca, 0x5a, 0x7c, 0x29, 0xc3, 0x5a, 0x8c,
- 0x62, 0x82, 0x19, 0xa6, 0x62, 0x35, 0x4c, 0xe5, 0xbe, 0x86, 0x69, 0xe0, 0x28, 0xc3, 0xb4, 0x08,
- 0x33, 0x71, 0x1c, 0x41, 0x33, 0x8c, 0x2a, 0x3b, 0xbf, 0x37, 0x92, 0x40, 0x9c, 0xc6, 0xb7, 0x09,
- 0x5c, 0x54, 0xe1, 0xd4, 0xc4, 0x59, 0xc8, 0x9e, 0x12, 0xe8, 0xd6, 0xf7, 0x1e, 0xe3, 0x14, 0x9b,
- 0xc3, 0x4b, 0xb9, 0xe6, 0xf0, 0xbf, 0x69, 0xc1, 0xc9, 0x64, 0x3b, 0xec, 0xdd, 0xfd, 0x77, 0x2d,
- 0x38, 0x15, 0xe7, 0xf4, 0x4a, 0xbb, 0x20, 0xbc, 0x51, 0x18, 0x22, 0x2e, 0xa7, 0xc7, 0x71, 0x38,
- 0x99, 0xb5, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xff, 0x19, 0x80, 0x4a, 0x5e, 0x6c, 0x39, 0xf6,
- 0xd2, 0xc8, 0x79, 0x58, 0xdf, 0x21, 0x0f, 0xc4, 0x7b, 0x8e, 0xf8, 0xa5, 0x11, 0x2f, 0xc6, 0x12,
- 0x9e, 0x4c, 0xe3, 0x56, 0xea, 0x33, 0x8d, 0xdb, 0x36, 0xcc, 0x3c, 0xd8, 0x26, 0xde, 0x1d, 0x2f,
- 0x74, 0x22, 0x37, 0xdc, 0x74, 0x99, 0x01, 0x9d, 0xaf, 0x9b, 0xb7, 0xe4, 0xab, 0x8b, 0x7b, 0x49,
- 0x84, 0xc3, 0xfd, 0xea, 0x79, 0xa3, 0x20, 0xee, 0x32, 0x67, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x05,
- 0x6f, 0xe0, 0x29, 0x67, 0xc1, 0x6b, 0xbb, 0xc2, 0xed, 0x46, 0x3e, 0x23, 0x61, 0xd7, 0xd6, 0x35,
- 0x55, 0x8a, 0x35, 0x0c, 0xf4, 0x75, 0x40, 0x7a, 0x1a, 0x53, 0x23, 0xb4, 0xef, 0xab, 0x07, 0xfb,
- 0x55, 0xb4, 0x9e, 0x82, 0x1e, 0xee, 0x57, 0x4f, 0xd0, 0xd2, 0x55, 0x8f, 0x5e, 0x7f, 0xe3, 0x78,
- 0x88, 0x19, 0x84, 0xd0, 0x3d, 0x98, 0xa6, 0xa5, 0x6c, 0x47, 0xc9, 0xb8, 0xc1, 0xfc, 0xca, 0xfa,
- 0xf2, 0xc1, 0x7e, 0x75, 0x7a, 0x3d, 0x01, 0xcb, 0x23, 0x9d, 0x22, 0x92, 0x91, 0x0c, 0x6f, 0xa4,
- 0xdf, 0x64, 0x78, 0xf6, 0x77, 0x2d, 0x38, 0x4b, 0x0f, 0xb8, 0xe6, 0xad, 0x1c, 0x2b, 0xba, 0xd3,
- 0x71, 0xb9, 0x9d, 0x46, 0x1c, 0x35, 0x4c, 0x57, 0x57, 0x5b, 0xe5, 0x56, 0x1a, 0x05, 0xa5, 0x1c,
- 0x7e, 0xc7, 0xf5, 0x9a, 0x49, 0x0e, 0x7f, 0xd3, 0xf5, 0x9a, 0x98, 0x41, 0xd4, 0x91, 0x55, 0xce,
- 0xcd, 0x43, 0xf0, 0x2b, 0x74, 0xaf, 0xd2, 0xbe, 0xfc, 0x40, 0xbb, 0x81, 0x5e, 0xd6, 0x6d, 0xaa,
- 0xc2, 0x7d, 0x32, 0xd7, 0x9e, 0xfa, 0x1d, 0x0b, 0xc4, 0xeb, 0xf7, 0x3e, 0xce, 0xe4, 0xaf, 0xc2,
- 0xf8, 0x6e, 0x3a, 0xc5, 0xf3, 0xc5, 0xfc, 0x70, 0x00, 0x22, 0xb1, 0xb3, 0x12, 0xd1, 0x8d, 0x74,
- 0xce, 0x06, 0x2d, 0xbb, 0x09, 0x02, 0xba, 0x44, 0x98, 0x55, 0xa3, 0x77, 0x6f, 0x5e, 0x03, 0x68,
- 0x32, 0x5c, 0x96, 0xec, 0xac, 0x64, 0x4a, 0x5c, 0x4b, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x17, 0xca,
- 0x30, 0x26, 0x53, 0x0a, 0x77, 0xbd, 0x7e, 0x74, 0x8f, 0xba, 0xe0, 0x54, 0xea, 0x29, 0x38, 0x7d,
- 0x08, 0x33, 0x01, 0x69, 0x74, 0x83, 0xd0, 0xdd, 0x25, 0x12, 0x2c, 0x36, 0xc9, 0x1c, 0x4f, 0x83,
- 0x91, 0x00, 0x1e, 0xb2, 0xd0, 0x5d, 0x89, 0x42, 0x66, 0x34, 0x4e, 0x13, 0x42, 0x57, 0x60, 0x94,
- 0xa9, 0xde, 0x6b, 0xb1, 0x42, 0x58, 0x29, 0xbe, 0xd6, 0x24, 0x00, 0xc7, 0x38, 0xec, 0x72, 0xd0,
- 0xbd, 0xaf, 0x65, 0xa2, 0x8b, 0x2f, 0x07, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x32, 0x4c, 0xf3, 0x7a,
- 0x81, 0xdf, 0x71, 0xb6, 0xb8, 0x49, 0x70, 0x50, 0x85, 0xd7, 0x99, 0x5e, 0x4b, 0xc0, 0x0e, 0xf7,
- 0xab, 0x27, 0x93, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf3, 0x8f, 0x37, 0x42, 0xcf, 0x8c, 0x94,
- 0xc3, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0x27, 0x16, 0xcc, 0x68, 0x53, 0xd5, 0x77, 0x26, 0x12,
- 0x63, 0x90, 0x4a, 0x7d, 0x0c, 0xd2, 0xd1, 0xa2, 0x3d, 0x64, 0xce, 0xf0, 0xc0, 0x13, 0x9a, 0x61,
- 0xfb, 0x9b, 0x80, 0xd2, 0xf9, 0xaa, 0xd1, 0xfb, 0xdc, 0x91, 0xdf, 0x0d, 0x48, 0xb3, 0xc8, 0xe0,
- 0xaf, 0x47, 0xce, 0x91, 0x2f, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x7f, 0x32, 0x00, 0xd3, 0xc9,
- 0x58, 0x1d, 0xe8, 0x06, 0x0c, 0x71, 0x29, 0x5d, 0x90, 0x2f, 0xf0, 0x27, 0xd3, 0x22, 0x7c, 0xf0,
- 0x2c, 0x41, 0x5c, 0xba, 0x17, 0xf5, 0xd1, 0x87, 0x30, 0xd6, 0xf4, 0x1f, 0x78, 0x0f, 0x9c, 0xa0,
- 0x39, 0x5f, 0x5b, 0x15, 0x1c, 0x22, 0x53, 0x01, 0xb5, 0x14, 0xa3, 0xe9, 0x51, 0x43, 0x98, 0xef,
- 0x44, 0x0c, 0xc2, 0x3a, 0x39, 0xb4, 0xc1, 0x12, 0x57, 0x6d, 0xba, 0x5b, 0x6b, 0x4e, 0xa7, 0xe8,
- 0x55, 0xd7, 0xa2, 0x44, 0xd2, 0x28, 0x4f, 0x88, 0xec, 0x56, 0x1c, 0x80, 0x63, 0x42, 0xe8, 0x47,
- 0xe1, 0x44, 0x98, 0x63, 0x12, 0xcb, 0x71, 0x38, 0x28, 0xb4, 0x12, 0x71, 0x65, 0x4a, 0x96, 0xf1,
- 0x2c, 0xab, 0x19, 0xf4, 0x10, 0x90, 0x50, 0x3d, 0x6f, 0x04, 0xdd, 0x30, 0xe2, 0x29, 0x20, 0xc5,
- 0xa5, 0xeb, 0x73, 0xd9, 0x7a, 0x82, 0x24, 0xb6, 0xd6, 0x36, 0x0b, 0x9c, 0x9c, 0xc6, 0xc0, 0x19,
- 0x6d, 0xa0, 0x6d, 0x98, 0xec, 0x18, 0xd9, 0x37, 0xd9, 0xde, 0xcc, 0x89, 0x2e, 0x9c, 0x97, 0xa7,
- 0x93, 0x9f, 0xd2, 0x26, 0x14, 0x27, 0xe8, 0xda, 0xdf, 0x19, 0x80, 0x59, 0x99, 0x8a, 0x3e, 0xe3,
- 0x9d, 0xcc, 0xb7, 0xad, 0xc4, 0x43, 0x99, 0xb7, 0xf2, 0x8f, 0x94, 0xa7, 0xf6, 0x5c, 0xe6, 0x27,
- 0xd3, 0xcf, 0x65, 0xde, 0x39, 0x62, 0x37, 0x9e, 0xd8, 0xa3, 0x99, 0x1f, 0xda, 0x97, 0x2e, 0x07,
- 0x27, 0xc1, 0x10, 0x02, 0x10, 0xe6, 0xf1, 0xef, 0x6b, 0xd2, 0x48, 0x95, 0xa3, 0x68, 0xb8, 0x21,
- 0x70, 0x0c, 0xb1, 0x62, 0x5c, 0x46, 0xc9, 0x67, 0x1c, 0x5d, 0xd1, 0xa1, 0x34, 0x49, 0xbb, 0x13,
- 0xed, 0x2d, 0xb9, 0x81, 0xe8, 0x71, 0x26, 0xcd, 0x65, 0x81, 0x93, 0xa6, 0x29, 0x21, 0x58, 0xd1,
- 0x41, 0xbb, 0x30, 0xb3, 0xc5, 0x62, 0x4b, 0x69, 0x59, 0xe1, 0x05, 0x07, 0xca, 0xe4, 0x10, 0xd7,
- 0x17, 0x97, 0xf3, 0x53, 0xc8, 0xf3, 0x6b, 0x66, 0x0a, 0x05, 0xa7, 0x9b, 0xa0, 0x5b, 0xe3, 0xa4,
- 0xf3, 0x20, 0x5c, 0x6e, 0x39, 0x61, 0xe4, 0x36, 0x16, 0x5a, 0x7e, 0x63, 0xa7, 0x1e, 0xf9, 0x81,
- 0xcc, 0x2a, 0x9a, 0x79, 0xcb, 0x9b, 0xbf, 0x57, 0x4f, 0xe1, 0x1b, 0xcd, 0xb3, 0xec, 0xb6, 0x59,
- 0x58, 0x38, 0xb3, 0x2d, 0xb4, 0x0e, 0xc3, 0x5b, 0x6e, 0x84, 0x49, 0xc7, 0x17, 0x7c, 0x29, 0x93,
- 0xe9, 0x5e, 0xe7, 0x28, 0x46, 0x4b, 0x2c, 0xf6, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xaf, 0x8e,
- 0x9b, 0xa1, 0x7c, 0x55, 0x6f, 0xda, 0xcb, 0x2f, 0xf3, 0xc0, 0x79, 0x17, 0xca, 0xde, 0x66, 0x58,
- 0x14, 0xf5, 0x67, 0x7d, 0xc5, 0xd0, 0xd4, 0x2d, 0x0c, 0xd3, 0x4b, 0xf8, 0xfa, 0x4a, 0x1d, 0xd3,
- 0x8a, 0xec, 0x81, 0x6d, 0xd8, 0x08, 0x5d, 0x91, 0xbc, 0x2b, 0xf3, 0xbd, 0xf1, 0x6a, 0x7d, 0xb1,
- 0xbe, 0x6a, 0xd0, 0x60, 0xf1, 0x13, 0x59, 0x31, 0xe6, 0xd5, 0xd1, 0x5d, 0x18, 0xdd, 0xe2, 0x2c,
- 0x76, 0x93, 0x87, 0xb5, 0xcd, 0x39, 0xf6, 0xae, 0x4b, 0x24, 0x83, 0x1e, 0x3b, 0x9c, 0x14, 0x08,
- 0xc7, 0xa4, 0xd0, 0x77, 0x2c, 0x38, 0xd5, 0x49, 0xe8, 0x6a, 0xd9, 0xb3, 0x38, 0xe1, 0x10, 0x97,
- 0xf9, 0xd4, 0xa0, 0x96, 0x55, 0xc1, 0x68, 0x90, 0x19, 0x7a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74,
- 0xa0, 0x83, 0xfb, 0xcd, 0xa2, 0x7c, 0x4f, 0x89, 0x10, 0x48, 0x7c, 0xa0, 0xf1, 0xc2, 0x12, 0xa6,
- 0x15, 0xd1, 0x06, 0xc0, 0x66, 0x8b, 0x88, 0xd8, 0x92, 0xc2, 0xfd, 0x2a, 0x53, 0xce, 0x58, 0x51,
- 0x58, 0x82, 0x0e, 0xbb, 0xf3, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xc3, 0xf5, 0x9a, 0x24,
- 0x60, 0x66, 0xb4, 0x9c, 0xa5, 0xb4, 0xc8, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1,
- 0x22, 0x9d, 0xed, 0xcd, 0xb0, 0x28, 0xb3, 0xc8, 0x22, 0xe9, 0x6c, 0x27, 0x16, 0x14, 0xa7, 0xc5,
- 0xca, 0xb1, 0xa0, 0x40, 0xb7, 0xcc, 0x26, 0xdd, 0x40, 0x24, 0xa8, 0x4c, 0xe5, 0x6f, 0x99, 0x15,
- 0x8e, 0x92, 0xde, 0x32, 0x02, 0x80, 0x25, 0x11, 0xf4, 0x0d, 0x53, 0xae, 0x9a, 0x66, 0x34, 0x5f,
- 0xee, 0x21, 0x57, 0x19, 0x74, 0x8b, 0x25, 0xab, 0xb7, 0xa0, 0xb4, 0xd9, 0x60, 0xe6, 0xb7, 0x1c,
- 0xeb, 0xc4, 0xca, 0xa2, 0x41, 0x8d, 0x45, 0xea, 0x5f, 0x59, 0xc4, 0xa5, 0xcd, 0x06, 0x5d, 0xfa,
- 0xce, 0xa3, 0x6e, 0x40, 0x56, 0xdc, 0x16, 0x11, 0xa1, 0x83, 0x33, 0x97, 0xfe, 0xbc, 0x44, 0x4a,
- 0x2f, 0x7d, 0x05, 0xc2, 0x31, 0x29, 0x4a, 0x37, 0x96, 0xf6, 0x4e, 0xe4, 0xd3, 0x55, 0x42, 0x5d,
- 0x9a, 0x6e, 0xa6, 0xbc, 0xb7, 0x03, 0x13, 0xbb, 0x61, 0x67, 0x9b, 0x48, 0xae, 0xc8, 0x0c, 0x83,
- 0x39, 0x31, 0x31, 0xee, 0x0a, 0x44, 0x37, 0x88, 0xba, 0x4e, 0x2b, 0xc5, 0xc8, 0x99, 0x12, 0xe7,
- 0xae, 0x4e, 0x0c, 0x9b, 0xb4, 0xe9, 0x42, 0xf8, 0x88, 0x07, 0xae, 0x63, 0x26, 0xc2, 0x9c, 0x85,
- 0x90, 0x11, 0xdb, 0x8e, 0x2f, 0x04, 0x01, 0xc0, 0x92, 0x88, 0x1a, 0x6c, 0x76, 0x00, 0x9d, 0xee,
- 0x31, 0xd8, 0xa9, 0xfe, 0xc6, 0x83, 0xcd, 0x0e, 0x9c, 0x98, 0x14, 0x3b, 0x68, 0x3a, 0xdb, 0x7e,
- 0xe4, 0x7b, 0x89, 0x43, 0xee, 0x4c, 0xfe, 0x41, 0x53, 0xcb, 0xc0, 0x4f, 0x1f, 0x34, 0x59, 0x58,
- 0x38, 0xb3, 0x2d, 0xfa, 0x71, 0x1d, 0x19, 0x83, 0x50, 0x64, 0x42, 0x79, 0x29, 0x27, 0x84, 0x67,
- 0x3a, 0x50, 0x21, 0xff, 0x38, 0x05, 0xc2, 0x31, 0x29, 0xd4, 0xa4, 0x92, 0xae, 0x1e, 0xdb, 0x96,
- 0x65, 0x74, 0xc9, 0x91, 0x0b, 0xb2, 0xa2, 0xe0, 0x4a, 0x29, 0x57, 0x87, 0xe0, 0x04, 0x4d, 0xe6,
- 0x23, 0xc8, 0x1f, 0x15, 0xb2, 0x84, 0x2f, 0x39, 0x53, 0x9d, 0xf1, 0xee, 0x90, 0x4f, 0xb5, 0x00,
- 0x60, 0x49, 0x84, 0x8e, 0x86, 0x78, 0x0a, 0xe7, 0x87, 0x2c, 0x6f, 0x52, 0x9e, 0x29, 0x3f, 0xcb,
- 0x20, 0x25, 0x03, 0xcd, 0x0b, 0x10, 0x8e, 0x49, 0x51, 0x4e, 0x4e, 0x0f, 0xbc, 0x73, 0xf9, 0x9c,
- 0x3c, 0x79, 0xdc, 0x31, 0x4e, 0x4e, 0x0f, 0xbb, 0xb2, 0x38, 0xea, 0x54, 0x5c, 0x74, 0x96, 0xf3,
- 0x25, 0xa7, 0x5f, 0x2a, 0xb0, 0x7a, 0xba, 0x5f, 0x0a, 0x84, 0x63, 0x52, 0xec, 0x28, 0x66, 0x41,
- 0xf0, 0x2e, 0x14, 0x1c, 0xc5, 0x14, 0x21, 0xe3, 0x28, 0xd6, 0x82, 0xe4, 0xd9, 0x7f, 0xb9, 0x04,
- 0x17, 0x8a, 0xf7, 0x6d, 0x6c, 0xad, 0xab, 0xc5, 0xde, 0x51, 0x09, 0x6b, 0x1d, 0xd7, 0x1d, 0xc5,
- 0x58, 0x7d, 0x87, 0x36, 0xbe, 0x0e, 0x33, 0xea, 0xe1, 0x63, 0xcb, 0x6d, 0xec, 0x69, 0x89, 0x5e,
- 0x55, 0x10, 0xa0, 0x7a, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0x79, 0x98, 0x32, 0x0a, 0x57, 0x97, 0x84,
- 0xa2, 0x21, 0xce, 0x56, 0x62, 0x82, 0x71, 0x12, 0xdf, 0xfe, 0x45, 0x0b, 0xce, 0xf0, 0x40, 0xbc,
- 0xa4, 0x59, 0xf3, 0x9b, 0x52, 0xa3, 0x70, 0xa4, 0xc8, 0xbd, 0x9b, 0x30, 0xd5, 0x31, 0xab, 0xf6,
- 0x08, 0x36, 0xae, 0xa3, 0xc6, 0x7d, 0x4d, 0x00, 0x70, 0x92, 0xa8, 0xfd, 0xf3, 0x25, 0x38, 0x5f,
- 0xe8, 0xc9, 0x8f, 0x30, 0x9c, 0xde, 0x6a, 0x87, 0xce, 0x62, 0x40, 0x9a, 0xc4, 0x8b, 0x5c, 0xa7,
- 0x55, 0xef, 0x90, 0x86, 0x66, 0x6f, 0x65, 0x2e, 0xf1, 0xd7, 0xd7, 0xea, 0xf3, 0x69, 0x0c, 0x9c,
- 0x53, 0x13, 0xad, 0x00, 0x4a, 0x43, 0xc4, 0x0c, 0xb3, 0xcb, 0x74, 0x9a, 0x1e, 0xce, 0xa8, 0x81,
- 0xbe, 0x00, 0x13, 0xea, 0x85, 0x80, 0x36, 0xe3, 0xec, 0x80, 0xc0, 0x3a, 0x00, 0x9b, 0x78, 0xe8,
- 0x2a, 0x4f, 0x63, 0x25, 0x12, 0x9e, 0x09, 0xe3, 0xec, 0x94, 0xcc, 0x51, 0x25, 0x8a, 0xb1, 0x8e,
- 0xb3, 0x70, 0xed, 0x77, 0xfe, 0xf0, 0xc2, 0x67, 0x7e, 0xf7, 0x0f, 0x2f, 0x7c, 0xe6, 0x0f, 0xfe,
- 0xf0, 0xc2, 0x67, 0x7e, 0xfc, 0xe0, 0x82, 0xf5, 0x3b, 0x07, 0x17, 0xac, 0xdf, 0x3d, 0xb8, 0x60,
- 0xfd, 0xc1, 0xc1, 0x05, 0xeb, 0x7f, 0x3b, 0xb8, 0x60, 0x7d, 0xef, 0x7f, 0xbf, 0xf0, 0x99, 0xaf,
- 0xa2, 0x38, 0x16, 0xf6, 0x15, 0x3a, 0x3b, 0x57, 0x76, 0xaf, 0xfe, 0x87, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xba, 0xfb, 0xfc, 0xdd, 0x18, 0x2e, 0x01, 0x00,
-}
+func (m *VolumeMountStatus) Reset() { *m = VolumeMountStatus{} }
+
+func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
+
+func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
+
+func (m *VolumeResourceRequirements) Reset() { *m = VolumeResourceRequirements{} }
+
+func (m *VolumeSource) Reset() { *m = VolumeSource{} }
+
+func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
+
+func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
+
+func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
+
+func (m *WorkloadReference) Reset() { *m = WorkloadReference{} }
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -8495,7 +1001,7 @@ func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, erro
for k := range m.VolumeAttributes {
keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ sort.Strings(keysForVolumeAttributes)
for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- {
v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])]
baseI := i
@@ -8577,7 +1083,7 @@ func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.VolumeAttributes {
keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ sort.Strings(keysForVolumeAttributes)
for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- {
v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])]
baseI := i
@@ -9166,7 +1672,7 @@ func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.BinaryData {
keysForBinaryData = append(keysForBinaryData, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData)
+ sort.Strings(keysForBinaryData)
for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- {
v := m.BinaryData[string(keysForBinaryData[iNdEx])]
baseI := i
@@ -9192,7 +1698,7 @@ func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Data {
keysForData = append(keysForData, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+ sort.Strings(keysForData)
for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- {
v := m.Data[string(keysForData[iNdEx])]
baseI := i
@@ -10317,7 +2823,7 @@ func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.AllocatedResources {
keysForAllocatedResources = append(keysForAllocatedResources, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+ sort.Strings(keysForAllocatedResources)
for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- {
v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])]
baseI := i
@@ -11859,7 +4365,7 @@ func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, err
for k := range m.Options {
keysForOptions = append(keysForOptions, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ sort.Strings(keysForOptions)
for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- {
v := m.Options[string(keysForOptions[iNdEx])]
baseI := i
@@ -11936,7 +4442,7 @@ func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Options {
keysForOptions = append(keysForOptions, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ sort.Strings(keysForOptions)
for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- {
v := m.Options[string(keysForOptions[iNdEx])]
baseI := i
@@ -12880,7 +5386,7 @@ func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.MaxLimitRequestRatio {
keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio)
+ sort.Strings(keysForMaxLimitRequestRatio)
for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- {
v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])]
baseI := i
@@ -12909,7 +5415,7 @@ func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.DefaultRequest {
keysForDefaultRequest = append(keysForDefaultRequest, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest)
+ sort.Strings(keysForDefaultRequest)
for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- {
v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])]
baseI := i
@@ -12938,7 +5444,7 @@ func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Default {
keysForDefault = append(keysForDefault, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDefault)
+ sort.Strings(keysForDefault)
for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- {
v := m.Default[ResourceName(keysForDefault[iNdEx])]
baseI := i
@@ -12967,7 +5473,7 @@ func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Min {
keysForMin = append(keysForMin, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMin)
+ sort.Strings(keysForMin)
for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- {
v := m.Min[ResourceName(keysForMin[iNdEx])]
baseI := i
@@ -12996,7 +5502,7 @@ func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Max {
keysForMax = append(keysForMax, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMax)
+ sort.Strings(keysForMax)
for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- {
v := m.Max[ResourceName(keysForMax[iNdEx])]
baseI := i
@@ -14402,6 +6908,15 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if len(m.DeclaredFeatures) > 0 {
+ for iNdEx := len(m.DeclaredFeatures) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.DeclaredFeatures[iNdEx])
+ copy(dAtA[i:], m.DeclaredFeatures[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeclaredFeatures[iNdEx])))
+ i--
+ dAtA[i] = 0x72
+ }
+ }
if m.Features != nil {
{
size, err := m.Features.MarshalToSizedBuffer(dAtA[:i])
@@ -14535,7 +7050,7 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Allocatable {
keysForAllocatable = append(keysForAllocatable, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+ sort.Strings(keysForAllocatable)
for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- {
v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])]
baseI := i
@@ -14564,7 +7079,7 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
baseI := i
@@ -15159,7 +7674,7 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er
for k := range m.AllocatedResourceStatuses {
keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+ sort.Strings(keysForAllocatedResourceStatuses)
for iNdEx := len(keysForAllocatedResourceStatuses) - 1; iNdEx >= 0; iNdEx-- {
v := m.AllocatedResourceStatuses[ResourceName(keysForAllocatedResourceStatuses[iNdEx])]
baseI := i
@@ -15183,7 +7698,7 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er
for k := range m.AllocatedResources {
keysForAllocatedResources = append(keysForAllocatedResources, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+ sort.Strings(keysForAllocatedResources)
for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- {
v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])]
baseI := i
@@ -15226,7 +7741,7 @@ func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, er
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
baseI := i
@@ -15795,7 +8310,7 @@ func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
baseI := i
@@ -16219,6 +8734,30 @@ func (m *PodCertificateProjection) MarshalToSizedBuffer(dAtA []byte) (int, error
_ = i
var l int
_ = l
+ if len(m.UserAnnotations) > 0 {
+ keysForUserAnnotations := make([]string, 0, len(m.UserAnnotations))
+ for k := range m.UserAnnotations {
+ keysForUserAnnotations = append(keysForUserAnnotations, string(k))
+ }
+ sort.Strings(keysForUserAnnotations)
+ for iNdEx := len(keysForUserAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.UserAnnotations[string(keysForUserAnnotations[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForUserAnnotations[iNdEx])
+ copy(dAtA[i:], keysForUserAnnotations[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUserAnnotations[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
i -= len(m.CertificateChainPath)
copy(dAtA[i:], m.CertificateChainPath)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.CertificateChainPath)))
@@ -17100,6 +9639,20 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.WorkloadRef != nil {
+ {
+ size, err := m.WorkloadRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xd2
+ }
if m.HostnameOverride != nil {
i -= len(*m.HostnameOverride)
copy(dAtA[i:], *m.HostnameOverride)
@@ -17230,7 +9783,7 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Overhead {
keysForOverhead = append(keysForOverhead, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+ sort.Strings(keysForOverhead)
for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- {
v := m.Overhead[ResourceName(keysForOverhead[iNdEx])]
baseI := i
@@ -17507,7 +10060,7 @@ func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.NodeSelector[string(keysForNodeSelector[iNdEx])]
baseI := i
@@ -17597,6 +10150,51 @@ func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Resources != nil {
+ {
+ size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xa2
+ }
+ if len(m.AllocatedResources) > 0 {
+ keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources))
+ for k := range m.AllocatedResources {
+ keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+ }
+ sort.Strings(keysForAllocatedResources)
+ for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])]
+ baseI := i
+ {
+ size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForAllocatedResources[iNdEx])
+ copy(dAtA[i:], keysForAllocatedResources[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ }
if m.ExtendedResourceClaimStatus != nil {
{
size, err := m.ExtendedResourceClaimStatus.MarshalToSizedBuffer(dAtA[:i])
@@ -18759,7 +11357,7 @@ func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro
for k := range m.Selector {
keysForSelector = append(keysForSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.Selector[string(keysForSelector[iNdEx])]
baseI := i
@@ -19093,7 +11691,7 @@ func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Hard {
keysForHard = append(keysForHard, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+ sort.Strings(keysForHard)
for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- {
v := m.Hard[ResourceName(keysForHard[iNdEx])]
baseI := i
@@ -19145,7 +11743,7 @@ func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Used {
keysForUsed = append(keysForUsed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUsed)
+ sort.Strings(keysForUsed)
for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- {
v := m.Used[ResourceName(keysForUsed[iNdEx])]
baseI := i
@@ -19174,7 +11772,7 @@ func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Hard {
keysForHard = append(keysForHard, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+ sort.Strings(keysForHard)
for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- {
v := m.Hard[ResourceName(keysForHard[iNdEx])]
baseI := i
@@ -19240,7 +11838,7 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
v := m.Requests[ResourceName(keysForRequests[iNdEx])]
baseI := i
@@ -19269,7 +11867,7 @@ func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Limits {
keysForLimits = append(keysForLimits, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+ sort.Strings(keysForLimits)
for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- {
v := m.Limits[ResourceName(keysForLimits[iNdEx])]
baseI := i
@@ -19702,7 +12300,7 @@ func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.StringData {
keysForStringData = append(keysForStringData, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
+ sort.Strings(keysForStringData)
for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- {
v := m.StringData[string(keysForStringData[iNdEx])]
baseI := i
@@ -19731,7 +12329,7 @@ func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Data {
keysForData = append(keysForData, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+ sort.Strings(keysForData)
for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- {
v := m.Data[string(keysForData[iNdEx])]
baseI := i
@@ -20729,7 +13327,7 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Selector {
keysForSelector = append(keysForSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.Selector[string(keysForSelector[iNdEx])]
baseI := i
@@ -21734,7 +14332,7 @@ func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, err
for k := range m.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
v := m.Requests[ResourceName(keysForRequests[iNdEx])]
baseI := i
@@ -21763,7 +14361,7 @@ func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, err
for k := range m.Limits {
keysForLimits = append(keysForLimits, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+ sort.Strings(keysForLimits)
for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- {
v := m.Limits[ResourceName(keysForLimits[iNdEx])]
baseI := i
@@ -22336,6 +14934,44 @@ func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int,
return len(dAtA) - i, nil
}
+func (m *WorkloadReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkloadReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkloadReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.PodGroupReplicaKey)
+ copy(dAtA[i:], m.PodGroupReplicaKey)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodGroupReplicaKey)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.PodGroup)
+ copy(dAtA[i:], m.PodGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodGroup)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
@@ -24786,6 +17422,12 @@ func (m *NodeStatus) Size() (n int) {
l = m.Features.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if len(m.DeclaredFeatures) > 0 {
+ for _, s := range m.DeclaredFeatures {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -25389,6 +18031,14 @@ func (m *PodCertificateProjection) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = len(m.CertificateChainPath)
n += 1 + l + sovGenerated(uint64(l))
+ if len(m.UserAnnotations) > 0 {
+ for k, v := range m.UserAnnotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
return n
}
@@ -25885,6 +18535,10 @@ func (m *PodSpec) Size() (n int) {
l = len(*m.HostnameOverride)
n += 2 + l + sovGenerated(uint64(l))
}
+ if m.WorkloadRef != nil {
+ l = m.WorkloadRef.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -25961,6 +18615,19 @@ func (m *PodStatus) Size() (n int) {
l = m.ExtendedResourceClaimStatus.Size()
n += 2 + l + sovGenerated(uint64(l))
}
+ if len(m.AllocatedResources) > 0 {
+ for k, v := range m.AllocatedResources {
+ _ = k
+ _ = v
+ l = v.Size()
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+ n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.Resources != nil {
+ l = m.Resources.Size()
+ n += 2 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -27632,6 +20299,21 @@ func (m *WindowsSecurityContextOptions) Size() (n int) {
return n
}
+func (m *WorkloadReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.PodGroupReplicaKey)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -27759,7 +20441,7 @@ func (this *CSIPersistentVolumeSource) String() string {
for k := range this.VolumeAttributes {
keysForVolumeAttributes = append(keysForVolumeAttributes, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ sort.Strings(keysForVolumeAttributes)
mapStringForVolumeAttributes := "map[string]string{"
for _, k := range keysForVolumeAttributes {
mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k])
@@ -27788,7 +20470,7 @@ func (this *CSIVolumeSource) String() string {
for k := range this.VolumeAttributes {
keysForVolumeAttributes = append(keysForVolumeAttributes, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+ sort.Strings(keysForVolumeAttributes)
mapStringForVolumeAttributes := "map[string]string{"
for _, k := range keysForVolumeAttributes {
mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k])
@@ -27948,7 +20630,7 @@ func (this *ConfigMap) String() string {
for k := range this.Data {
keysForData = append(keysForData, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+ sort.Strings(keysForData)
mapStringForData := "map[string]string{"
for _, k := range keysForData {
mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k])
@@ -27958,7 +20640,7 @@ func (this *ConfigMap) String() string {
for k := range this.BinaryData {
keysForBinaryData = append(keysForBinaryData, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData)
+ sort.Strings(keysForBinaryData)
mapStringForBinaryData := "map[string][]byte{"
for _, k := range keysForBinaryData {
mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k])
@@ -28267,7 +20949,7 @@ func (this *ContainerStatus) String() string {
for k := range this.AllocatedResources {
keysForAllocatedResources = append(keysForAllocatedResources, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+ sort.Strings(keysForAllocatedResources)
mapStringForAllocatedResources := "ResourceList{"
for _, k := range keysForAllocatedResources {
mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
@@ -28688,7 +21370,7 @@ func (this *FlexPersistentVolumeSource) String() string {
for k := range this.Options {
keysForOptions = append(keysForOptions, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ sort.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
@@ -28712,7 +21394,7 @@ func (this *FlexVolumeSource) String() string {
for k := range this.Options {
keysForOptions = append(keysForOptions, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+ sort.Strings(keysForOptions)
mapStringForOptions := "map[string]string{"
for _, k := range keysForOptions {
mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
@@ -28969,7 +21651,7 @@ func (this *LimitRangeItem) String() string {
for k := range this.Max {
keysForMax = append(keysForMax, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMax)
+ sort.Strings(keysForMax)
mapStringForMax := "ResourceList{"
for _, k := range keysForMax {
mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)])
@@ -28979,7 +21661,7 @@ func (this *LimitRangeItem) String() string {
for k := range this.Min {
keysForMin = append(keysForMin, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMin)
+ sort.Strings(keysForMin)
mapStringForMin := "ResourceList{"
for _, k := range keysForMin {
mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)])
@@ -28989,7 +21671,7 @@ func (this *LimitRangeItem) String() string {
for k := range this.Default {
keysForDefault = append(keysForDefault, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDefault)
+ sort.Strings(keysForDefault)
mapStringForDefault := "ResourceList{"
for _, k := range keysForDefault {
mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)])
@@ -28999,7 +21681,7 @@ func (this *LimitRangeItem) String() string {
for k := range this.DefaultRequest {
keysForDefaultRequest = append(keysForDefaultRequest, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest)
+ sort.Strings(keysForDefaultRequest)
mapStringForDefaultRequest := "ResourceList{"
for _, k := range keysForDefaultRequest {
mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)])
@@ -29009,7 +21691,7 @@ func (this *LimitRangeItem) String() string {
for k := range this.MaxLimitRequestRatio {
keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio)
+ sort.Strings(keysForMaxLimitRequestRatio)
mapStringForMaxLimitRequestRatio := "ResourceList{"
for _, k := range keysForMaxLimitRequestRatio {
mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)])
@@ -29477,7 +22159,7 @@ func (this *NodeStatus) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "ResourceList{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
@@ -29487,7 +22169,7 @@ func (this *NodeStatus) String() string {
for k := range this.Allocatable {
keysForAllocatable = append(keysForAllocatable, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+ sort.Strings(keysForAllocatable)
mapStringForAllocatable := "ResourceList{"
for _, k := range keysForAllocatable {
mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)])
@@ -29507,6 +22189,7 @@ func (this *NodeStatus) String() string {
`Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`,
`RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`,
`Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`,
+ `DeclaredFeatures:` + fmt.Sprintf("%v", this.DeclaredFeatures) + `,`,
`}`,
}, "")
return s
@@ -29654,7 +22337,7 @@ func (this *PersistentVolumeClaimStatus) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "ResourceList{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
@@ -29664,7 +22347,7 @@ func (this *PersistentVolumeClaimStatus) String() string {
for k := range this.AllocatedResources {
keysForAllocatedResources = append(keysForAllocatedResources, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+ sort.Strings(keysForAllocatedResources)
mapStringForAllocatedResources := "ResourceList{"
for _, k := range keysForAllocatedResources {
mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
@@ -29674,7 +22357,7 @@ func (this *PersistentVolumeClaimStatus) String() string {
for k := range this.AllocatedResourceStatuses {
keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+ sort.Strings(keysForAllocatedResourceStatuses)
mapStringForAllocatedResourceStatuses := "map[ResourceName]ClaimResourceStatus{"
for _, k := range keysForAllocatedResourceStatuses {
mapStringForAllocatedResourceStatuses += fmt.Sprintf("%v: %v,", k, this.AllocatedResourceStatuses[ResourceName(k)])
@@ -29770,7 +22453,7 @@ func (this *PersistentVolumeSpec) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "ResourceList{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
@@ -29902,6 +22585,16 @@ func (this *PodCertificateProjection) String() string {
if this == nil {
return "nil"
}
+ keysForUserAnnotations := make([]string, 0, len(this.UserAnnotations))
+ for k := range this.UserAnnotations {
+ keysForUserAnnotations = append(keysForUserAnnotations, k)
+ }
+ sort.Strings(keysForUserAnnotations)
+ mapStringForUserAnnotations := "map[string]string{"
+ for _, k := range keysForUserAnnotations {
+ mapStringForUserAnnotations += fmt.Sprintf("%v: %v,", k, this.UserAnnotations[k])
+ }
+ mapStringForUserAnnotations += "}"
s := strings.Join([]string{`&PodCertificateProjection{`,
`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
`KeyType:` + fmt.Sprintf("%v", this.KeyType) + `,`,
@@ -29909,6 +22602,7 @@ func (this *PodCertificateProjection) String() string {
`CredentialBundlePath:` + fmt.Sprintf("%v", this.CredentialBundlePath) + `,`,
`KeyPath:` + fmt.Sprintf("%v", this.KeyPath) + `,`,
`CertificateChainPath:` + fmt.Sprintf("%v", this.CertificateChainPath) + `,`,
+ `UserAnnotations:` + mapStringForUserAnnotations + `,`,
`}`,
}, "")
return s
@@ -30206,7 +22900,7 @@ func (this *PodSpec) String() string {
for k := range this.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
mapStringForNodeSelector := "map[string]string{"
for _, k := range keysForNodeSelector {
mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
@@ -30216,7 +22910,7 @@ func (this *PodSpec) String() string {
for k := range this.Overhead {
keysForOverhead = append(keysForOverhead, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+ sort.Strings(keysForOverhead)
mapStringForOverhead := "ResourceList{"
for _, k := range keysForOverhead {
mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)])
@@ -30264,6 +22958,7 @@ func (this *PodSpec) String() string {
`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
`HostnameOverride:` + valueToStringGenerated(this.HostnameOverride) + `,`,
+ `WorkloadRef:` + strings.Replace(this.WorkloadRef.String(), "WorkloadReference", "WorkloadReference", 1) + `,`,
`}`,
}, "")
return s
@@ -30307,6 +23002,16 @@ func (this *PodStatus) String() string {
repeatedStringForHostIPs += strings.Replace(strings.Replace(f.String(), "HostIP", "HostIP", 1), `&`, ``, 1) + ","
}
repeatedStringForHostIPs += "}"
+ keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources))
+ for k := range this.AllocatedResources {
+ keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+ }
+ sort.Strings(keysForAllocatedResources)
+ mapStringForAllocatedResources := "ResourceList{"
+ for _, k := range keysForAllocatedResources {
+ mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
+ }
+ mapStringForAllocatedResources += "}"
s := strings.Join([]string{`&PodStatus{`,
`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
@@ -30326,6 +23031,8 @@ func (this *PodStatus) String() string {
`HostIPs:` + repeatedStringForHostIPs + `,`,
`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
`ExtendedResourceClaimStatus:` + strings.Replace(this.ExtendedResourceClaimStatus.String(), "PodExtendedResourceClaimStatus", "PodExtendedResourceClaimStatus", 1) + `,`,
+ `AllocatedResources:` + mapStringForAllocatedResources + `,`,
+ `Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
`}`,
}, "")
return s
@@ -30593,7 +23300,7 @@ func (this *ReplicationControllerSpec) String() string {
for k := range this.Selector {
keysForSelector = append(keysForSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
mapStringForSelector := "map[string]string{"
for _, k := range keysForSelector {
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
@@ -30698,7 +23405,7 @@ func (this *ResourceQuotaSpec) String() string {
for k := range this.Hard {
keysForHard = append(keysForHard, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+ sort.Strings(keysForHard)
mapStringForHard := "ResourceList{"
for _, k := range keysForHard {
mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)])
@@ -30720,7 +23427,7 @@ func (this *ResourceQuotaStatus) String() string {
for k := range this.Hard {
keysForHard = append(keysForHard, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+ sort.Strings(keysForHard)
mapStringForHard := "ResourceList{"
for _, k := range keysForHard {
mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)])
@@ -30730,7 +23437,7 @@ func (this *ResourceQuotaStatus) String() string {
for k := range this.Used {
keysForUsed = append(keysForUsed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUsed)
+ sort.Strings(keysForUsed)
mapStringForUsed := "ResourceList{"
for _, k := range keysForUsed {
mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)])
@@ -30756,7 +23463,7 @@ func (this *ResourceRequirements) String() string {
for k := range this.Limits {
keysForLimits = append(keysForLimits, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+ sort.Strings(keysForLimits)
mapStringForLimits := "ResourceList{"
for _, k := range keysForLimits {
mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)])
@@ -30766,7 +23473,7 @@ func (this *ResourceRequirements) String() string {
for k := range this.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
mapStringForRequests := "ResourceList{"
for _, k := range keysForRequests {
mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)])
@@ -30893,7 +23600,7 @@ func (this *Secret) String() string {
for k := range this.Data {
keysForData = append(keysForData, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+ sort.Strings(keysForData)
mapStringForData := "map[string][]byte{"
for _, k := range keysForData {
mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k])
@@ -30903,7 +23610,7 @@ func (this *Secret) String() string {
for k := range this.StringData {
keysForStringData = append(keysForStringData, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
+ sort.Strings(keysForStringData)
mapStringForStringData := "map[string]string{"
for _, k := range keysForStringData {
mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k])
@@ -31152,7 +23859,7 @@ func (this *ServiceSpec) String() string {
for k := range this.Selector {
keysForSelector = append(keysForSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
mapStringForSelector := "map[string]string{"
for _, k := range keysForSelector {
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
@@ -31448,7 +24155,7 @@ func (this *VolumeResourceRequirements) String() string {
for k := range this.Limits {
keysForLimits = append(keysForLimits, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+ sort.Strings(keysForLimits)
mapStringForLimits := "ResourceList{"
for _, k := range keysForLimits {
mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)])
@@ -31458,7 +24165,7 @@ func (this *VolumeResourceRequirements) String() string {
for k := range this.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
mapStringForRequests := "ResourceList{"
for _, k := range keysForRequests {
mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)])
@@ -31547,6 +24254,18 @@ func (this *WindowsSecurityContextOptions) String() string {
}, "")
return s
}
+func (this *WorkloadReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WorkloadReference{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `PodGroup:` + fmt.Sprintf("%v", this.PodGroup) + `,`,
+ `PodGroupReplicaKey:` + fmt.Sprintf("%v", this.PodGroupReplicaKey) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -52070,14 +44789,214 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Conditions = append(m.Conditions, NodeCondition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Conditions = append(m.Conditions, NodeCondition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, NodeAddress{})
+ if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Images = append(m.Images, ContainerImage{})
+ if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{})
+ if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 5:
+ case 11:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -52104,47 +45023,16 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Addresses = append(m.Addresses, NodeAddress{})
- if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if m.Config == nil {
+ m.Config = &NodeConfigStatus{}
}
- if err := m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 7:
+ case 12:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandlers", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -52171,13 +45059,14 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.RuntimeHandlers = append(m.RuntimeHandlers, NodeRuntimeHandler{})
+ if err := m.RuntimeHandlers[len(m.RuntimeHandlers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 8:
+ case 13:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -52204,14 +45093,16 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Images = append(m.Images, ContainerImage{})
- if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.Features == nil {
+ m.Features = &NodeFeatures{}
+ }
+ if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 9:
+ case 14:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field DeclaredFeatures", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -52239,147 +45130,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{})
- if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Config == nil {
- m.Config = &NodeConfigStatus{}
- }
- if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandlers", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RuntimeHandlers = append(m.RuntimeHandlers, NodeRuntimeHandler{})
- if err := m.RuntimeHandlers[len(m.RuntimeHandlers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 13:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Features == nil {
- m.Features = &NodeFeatures{}
- }
- if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.DeclaredFeatures = append(m.DeclaredFeatures, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -57720,31 +50471,63 @@ func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.KeyType = string(dAtA[iNdEx:postIndex])
+ m.KeyType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.MaxExpirationSeconds = &v
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CredentialBundlePath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxExpirationSeconds", wireType)
- }
- var v int32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.MaxExpirationSeconds = &v
- case 4:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CredentialBundlePath", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -57772,11 +50555,11 @@ func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.CredentialBundlePath = string(dAtA[iNdEx:postIndex])
+ m.KeyPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeyPath", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -57804,13 +50587,13 @@ func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.KeyPath = string(dAtA[iNdEx:postIndex])
+ m.CertificateChainPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 6:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CertificateChainPath", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field UserAnnotations", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -57820,23 +50603,118 @@ func (m *PodCertificateProjection) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.CertificateChainPath = string(dAtA[iNdEx:postIndex])
+ if m.UserAnnotations == nil {
+ m.UserAnnotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.UserAnnotations[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -61923,6 +54801,42 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error {
s := string(dAtA[iNdEx:postIndex])
m.HostnameOverride = &s
iNdEx = postIndex
+ case 42:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WorkloadRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.WorkloadRef == nil {
+ m.WorkloadRef = &WorkloadReference{}
+ }
+ if err := m.WorkloadRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -62498,33 +55412,198 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.HostIPs = append(m.HostIPs, HostIP{})
- if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 17:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ m.HostIPs = append(m.HostIPs, HostIP{})
+ if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+ }
+ m.ObservedGeneration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ObservedGeneration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 18:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExtendedResourceClaimStatus == nil {
+ m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{}
+ }
+ if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 19:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AllocatedResources == nil {
+ m.AllocatedResources = make(ResourceList)
}
- m.ObservedGeneration = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ var mapkey ResourceName
+ mapvalue := &resource.Quantity{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- m.ObservedGeneration |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &resource.Quantity{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
}
- case 18:
+ m.AllocatedResources[ResourceName(mapkey)] = *mapvalue
+ iNdEx = postIndex
+ case 20:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExtendedResourceClaimStatus", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -62551,10 +55630,10 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ExtendedResourceClaimStatus == nil {
- m.ExtendedResourceClaimStatus = &PodExtendedResourceClaimStatus{}
+ if m.Resources == nil {
+ m.Resources = &ResourceRequirements{}
}
- if err := m.ExtendedResourceClaimStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -77305,6 +70384,152 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *WorkloadReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkloadReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkloadReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodGroup = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodGroupReplicaKey", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodGroupReplicaKey = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/operator/vendor/k8s.io/api/core/v1/generated.proto b/operator/vendor/k8s.io/api/core/v1/generated.proto
index fb269531..570b4d34 100644
--- a/operator/vendor/k8s.io/api/core/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/core/v1/generated.proto
@@ -762,6 +762,7 @@ message Container {
optional ResourceRequirements resources = 8;
// Resources resize policy for the container.
+ // This field cannot be set on ephemeral containers.
// +featureGate=InPlacePodVerticalScaling
// +optional
// +listType=atomic
@@ -782,7 +783,6 @@ message Container {
// container. Instead, the next init container starts immediately after this
// init container is started, or after any startupProbe has successfully
// completed.
- // +featureGate=SidecarContainers
// +optional
optional string restartPolicy = 24;
@@ -1148,7 +1148,6 @@ message ContainerStatus {
// +patchStrategy=merge
// +listType=map
// +listMapKey=mountPath
- // +featureGate=RecursiveReadOnlyMounts
repeated VolumeMountStatus volumeMounts = 12;
// User represents user identity information initially attached to the first process of the container
@@ -1582,7 +1581,6 @@ message EphemeralContainerCommon {
// Restart policy for the container to manage the restart behavior of each
// container within a pod.
// You cannot set this field on ephemeral containers.
- // +featureGate=SidecarContainers
// +optional
optional string restartPolicy = 24;
@@ -2346,7 +2344,6 @@ message LifecycleHandler {
optional TCPSocketAction tcpSocket = 3;
// Sleep represents a duration that the container should sleep.
- // +featureGate=PodLifecycleSleepAction
// +optional
optional SleepAction sleep = 4;
}
@@ -2811,7 +2808,6 @@ message NodeRuntimeHandler {
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
message NodeRuntimeHandlerFeatures {
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
optional bool recursiveReadOnlyMounts = 1;
@@ -2978,7 +2974,6 @@ message NodeStatus {
optional NodeConfigStatus config = 11;
// The available runtime handlers.
- // +featureGate=RecursiveReadOnlyMounts
// +featureGate=UserNamespacesSupport
// +optional
// +listType=atomic
@@ -2988,6 +2983,12 @@ message NodeStatus {
// +featureGate=SupplementalGroupsPolicy
// +optional
optional NodeFeatures features = 13;
+
+ // DeclaredFeatures represents the features related to feature gates that are declared by the node.
+ // +featureGate=NodeDeclaredFeatures
+ // +optional
+ // +listType=atomic
+ repeated string declaredFeatures = 14;
}
// NodeSwapStatus represents swap memory information.
@@ -3206,7 +3207,7 @@ message PersistentVolumeClaimSpec {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
// resources represents the minimum resources the volume should have.
- // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ // Users are allowed to specify resource requirements
// that are lower than previous value but must still be higher than capacity recorded in the
// status field of the claim.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
@@ -3324,9 +3325,6 @@ message PersistentVolumeClaimStatus {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +optional
map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
@@ -3363,9 +3361,6 @@ message PersistentVolumeClaimStatus {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +mapType=granular
// +optional
map<string, string> allocatedResourceStatuses = 7;
@@ -3609,6 +3604,7 @@ message PersistentVolumeSpec {
// nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
+ // This field is mutable if MutablePVNodeAffinity feature gate is enabled.
// +optional
optional VolumeNodeAffinity nodeAffinity = 9;
@@ -3898,6 +3894,21 @@ message PodCertificateProjection {
//
// +optional
optional string certificateChainPath = 6;
+
+ // userAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // These values are copied verbatim into the `spec.unverifiedUserAnnotations` field of
+ // the PodCertificateRequest objects that Kubelet creates.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ map<string, string> userAnnotations = 7;
}
// PodCondition contains details for the current condition of this pod.
@@ -3907,7 +3918,7 @@ message PodCondition {
optional string type = 1;
// If set, this represents the .metadata.generation that the pod condition was set based upon.
- // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
optional int64 observedGeneration = 7;
@@ -4681,8 +4692,8 @@ message PodSpec {
// will be made available to those containers which consume them
// by name.
//
- // This is an alpha field and requires enabling the
- // DynamicResourceAllocation feature gate.
+ // This is a stable field but requires that the
+ // DynamicResourceAllocation feature gate is enabled.
//
// This field is immutable.
//
@@ -4723,6 +4734,18 @@ message PodSpec {
// +featureGate=HostnameOverride
// +optional
optional string hostnameOverride = 41;
+
+ // WorkloadRef provides a reference to the Workload object that this Pod belongs to.
+ // This field is used by the scheduler to identify the PodGroup and apply the
+ // correct group scheduling policies. The Workload object referenced
+ // by this field may not exist at the time the Pod is created.
+ // This field is immutable, but a Workload object with the same name
+ // may be recreated with different policies. Doing this during pod scheduling
+ // may result in the placement not conforming to the expected policies.
+ //
+ // +featureGate=GenericWorkload
+ // +optional
+ optional WorkloadReference workloadRef = 42;
}
// PodStatus represents information about the status of a pod. Status may trail the actual
@@ -4730,7 +4753,7 @@ message PodSpec {
// plane.
message PodStatus {
// If set, this represents the .metadata.generation that the pod status was set based upon.
- // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
optional int64 observedGeneration = 17;
@@ -4886,6 +4909,20 @@ message PodStatus {
// +featureGate=DRAExtendedResource
// +optional
optional PodExtendedResourceClaimStatus extendedResourceClaimStatus = 18;
+
+ // AllocatedResources is the total requests allocated for this pod by the node.
+ // If pod-level requests are not set, this will be the total requests aggregated
+ // across containers in the pod.
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 19;
+
+ // Resources represents the compute resource requests and limits that have been
+ // applied at the pod level if pod-level requests or limits are set in
+ // PodSpec.Resources
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ optional ResourceRequirements resources = 20;
}
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
@@ -5263,6 +5300,8 @@ message ReplicationController {
// be the same as the Pod(s) that the replication controller manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines the specification of the desired behavior of the replication controller.
@@ -6378,7 +6417,6 @@ message ServiceSpec {
// field is not set, the implementation will apply its default routing
// strategy. If set to "PreferClose", implementations should prioritize
// endpoints that are in the same zone.
- // +featureGate=ServiceTrafficDistribution
// +optional
optional string trafficDistribution = 23;
}
@@ -6526,9 +6564,10 @@ message Toleration {
optional string key = 1;
// Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
+ // Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
+ // Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).
// +optional
optional string operator = 2;
@@ -6800,8 +6839,6 @@ message VolumeMount {
// None (or be unspecified, which defaults to None).
//
// If this field is not specified, it is treated as an equivalent of Disabled.
- //
- // +featureGate=RecursiveReadOnlyMounts
// +optional
optional string recursiveReadOnly = 7;
@@ -6846,7 +6883,6 @@ message VolumeMountStatus {
// RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
// An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
// depending on the mount result.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
optional string recursiveReadOnly = 4;
}
@@ -6929,7 +6965,8 @@ message VolumeProjection {
// issues; consult the signer implementation's documentation to learn how to
// use the certificates it issues.
//
- // +featureGate=PodCertificateProjection +optional
+ // +featureGate=PodCertificateProjection
+ // +optional
optional PodCertificateProjection podCertificate = 6;
}
@@ -7212,3 +7249,33 @@ message WindowsSecurityContextOptions {
optional bool hostProcess = 4;
}
+// WorkloadReference identifies the Workload object and PodGroup membership
+// that a Pod belongs to. The scheduler uses this information to apply
+// workload-aware scheduling semantics.
+message WorkloadReference {
+ // Name defines the name of the Workload object this Pod belongs to.
+ // Workload must be in the same namespace as the Pod.
+ // If it doesn't match any existing Workload, the Pod will remain unschedulable
+ // until a Workload object is created and observed by the kube-scheduler.
+ // It must be a DNS subdomain.
+ //
+ // +required
+ optional string name = 1;
+
+ // PodGroup is the name of the PodGroup within the Workload that this Pod
+ // belongs to. If it doesn't match any existing PodGroup within the Workload,
+ // the Pod will remain unschedulable until the Workload object is recreated
+ // and observed by the kube-scheduler. It must be a DNS label.
+ //
+ // +required
+ optional string podGroup = 2;
+
+ // PodGroupReplicaKey specifies the replica key of the PodGroup to which this
+ // Pod belongs. It is used to distinguish pods belonging to different replicas
+ // of the same pod group. The pod group policy is applied separately to each replica.
+ // When set, it must be a DNS label.
+ //
+ // +optional
+ optional string podGroupReplicaKey = 3;
+}
+
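The WorkloadReference message above surfaces in PodSpec as the new workloadRef field (field 42). As a rough sketch only, assuming the vendored k8s.io/api revision introduced by this change, a client could populate it as below; the Workload name, pod group, namespace, and image are made-up values, and the field is only acted on when the GenericWorkload feature gate is enabled.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-0", Namespace: "training"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "main", Image: "busybox"}},
			// WorkloadRef ties this Pod to a Workload/PodGroup so the scheduler can
			// apply group scheduling policies; the field is immutable once set.
			WorkloadRef: &corev1.WorkloadReference{
				Name:     "training-job", // DNS subdomain; Workload must share the Pod's namespace
				PodGroup: "workers",      // DNS label
				// PodGroupReplicaKey is optional and omitted here.
			},
		},
	}
	fmt.Println(pod.Spec.WorkloadRef.Name, pod.Spec.WorkloadRef.PodGroup)
}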
diff --git a/operator/vendor/k8s.io/api/core/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/core/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..cf74ffa0
--- /dev/null
+++ b/operator/vendor/k8s.io/api/core/v1/generated.protomessage.pb.go
@@ -0,0 +1,498 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {}
+
+func (*Affinity) ProtoMessage() {}
+
+func (*AppArmorProfile) ProtoMessage() {}
+
+func (*AttachedVolume) ProtoMessage() {}
+
+func (*AvoidPods) ProtoMessage() {}
+
+func (*AzureDiskVolumeSource) ProtoMessage() {}
+
+func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
+
+func (*AzureFileVolumeSource) ProtoMessage() {}
+
+func (*Binding) ProtoMessage() {}
+
+func (*CSIPersistentVolumeSource) ProtoMessage() {}
+
+func (*CSIVolumeSource) ProtoMessage() {}
+
+func (*Capabilities) ProtoMessage() {}
+
+func (*CephFSPersistentVolumeSource) ProtoMessage() {}
+
+func (*CephFSVolumeSource) ProtoMessage() {}
+
+func (*CinderPersistentVolumeSource) ProtoMessage() {}
+
+func (*CinderVolumeSource) ProtoMessage() {}
+
+func (*ClientIPConfig) ProtoMessage() {}
+
+func (*ClusterTrustBundleProjection) ProtoMessage() {}
+
+func (*ComponentCondition) ProtoMessage() {}
+
+func (*ComponentStatus) ProtoMessage() {}
+
+func (*ComponentStatusList) ProtoMessage() {}
+
+func (*ConfigMap) ProtoMessage() {}
+
+func (*ConfigMapEnvSource) ProtoMessage() {}
+
+func (*ConfigMapKeySelector) ProtoMessage() {}
+
+func (*ConfigMapList) ProtoMessage() {}
+
+func (*ConfigMapNodeConfigSource) ProtoMessage() {}
+
+func (*ConfigMapProjection) ProtoMessage() {}
+
+func (*ConfigMapVolumeSource) ProtoMessage() {}
+
+func (*Container) ProtoMessage() {}
+
+func (*ContainerExtendedResourceRequest) ProtoMessage() {}
+
+func (*ContainerImage) ProtoMessage() {}
+
+func (*ContainerPort) ProtoMessage() {}
+
+func (*ContainerResizePolicy) ProtoMessage() {}
+
+func (*ContainerRestartRule) ProtoMessage() {}
+
+func (*ContainerRestartRuleOnExitCodes) ProtoMessage() {}
+
+func (*ContainerState) ProtoMessage() {}
+
+func (*ContainerStateRunning) ProtoMessage() {}
+
+func (*ContainerStateTerminated) ProtoMessage() {}
+
+func (*ContainerStateWaiting) ProtoMessage() {}
+
+func (*ContainerStatus) ProtoMessage() {}
+
+func (*ContainerUser) ProtoMessage() {}
+
+func (*DaemonEndpoint) ProtoMessage() {}
+
+func (*DownwardAPIProjection) ProtoMessage() {}
+
+func (*DownwardAPIVolumeFile) ProtoMessage() {}
+
+func (*DownwardAPIVolumeSource) ProtoMessage() {}
+
+func (*EmptyDirVolumeSource) ProtoMessage() {}
+
+func (*EndpointAddress) ProtoMessage() {}
+
+func (*EndpointPort) ProtoMessage() {}
+
+func (*EndpointSubset) ProtoMessage() {}
+
+func (*Endpoints) ProtoMessage() {}
+
+func (*EndpointsList) ProtoMessage() {}
+
+func (*EnvFromSource) ProtoMessage() {}
+
+func (*EnvVar) ProtoMessage() {}
+
+func (*EnvVarSource) ProtoMessage() {}
+
+func (*EphemeralContainer) ProtoMessage() {}
+
+func (*EphemeralContainerCommon) ProtoMessage() {}
+
+func (*EphemeralVolumeSource) ProtoMessage() {}
+
+func (*Event) ProtoMessage() {}
+
+func (*EventList) ProtoMessage() {}
+
+func (*EventSeries) ProtoMessage() {}
+
+func (*EventSource) ProtoMessage() {}
+
+func (*ExecAction) ProtoMessage() {}
+
+func (*FCVolumeSource) ProtoMessage() {}
+
+func (*FileKeySelector) ProtoMessage() {}
+
+func (*FlexPersistentVolumeSource) ProtoMessage() {}
+
+func (*FlexVolumeSource) ProtoMessage() {}
+
+func (*FlockerVolumeSource) ProtoMessage() {}
+
+func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
+
+func (*GRPCAction) ProtoMessage() {}
+
+func (*GitRepoVolumeSource) ProtoMessage() {}
+
+func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
+
+func (*GlusterfsVolumeSource) ProtoMessage() {}
+
+func (*HTTPGetAction) ProtoMessage() {}
+
+func (*HTTPHeader) ProtoMessage() {}
+
+func (*HostAlias) ProtoMessage() {}
+
+func (*HostIP) ProtoMessage() {}
+
+func (*HostPathVolumeSource) ProtoMessage() {}
+
+func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
+
+func (*ISCSIVolumeSource) ProtoMessage() {}
+
+func (*ImageVolumeSource) ProtoMessage() {}
+
+func (*KeyToPath) ProtoMessage() {}
+
+func (*Lifecycle) ProtoMessage() {}
+
+func (*LifecycleHandler) ProtoMessage() {}
+
+func (*LimitRange) ProtoMessage() {}
+
+func (*LimitRangeItem) ProtoMessage() {}
+
+func (*LimitRangeList) ProtoMessage() {}
+
+func (*LimitRangeSpec) ProtoMessage() {}
+
+func (*LinuxContainerUser) ProtoMessage() {}
+
+func (*List) ProtoMessage() {}
+
+func (*LoadBalancerIngress) ProtoMessage() {}
+
+func (*LoadBalancerStatus) ProtoMessage() {}
+
+func (*LocalObjectReference) ProtoMessage() {}
+
+func (*LocalVolumeSource) ProtoMessage() {}
+
+func (*ModifyVolumeStatus) ProtoMessage() {}
+
+func (*NFSVolumeSource) ProtoMessage() {}
+
+func (*Namespace) ProtoMessage() {}
+
+func (*NamespaceCondition) ProtoMessage() {}
+
+func (*NamespaceList) ProtoMessage() {}
+
+func (*NamespaceSpec) ProtoMessage() {}
+
+func (*NamespaceStatus) ProtoMessage() {}
+
+func (*Node) ProtoMessage() {}
+
+func (*NodeAddress) ProtoMessage() {}
+
+func (*NodeAffinity) ProtoMessage() {}
+
+func (*NodeCondition) ProtoMessage() {}
+
+func (*NodeConfigSource) ProtoMessage() {}
+
+func (*NodeConfigStatus) ProtoMessage() {}
+
+func (*NodeDaemonEndpoints) ProtoMessage() {}
+
+func (*NodeFeatures) ProtoMessage() {}
+
+func (*NodeList) ProtoMessage() {}
+
+func (*NodeProxyOptions) ProtoMessage() {}
+
+func (*NodeRuntimeHandler) ProtoMessage() {}
+
+func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
+
+func (*NodeSelector) ProtoMessage() {}
+
+func (*NodeSelectorRequirement) ProtoMessage() {}
+
+func (*NodeSelectorTerm) ProtoMessage() {}
+
+func (*NodeSpec) ProtoMessage() {}
+
+func (*NodeStatus) ProtoMessage() {}
+
+func (*NodeSwapStatus) ProtoMessage() {}
+
+func (*NodeSystemInfo) ProtoMessage() {}
+
+func (*ObjectFieldSelector) ProtoMessage() {}
+
+func (*ObjectReference) ProtoMessage() {}
+
+func (*PersistentVolume) ProtoMessage() {}
+
+func (*PersistentVolumeClaim) ProtoMessage() {}
+
+func (*PersistentVolumeClaimCondition) ProtoMessage() {}
+
+func (*PersistentVolumeClaimList) ProtoMessage() {}
+
+func (*PersistentVolumeClaimSpec) ProtoMessage() {}
+
+func (*PersistentVolumeClaimStatus) ProtoMessage() {}
+
+func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
+
+func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
+
+func (*PersistentVolumeList) ProtoMessage() {}
+
+func (*PersistentVolumeSource) ProtoMessage() {}
+
+func (*PersistentVolumeSpec) ProtoMessage() {}
+
+func (*PersistentVolumeStatus) ProtoMessage() {}
+
+func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
+
+func (*Pod) ProtoMessage() {}
+
+func (*PodAffinity) ProtoMessage() {}
+
+func (*PodAffinityTerm) ProtoMessage() {}
+
+func (*PodAntiAffinity) ProtoMessage() {}
+
+func (*PodAttachOptions) ProtoMessage() {}
+
+func (*PodCertificateProjection) ProtoMessage() {}
+
+func (*PodCondition) ProtoMessage() {}
+
+func (*PodDNSConfig) ProtoMessage() {}
+
+func (*PodDNSConfigOption) ProtoMessage() {}
+
+func (*PodExecOptions) ProtoMessage() {}
+
+func (*PodExtendedResourceClaimStatus) ProtoMessage() {}
+
+func (*PodIP) ProtoMessage() {}
+
+func (*PodList) ProtoMessage() {}
+
+func (*PodLogOptions) ProtoMessage() {}
+
+func (*PodOS) ProtoMessage() {}
+
+func (*PodPortForwardOptions) ProtoMessage() {}
+
+func (*PodProxyOptions) ProtoMessage() {}
+
+func (*PodReadinessGate) ProtoMessage() {}
+
+func (*PodResourceClaim) ProtoMessage() {}
+
+func (*PodResourceClaimStatus) ProtoMessage() {}
+
+func (*PodSchedulingGate) ProtoMessage() {}
+
+func (*PodSecurityContext) ProtoMessage() {}
+
+func (*PodSignature) ProtoMessage() {}
+
+func (*PodSpec) ProtoMessage() {}
+
+func (*PodStatus) ProtoMessage() {}
+
+func (*PodStatusResult) ProtoMessage() {}
+
+func (*PodTemplate) ProtoMessage() {}
+
+func (*PodTemplateList) ProtoMessage() {}
+
+func (*PodTemplateSpec) ProtoMessage() {}
+
+func (*PortStatus) ProtoMessage() {}
+
+func (*PortworxVolumeSource) ProtoMessage() {}
+
+func (*Preconditions) ProtoMessage() {}
+
+func (*PreferAvoidPodsEntry) ProtoMessage() {}
+
+func (*PreferredSchedulingTerm) ProtoMessage() {}
+
+func (*Probe) ProtoMessage() {}
+
+func (*ProbeHandler) ProtoMessage() {}
+
+func (*ProjectedVolumeSource) ProtoMessage() {}
+
+func (*QuobyteVolumeSource) ProtoMessage() {}
+
+func (*RBDPersistentVolumeSource) ProtoMessage() {}
+
+func (*RBDVolumeSource) ProtoMessage() {}
+
+func (*RangeAllocation) ProtoMessage() {}
+
+func (*ReplicationController) ProtoMessage() {}
+
+func (*ReplicationControllerCondition) ProtoMessage() {}
+
+func (*ReplicationControllerList) ProtoMessage() {}
+
+func (*ReplicationControllerSpec) ProtoMessage() {}
+
+func (*ReplicationControllerStatus) ProtoMessage() {}
+
+func (*ResourceClaim) ProtoMessage() {}
+
+func (*ResourceFieldSelector) ProtoMessage() {}
+
+func (*ResourceHealth) ProtoMessage() {}
+
+func (*ResourceQuota) ProtoMessage() {}
+
+func (*ResourceQuotaList) ProtoMessage() {}
+
+func (*ResourceQuotaSpec) ProtoMessage() {}
+
+func (*ResourceQuotaStatus) ProtoMessage() {}
+
+func (*ResourceRequirements) ProtoMessage() {}
+
+func (*ResourceStatus) ProtoMessage() {}
+
+func (*SELinuxOptions) ProtoMessage() {}
+
+func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
+
+func (*ScaleIOVolumeSource) ProtoMessage() {}
+
+func (*ScopeSelector) ProtoMessage() {}
+
+func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
+
+func (*SeccompProfile) ProtoMessage() {}
+
+func (*Secret) ProtoMessage() {}
+
+func (*SecretEnvSource) ProtoMessage() {}
+
+func (*SecretKeySelector) ProtoMessage() {}
+
+func (*SecretList) ProtoMessage() {}
+
+func (*SecretProjection) ProtoMessage() {}
+
+func (*SecretReference) ProtoMessage() {}
+
+func (*SecretVolumeSource) ProtoMessage() {}
+
+func (*SecurityContext) ProtoMessage() {}
+
+func (*SerializedReference) ProtoMessage() {}
+
+func (*Service) ProtoMessage() {}
+
+func (*ServiceAccount) ProtoMessage() {}
+
+func (*ServiceAccountList) ProtoMessage() {}
+
+func (*ServiceAccountTokenProjection) ProtoMessage() {}
+
+func (*ServiceList) ProtoMessage() {}
+
+func (*ServicePort) ProtoMessage() {}
+
+func (*ServiceProxyOptions) ProtoMessage() {}
+
+func (*ServiceSpec) ProtoMessage() {}
+
+func (*ServiceStatus) ProtoMessage() {}
+
+func (*SessionAffinityConfig) ProtoMessage() {}
+
+func (*SleepAction) ProtoMessage() {}
+
+func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
+
+func (*StorageOSVolumeSource) ProtoMessage() {}
+
+func (*Sysctl) ProtoMessage() {}
+
+func (*TCPSocketAction) ProtoMessage() {}
+
+func (*Taint) ProtoMessage() {}
+
+func (*Toleration) ProtoMessage() {}
+
+func (*TopologySelectorLabelRequirement) ProtoMessage() {}
+
+func (*TopologySelectorTerm) ProtoMessage() {}
+
+func (*TopologySpreadConstraint) ProtoMessage() {}
+
+func (*TypedLocalObjectReference) ProtoMessage() {}
+
+func (*TypedObjectReference) ProtoMessage() {}
+
+func (*Volume) ProtoMessage() {}
+
+func (*VolumeDevice) ProtoMessage() {}
+
+func (*VolumeMount) ProtoMessage() {}
+
+func (*VolumeMountStatus) ProtoMessage() {}
+
+func (*VolumeNodeAffinity) ProtoMessage() {}
+
+func (*VolumeProjection) ProtoMessage() {}
+
+func (*VolumeResourceRequirements) ProtoMessage() {}
+
+func (*VolumeSource) ProtoMessage() {}
+
+func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
+
+func (*WeightedPodAffinityTerm) ProtoMessage() {}
+
+func (*WindowsSecurityContextOptions) ProtoMessage() {}
+
+func (*WorkloadReference) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/core/v1/toleration.go b/operator/vendor/k8s.io/api/core/v1/toleration.go
index e803d518..080c228d 100644
--- a/operator/vendor/k8s.io/api/core/v1/toleration.go
+++ b/operator/vendor/k8s.io/api/core/v1/toleration.go
@@ -16,6 +16,16 @@ limitations under the License.
package v1
+import (
+ "errors"
+ "strconv"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/validate/content"
+
+ "k8s.io/klog/v2"
+)
+
// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
// TODO: uniqueness check for tolerations in api validations.
@@ -35,7 +45,11 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
// 3. Empty toleration.key means to match all taint keys.
// If toleration.key is empty, toleration.operator must be 'Exists';
// this combination means to match all taint values and all taint keys.
-func (t *Toleration) ToleratesTaint(taint *Taint) bool {
+// 4. If toleration.operator is 'Lt' or 'Gt', numeric comparison is performed
+// between toleration.value and taint.value.
+// 5. If enableComparisonOperators is false and the toleration uses 'Lt' or 'Gt'
+// operators, the toleration does not match (returns false).
+func (t *Toleration) ToleratesTaint(logger klog.Logger, taint *Taint, enableComparisonOperators bool) bool {
if len(t.Effect) > 0 && t.Effect != taint.Effect {
return false
}
@@ -51,6 +65,47 @@ func (t *Toleration) ToleratesTaint(taint *Taint) bool {
return t.Value == taint.Value
case TolerationOpExists:
return true
+ case TolerationOpLt, TolerationOpGt:
+ // If comparison operators are disabled, this toleration doesn't match
+ if !enableComparisonOperators {
+ return false
+ }
+ return compareNumericValues(logger, t.Value, taint.Value, t.Operator)
+ default:
+ return false
+ }
+}
+
+// compareNumericValues performs numeric comparison between toleration and taint values
+func compareNumericValues(logger klog.Logger, tolerationVal, taintVal string, op TolerationOperator) bool {
+
+ errorMsgs := content.IsDecimalInteger(tolerationVal)
+ if len(errorMsgs) > 0 {
+ logger.Error(errors.New(strings.Join(errorMsgs, ",")), "failed to parse toleration value as int64", "toleration", tolerationVal)
+ return false
+ }
+ tVal, err := strconv.ParseInt(tolerationVal, 10, 64)
+ if err != nil {
+ logger.Error(err, "failed to parse toleration value as int64", "toleration", tolerationVal)
+ return false
+ }
+
+ errorMsgs = content.IsDecimalInteger(taintVal)
+ if len(errorMsgs) > 0 {
+ logger.Error(errors.New(strings.Join(errorMsgs, ",")), "failed to parse taint value as int64", "taint", taintVal)
+ return false
+ }
+ tntVal, err := strconv.ParseInt(taintVal, 10, 64)
+ if err != nil {
+ logger.Error(err, "failed to parse taint value as int64", "taint", taintVal)
+ return false
+ }
+
+ switch op {
+ case TolerationOpLt:
+ return tntVal < tVal
+ case TolerationOpGt:
+ return tntVal > tVal
default:
return false
}
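A rough usage sketch of the new comparison-operator path, assuming the updated ToleratesTaint signature vendored above; the taint key and numeric values are invented for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

func main() {
	// Hypothetical numeric taint; key and values are illustrative only.
	taint := corev1.Taint{
		Key:    "example.com/max-jobs",
		Value:  "5",
		Effect: corev1.TaintEffectNoSchedule,
	}

	// Lt matches when the taint value is numerically lower than the toleration value
	// (compareNumericValues returns taintVal < tolerationVal for TolerationOpLt).
	tol := corev1.Toleration{
		Key:      "example.com/max-jobs",
		Operator: corev1.TolerationOpLt,
		Value:    "10",
		Effect:   corev1.TaintEffectNoSchedule,
	}

	logger := klog.Background()
	fmt.Println(tol.ToleratesTaint(logger, &taint, true))  // true: 5 < 10 and the gate is on
	fmt.Println(tol.ToleratesTaint(logger, &taint, false)) // false: Lt/Gt never match with the gate off
}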
diff --git a/operator/vendor/k8s.io/api/core/v1/types.go b/operator/vendor/k8s.io/api/core/v1/types.go
index 08b6d351..705c8208 100644
--- a/operator/vendor/k8s.io/api/core/v1/types.go
+++ b/operator/vendor/k8s.io/api/core/v1/types.go
@@ -427,6 +427,7 @@ type PersistentVolumeSpec struct {
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
+ // This field is mutable if MutablePVNodeAffinity feature gate is enabled.
// +optional
NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
@@ -558,7 +559,7 @@ type PersistentVolumeClaimSpec struct {
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
// resources represents the minimum resources the volume should have.
- // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ // Users are allowed to specify resource requirements
// that are lower than previous value but must still be higher than capacity recorded in the
// status field of the claim.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
@@ -648,7 +649,7 @@ type TypedObjectReference struct {
// Valid values are:
// - "Resizing", "FileSystemResizePending"
//
-// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
+// The following additional values can be expected:
// - "ControllerResizeError", "NodeResizeError"
//
// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
@@ -796,9 +797,6 @@ type PersistentVolumeClaimStatus struct {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +optional
AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
@@ -838,9 +836,6 @@ type PersistentVolumeClaimStatus struct {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +mapType=granular
// +optional
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
@@ -2056,6 +2051,21 @@ type PodCertificateProjection struct {
//
// +optional
CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
+
+ // userAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // These values are copied verbatim into the `spec.unverifiedUserAnnotations` field of
+ // the PodCertificateRequest objects that Kubelet creates.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ UserAnnotations map[string]string `json:"userAnnotations,omitempty" protobuf:"bytes,7,rep,name=userAnnotations"`
}
// Represents a projected volume source
@@ -2144,7 +2154,8 @@ type VolumeProjection struct {
// issues; consult the signer implementation's documentation to learn how to
// use the certificates it issues.
//
- // +featureGate=PodCertificateProjection +optional
+ // +featureGate=PodCertificateProjection
+ // +optional
PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
}
@@ -2379,8 +2390,6 @@ type VolumeMount struct {
// None (or be unspecified, which defaults to None).
//
// If this field is not specified, it is treated as an equivalent of Disabled.
- //
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,7,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"`
// Path within the container at which the volume should be mounted. Must
@@ -2965,6 +2974,7 @@ type Container struct {
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Resources resize policy for the container.
+ // This field cannot be set on ephemeral containers.
// +featureGate=InPlacePodVerticalScaling
// +optional
// +listType=atomic
@@ -2984,7 +2994,6 @@ type Container struct {
// container. Instead, the next init container starts immediately after this
// init container is started, or after any startupProbe has successfully
// completed.
- // +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Represents a list of rules to be checked to determine if the
@@ -3128,7 +3137,6 @@ type LifecycleHandler struct {
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
// Sleep represents a duration that the container should sleep.
- // +featureGate=PodLifecycleSleepAction
// +optional
Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
}
@@ -3369,7 +3377,6 @@ type ContainerStatus struct {
// +patchStrategy=merge
// +listType=map
// +listMapKey=mountPath
- // +featureGate=RecursiveReadOnlyMounts
VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
// User represents user identity information initially attached to the first process of the container
// +featureGate=SupplementalGroupsPolicy
@@ -3523,6 +3530,8 @@ const (
// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
// requested in the middle of a previous pod resize that is still in progress.
PodResizeInProgress PodConditionType = "PodResizeInProgress"
+ // AllContainersRestarting indicates that all containers of the pod are being restarted.
+ AllContainersRestarting PodConditionType = "AllContainersRestarting"
)
// These are reasons for a pod's transition to a condition.
@@ -3566,7 +3575,7 @@ type PodCondition struct {
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
// If set, this represents the .metadata.generation that the pod condition was set based upon.
- // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,7,opt,name=observedGeneration"`
@@ -3612,7 +3621,6 @@ type VolumeMountStatus struct {
// RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
// An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
// depending on the mount result.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,4,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"`
}
@@ -3660,7 +3668,8 @@ type ContainerRestartRuleAction string
// The only valid action is Restart.
const (
- ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
+ ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
+ ContainerRestartRuleActionRestartAllContainers ContainerRestartRuleAction = "RestartAllContainers"
)
// ContainerRestartRuleOnExitCodes describes the condition
@@ -4048,9 +4057,10 @@ type Toleration struct {
// +optional
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
+ // Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
+ // Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).
// +optional
Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
// Value is the taint value the toleration matches to.
@@ -4076,6 +4086,8 @@ type TolerationOperator string
const (
TolerationOpExists TolerationOperator = "Exists"
TolerationOpEqual TolerationOperator = "Equal"
+ TolerationOpLt TolerationOperator = "Lt"
+ TolerationOpGt TolerationOperator = "Gt"
)
// PodReadinessGate contains the reference to a pod condition
@@ -4388,8 +4400,8 @@ type PodSpec struct {
// will be made available to those containers which consume them
// by name.
//
- // This is an alpha field and requires enabling the
- // DynamicResourceAllocation feature gate.
+ // This is a stable field but requires that the
+ // DynamicResourceAllocation feature gate is enabled.
//
// This field is immutable.
//
@@ -4428,6 +4440,17 @@ type PodSpec struct {
// +featureGate=HostnameOverride
// +optional
HostnameOverride *string `json:"hostnameOverride,omitempty" protobuf:"bytes,41,opt,name=hostnameOverride"`
+ // WorkloadRef provides a reference to the Workload object that this Pod belongs to.
+ // This field is used by the scheduler to identify the PodGroup and apply the
+ // correct group scheduling policies. The Workload object referenced
+ // by this field may not exist at the time the Pod is created.
+ // This field is immutable, but a Workload object with the same name
+ // may be recreated with different policies. Doing this during pod scheduling
+ // may result in the placement not conforming to the expected policies.
+ //
+ // +featureGate=GenericWorkload
+ // +optional
+ WorkloadRef *WorkloadReference `json:"workloadRef,omitempty" protobuf:"bytes,42,opt,name=workloadRef"`
}
// PodResourceClaim references exactly one ResourceClaim, either directly
@@ -4539,6 +4562,36 @@ type PodSchedulingGate struct {
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
}
+// WorkloadReference identifies the Workload object and PodGroup membership
+// that a Pod belongs to. The scheduler uses this information to apply
+// workload-aware scheduling semantics.
+type WorkloadReference struct {
+ // Name defines the name of the Workload object this Pod belongs to.
+ // Workload must be in the same namespace as the Pod.
+ // If it doesn't match any existing Workload, the Pod will remain unschedulable
+ // until a Workload object is created and observed by the kube-scheduler.
+ // It must be a DNS subdomain.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // PodGroup is the name of the PodGroup within the Workload that this Pod
+ // belongs to. If it doesn't match any existing PodGroup within the Workload,
+ // the Pod will remain unschedulable until the Workload object is recreated
+ // and observed by the kube-scheduler. It must be a DNS label.
+ //
+ // +required
+ PodGroup string `json:"podGroup" protobuf:"bytes,2,opt,name=podGroup"`
+
+ // PodGroupReplicaKey specifies the replica key of the PodGroup to which this
+ // Pod belongs. It is used to distinguish pods belonging to different replicas
+ // of the same pod group. The pod group policy is applied separately to each replica.
+ // When set, it must be a DNS label.
+ //
+ // +optional
+ PodGroupReplicaKey string `json:"podGroupReplicaKey,omitempty" protobuf:"bytes,3,opt,name=podGroupReplicaKey"`
+}
+
// +enum
type UnsatisfiableConstraintAction string
@@ -5074,7 +5127,6 @@ type EphemeralContainerCommon struct {
// Restart policy for the container to manage the restart behavior of each
// container within a pod.
// You cannot set this field on ephemeral containers.
- // +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
// Represents a list of rules to be checked to determine if the
@@ -5198,7 +5250,7 @@ type EphemeralContainer struct {
// plane.
type PodStatus struct {
// If set, this represents the .metadata.generation that the pod status was set based upon.
- // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,17,opt,name=observedGeneration"`
@@ -5348,6 +5400,20 @@ type PodStatus struct {
// +featureGate=DRAExtendedResource
// +optional
ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus `json:"extendedResourceClaimStatus,omitempty" protobuf:"bytes,18,opt,name=extendedResourceClaimStatus"`
+
+ // AllocatedResources is the total requests allocated for this pod by the node.
+ // If pod-level requests are not set, this will be the total requests aggregated
+ // across containers in the pod.
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,19,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
+
+ // Resources represents the compute resource requests and limits that have been
+ // applied at the pod level if pod-level requests or limits are set in
+ // PodSpec.Resources
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,20,opt,name=resources"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -5578,6 +5644,8 @@ type ReplicationController struct {
// be the same as the Pod(s) that the replication controller manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the specification of the desired behavior of the replication controller.
@@ -5678,8 +5746,11 @@ const (
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local"
)
-// for backwards compat
+// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they
+// receive on the ClusterIP.
// +enum
+//
+// Deprecated: use ServiceInternalTrafficPolicy instead.
type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy
// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they
@@ -5698,8 +5769,12 @@ const (
ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local"
)
-// for backwards compat
+// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they
+// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs,
+// and LoadBalancer IPs).
// +enum
+//
+// Deprecated: use ServiceExternalTrafficPolicy instead.
type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy
const (
@@ -5709,27 +5784,25 @@ const (
// These are valid values for the TrafficDistribution field of a Service.
const (
- // Indicates a preference for routing traffic to endpoints that are in the same
- // zone as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same zone"
- // preference will not result in endpoints getting overloaded.
- ServiceTrafficDistributionPreferClose = "PreferClose"
-
- // Indicates a preference for routing traffic to endpoints that are in the same
- // zone as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same zone"
- // preference will not result in endpoints getting overloaded.
- // This is an alias for "PreferClose", but it is an Alpha feature and is only
- // recognized if the PreferSameTrafficDistribution feature gate is enabled.
+ // ServiceTrafficDistributionPreferSameZone indicates a preference for routing
+ // traffic to endpoints that are in the same zone as the client. Users should only
+ // set this value if they have ensured that clients and endpoints are distributed
+ // in such a way that the "same zone" preference will not result in endpoints
+ // getting overloaded.
ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
- // Indicates a preference for routing traffic to endpoints that are on the same
- // node as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same node"
- // preference will not result in endpoints getting overloaded.
- // This is an Alpha feature and is only recognized if the
- // PreferSameTrafficDistribution feature gate is enabled.
+ // ServiceTrafficDistributionPreferSameNode indicates a preference for routing
+ // traffic to endpoints that are on the same node as the client. Users should only
+ // set this value if they have ensured that clients and endpoints are distributed
+ // in such a way that the "same node" preference will not result in endpoints
+ // getting overloaded.
ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
+
+ // ServiceTrafficDistributionPreferClose is the original name of "PreferSameZone".
+ // Despite the generic-sounding name, it has exactly the same meaning as
+ // "PreferSameZone".
+ // Deprecated: use "PreferSameZone" instead.
+ ServiceTrafficDistributionPreferClose = "PreferClose"
)
// These are the valid conditions of a service.
@@ -5833,8 +5906,10 @@ const (
IPFamilyPolicyRequireDualStack IPFamilyPolicy = "RequireDualStack"
)
-// for backwards compat
+// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service
// +enum
+//
+// Deprecated: use IPFamilyPolicy instead.
type IPFamilyPolicyType = IPFamilyPolicy
// ServiceSpec describes the attributes that a user creates on a service.
@@ -6083,7 +6158,6 @@ type ServiceSpec struct {
// field is not set, the implementation will apply its default routing
// strategy. If set to "PreferClose", implementations should prioritize
// endpoints that are in the same zone.
- // +featureGate=ServiceTrafficDistribution
// +optional
TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
}
@@ -6508,7 +6582,6 @@ type NodeDaemonEndpoints struct {
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
type NodeRuntimeHandlerFeatures struct {
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
@@ -6683,7 +6756,6 @@ type NodeStatus struct {
// +optional
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
// The available runtime handlers.
- // +featureGate=RecursiveReadOnlyMounts
// +featureGate=UserNamespacesSupport
// +optional
// +listType=atomic
@@ -6692,6 +6764,11 @@ type NodeStatus struct {
// +featureGate=SupplementalGroupsPolicy
// +optional
Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"`
+ // DeclaredFeatures represents the features related to feature gates that are declared by the node.
+ // +featureGate=NodeDeclaredFeatures
+ // +optional
+ // +listType=atomic
+ DeclaredFeatures []string `json:"declaredFeatures,omitempty" protobuf:"bytes,14,rep,name=declaredFeatures"`
}
type UniqueVolumeName string
@@ -7645,6 +7722,8 @@ const (
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
// resource.k8s.io devices requested with a certain DeviceClass, number
ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
+ // resource.k8s.io devices requested with a certain DeviceClass by implicit extended resource name, number
+ ResourceImplicitExtendedClaimsPerClass string = "requests.deviceclass.resource.kubernetes.io/"
)
// The following identify resource prefix for Kubernetes object types
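The pod-level AllocatedResources and Resources status fields added above are gated by InPlacePodLevelResourcesVerticalScaling and may be absent. A minimal, hedged sketch of reading them defensively; the function name and printing are illustrative only.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// printPodLevelCPU prints the pod-level CPU figures from the new status fields, if present.
func printPodLevelCPU(pod *corev1.Pod) {
	// AllocatedResources falls back to the aggregate of container requests when
	// pod-level requests are not set, so it can be present without spec.resources.
	if cpu, ok := pod.Status.AllocatedResources[corev1.ResourceCPU]; ok {
		fmt.Println("allocated cpu:", cpu.String())
	}
	// Resources is only populated when pod-level requests or limits are set in PodSpec.Resources.
	if pod.Status.Resources != nil {
		if req, ok := pod.Status.Resources.Requests[corev1.ResourceCPU]; ok {
			fmt.Println("pod-level cpu request:", req.String())
		}
	}
}

func main() {
	// No-op on an empty Pod; real callers would pass a Pod fetched from the API server.
	printPodLevelCPU(&corev1.Pod{})
}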
diff --git a/operator/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 12043076..0f5e44e9 100644
--- a/operator/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -359,7 +359,7 @@ var map_Container = map[string]string{
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source may consist of any printable ASCII characters except '='. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
- "resizePolicy": "Resources resize policy for the container.",
+ "resizePolicy": "Resources resize policy for the container. This field cannot be set on ephemeral containers.",
"restartPolicy": "RestartPolicy defines the restart behavior of individual containers in a pod. This overrides the pod-level restart policy. When this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Additionally, setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
"restartPolicyRules": "Represents a list of rules to be checked to determine if the container should be restarted on exit. The rules are evaluated in order. Once a rule matches a container exit condition, the remaining rules are ignored. If no rule matches the container exit condition, the Container-level restart policy determines the whether the container is restarted or not. Constraints on the rules: - At most 20 rules are allowed. - Rules can have the same action. - Identical rules are not forbidden in validations. When rules are specified, container MUST set RestartPolicy explicitly even it if matches the Pod's RestartPolicy.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
@@ -1363,20 +1363,21 @@ func (NodeSpec) SwaggerDoc() map[string]string {
}
var map_NodeStatus = map[string]string{
- "": "NodeStatus is information about the current status of a node.",
- "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
- "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
- "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
- "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition",
- "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
- "daemonEndpoints": "Endpoints of daemons running on the Node.",
- "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info",
- "images": "List of container images on this node",
- "volumesInUse": "List of attachable volumes in use (mounted) by the node.",
- "volumesAttached": "List of volumes that are attached to the node.",
- "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
- "runtimeHandlers": "The available runtime handlers.",
- "features": "Features describes the set of features implemented by the CRI implementation.",
+ "": "NodeStatus is information about the current status of a node.",
+ "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
+ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
+ "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
+ "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition",
+ "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
+ "daemonEndpoints": "Endpoints of daemons running on the Node.",
+ "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info",
+ "images": "List of container images on this node",
+ "volumesInUse": "List of attachable volumes in use (mounted) by the node.",
+ "volumesAttached": "List of volumes that are attached to the node.",
+ "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
+ "runtimeHandlers": "The available runtime handlers.",
+ "features": "Features describes the set of features implemented by the CRI implementation.",
+ "declaredFeatures": "DeclaredFeatures represents the features related to feature gates that are declared by the node.",
}
func (NodeStatus) SwaggerDoc() map[string]string {
@@ -1486,7 +1487,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
"accessModes": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
"selector": "selector is a label query over volumes to consider for binding.",
- "resources": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+ "resources": "resources represents the minimum resources the volume should have. Users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
"volumeName": "volumeName is the binding reference to the PersistentVolume backing this claim.",
"storageClassName": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
@@ -1505,8 +1506,8 @@ var map_PersistentVolumeClaimStatus = map[string]string{
"accessModes": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
"capacity": "capacity represents the actual resources of the underlying volume.",
"conditions": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
- "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
- "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
+ "allocatedResources": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.",
+ "allocatedResourceStatuses": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.",
"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim",
"modifyVolumeStatus": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted.",
}
@@ -1584,7 +1585,7 @@ var map_PersistentVolumeSpec = map[string]string{
"storageClassName": "storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
"mountOptions": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
- "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
+ "nodeAffinity": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume. This field is mutable if MutablePVNodeAffinity feature gate is enabled.",
"volumeAttributesClassName": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process.",
}
@@ -1680,6 +1681,7 @@ var map_PodCertificateProjection = map[string]string{
"credentialBundlePath": "Write the credential bundle at this path in the projected volume.\n\nThe credential bundle is a single file that contains multiple PEM blocks. The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private key.\n\nThe remaining blocks are CERTIFICATE blocks, containing the issued certificate chain from the signer (leaf and any intermediates).\n\nUsing credentialBundlePath lets your Pod's application code make a single atomic read that retrieves a consistent key and certificate chain. If you project them to separate files, your application code will need to additionally check that the leaf certificate was issued to the key.",
"keyPath": "Write the key at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
"certificateChainPath": "Write the certificate chain at this path in the projected volume.\n\nMost applications should use credentialBundlePath. When using keyPath and certificateChainPath, your application needs to check that the key and leaf certificate are consistent, because it is possible to read the files mid-rotation.",
+ "userAnnotations": "userAnnotations allow pod authors to pass additional information to the signer implementation. Kubernetes does not restrict or validate this metadata in any way.\n\nThese values are copied verbatim into the `spec.unverifiedUserAnnotations` field of the PodCertificateRequest objects that Kubelet creates.\n\nEntries are subject to the same validation as object metadata annotations, with the addition that all keys must be domain-prefixed. No restrictions are placed on values, except an overall size limitation on the entire field.\n\nSigners should document the keys and values they support. Signers should deny requests that contain keys they do not recognize.",
}
func (PodCertificateProjection) SwaggerDoc() map[string]string {
@@ -1689,7 +1691,7 @@ func (PodCertificateProjection) SwaggerDoc() map[string]string {
var map_PodCondition = map[string]string{
"": "PodCondition contains details for the current condition of this pod.",
"type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
- "observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
+ "observedGeneration": "If set, this represents the .metadata.generation that the pod condition was set based upon. The PodObservedGenerationTracking feature gate must be enabled to use this field.",
"status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"lastProbeTime": "Last time we probed the condition.",
"lastTransitionTime": "Last time the condition transitioned from one status to another.",
@@ -1919,9 +1921,10 @@ var map_PodSpec = map[string]string{
"os": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.resources - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
"hostUsers": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
"schedulingGates": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
- "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
+ "resourceClaims": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is a stable field but requires that the DynamicResourceAllocation feature gate is enabled.\n\nThis field is immutable.",
"resources": "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\", \"memory\" and \"hugepages-\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
"hostnameOverride": "HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod. This field only specifies the pod's hostname and does not affect its DNS records. When this field is set to a non-empty string: - It takes precedence over the values set in `hostname` and `subdomain`. - The Pod's hostname will be set to this value. - `setHostnameAsFQDN` must be nil or set to false. - `hostNetwork` must be set to false.\n\nThis field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters. Requires the HostnameOverride feature gate to be enabled.",
+ "workloadRef": "WorkloadRef provides a reference to the Workload object that this Pod belongs to. This field is used by the scheduler to identify the PodGroup and apply the correct group scheduling policies. The Workload object referenced by this field may not exist at the time the Pod is created. This field is immutable, but a Workload object with the same name may be recreated with different policies. Doing this during pod scheduling may result in the placement not conforming to the expected policies.",
}
func (PodSpec) SwaggerDoc() map[string]string {
@@ -1930,7 +1933,7 @@ func (PodSpec) SwaggerDoc() map[string]string {
var map_PodStatus = map[string]string{
"": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
- "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.",
+ "observedGeneration": "If set, this represents the .metadata.generation that the pod status was set based upon. The PodObservedGenerationTracking feature gate must be enabled to use this field.",
"phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
"conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
"message": "A human readable message indicating details about why the pod is in this condition.",
@@ -1948,6 +1951,8 @@ var map_PodStatus = map[string]string{
"resize": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\" Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress. PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources. PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.",
"resourceClaimStatuses": "Status of resource claims.",
"extendedResourceClaimStatus": "Status of extended resource claim backed by DRA.",
+ "allocatedResources": "AllocatedResources is the total requests allocated for this pod by the node. If pod-level requests are not set, this will be the total requests aggregated across containers in the pod.",
+ "resources": "Resources represents the compute resource requests and limits that have been applied at the pod level if pod-level requests or limits are set in PodSpec.Resources",
}
func (PodStatus) SwaggerDoc() map[string]string {
@@ -2669,7 +2674,7 @@ func (Taint) SwaggerDoc() map[string]string {
var map_Toleration = map[string]string{
"": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .",
"key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
- "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
+ "operator": "Operator represents a key's relationship to the value. Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).",
"value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
"effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
"tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
@@ -2888,4 +2893,15 @@ func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
return map_WindowsSecurityContextOptions
}
+var map_WorkloadReference = map[string]string{
+ "": "WorkloadReference identifies the Workload object and PodGroup membership that a Pod belongs to. The scheduler uses this information to apply workload-aware scheduling semantics.",
+ "name": "Name defines the name of the Workload object this Pod belongs to. Workload must be in the same namespace as the Pod. If it doesn't match any existing Workload, the Pod will remain unschedulable until a Workload object is created and observed by the kube-scheduler. It must be a DNS subdomain.",
+ "podGroup": "PodGroup is the name of the PodGroup within the Workload that this Pod belongs to. If it doesn't match any existing PodGroup within the Workload, the Pod will remain unschedulable until the Workload object is recreated and observed by the kube-scheduler. It must be a DNS label.",
+ "podGroupReplicaKey": "PodGroupReplicaKey specifies the replica key of the PodGroup to which this Pod belongs. It is used to distinguish pods belonging to different replicas of the same pod group. The pod group policy is applied separately to each replica. When set, it must be a DNS label.",
+}
+
+func (WorkloadReference) SwaggerDoc() map[string]string {
+ return map_WorkloadReference
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
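The vendored core/v1 doc strings above describe new API surfaces picked up by this dependency bump, notably the Lt/Gt toleration comparison operators and the workloadRef field on PodSpec. A minimal sketch of how operator code could populate these fields, assuming the field names shown in the generated docs (Name, PodGroup, PodGroupReplicaKey are inferred from the swagger keys) and that the relevant feature gates (TaintTolerationComparisonOperators, workload-aware scheduling) are enabled; this is illustrative only, not part of the patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Toleration using the numeric comparison operator documented above.
	// "Gt" is written as a raw string rather than a named constant, since only
	// the doc string in this diff is being relied on (assumption).
	tol := corev1.Toleration{
		Key:      "example.com/priority",
		Operator: corev1.TolerationOperator("Gt"),
		Value:    "100",
		Effect:   corev1.TaintEffectNoSchedule,
	}

	// PodSpec.WorkloadRef as documented above; requires the vendored API
	// version from this patch, which adds the WorkloadReference type.
	spec := corev1.PodSpec{
		Tolerations: []corev1.Toleration{tol},
		WorkloadRef: &corev1.WorkloadReference{
			Name:     "training-job",
			PodGroup: "workers",
		},
	}

	fmt.Println(spec.Tolerations[0].Operator, spec.WorkloadRef.Name)
}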
diff --git a/operator/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index bcd91bd0..15bc2ee0 100644
--- a/operator/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -3145,6 +3145,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = new(NodeFeatures)
(*in).DeepCopyInto(*out)
}
+ if in.DeclaredFeatures != nil {
+ in, out := &in.DeclaredFeatures, &out.DeclaredFeatures
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -3903,6 +3908,13 @@ func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection)
*out = new(int32)
**out = **in
}
+ if in.UserAnnotations != nil {
+ in, out := &in.UserAnnotations, &out.UserAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
return
}
@@ -4557,6 +4569,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(string)
**out = **in
}
+ if in.WorkloadRef != nil {
+ in, out := &in.WorkloadRef, &out.WorkloadRef
+ *out = new(WorkloadReference)
+ **out = **in
+ }
return
}
@@ -4627,6 +4644,18 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
*out = new(PodExtendedResourceClaimStatus)
(*in).DeepCopyInto(*out)
}
+ if in.AllocatedResources != nil {
+ in, out := &in.AllocatedResources, &out.AllocatedResources
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -6844,3 +6873,19 @@ func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptio
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkloadReference) DeepCopyInto(out *WorkloadReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadReference.
+func (in *WorkloadReference) DeepCopy() *WorkloadReference {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkloadReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/operator/vendor/k8s.io/api/core/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/core/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..523bb3a8
--- /dev/null
+++ b/operator/vendor/k8s.io/api/core/v1/zz_generated.model_name.go
@@ -0,0 +1,1212 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AWSElasticBlockStoreVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Affinity) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Affinity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AppArmorProfile) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AppArmorProfile"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AttachedVolume) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AttachedVolume"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AvoidPods) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AvoidPods"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AzureDiskVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AzureDiskVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AzureFilePersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AzureFilePersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AzureFileVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.AzureFileVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Binding) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Binding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CSIPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CSIVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Capabilities) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Capabilities"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CephFSPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CephFSPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CephFSVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CephFSVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CinderPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CinderPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CinderVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.CinderVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClientIPConfig) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ClientIPConfig"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterTrustBundleProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ClusterTrustBundleProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ComponentCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ComponentCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ComponentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ComponentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ComponentStatusList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ComponentStatusList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMap) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMap"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapEnvSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapEnvSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapKeySelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapKeySelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapNodeConfigSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapNodeConfigSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ConfigMapVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ConfigMapVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Container) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Container"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerExtendedResourceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerExtendedResourceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerImage) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerImage"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerPort) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerResizePolicy) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerResizePolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerRestartRule) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerRestartRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerRestartRuleOnExitCodes) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerRestartRuleOnExitCodes"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerState) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerState"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerStateRunning) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerStateRunning"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerStateTerminated) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerStateTerminated"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerStateWaiting) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerStateWaiting"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ContainerUser) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ContainerUser"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonEndpoint) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.DaemonEndpoint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DownwardAPIProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.DownwardAPIProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DownwardAPIVolumeFile) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.DownwardAPIVolumeFile"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DownwardAPIVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.DownwardAPIVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EmptyDirVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EmptyDirVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointAddress) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EndpointAddress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointPort) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EndpointPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointSubset) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EndpointSubset"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Endpoints) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Endpoints"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointsList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EndpointsList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EnvFromSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EnvFromSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EnvVar) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EnvVar"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EnvVarSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EnvVarSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EphemeralContainer) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EphemeralContainer"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EphemeralContainerCommon) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EphemeralContainerCommon"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EphemeralVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EphemeralVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Event) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Event"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EventList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventSeries) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EventSeries"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.EventSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecAction) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ExecAction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FCVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.FCVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FileKeySelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.FileKeySelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlexPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.FlexPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlexVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.FlexVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlockerVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.FlockerVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GCEPersistentDiskVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GRPCAction) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.GRPCAction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GitRepoVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.GitRepoVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GlusterfsPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.GlusterfsPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GlusterfsVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.GlusterfsVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPGetAction) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.HTTPGetAction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPHeader) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.HTTPHeader"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HostAlias) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.HostAlias"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HostIP) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.HostIP"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HostPathVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.HostPathVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ISCSIPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ISCSIPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ISCSIVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ISCSIVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ImageVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ImageVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in KeyToPath) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.KeyToPath"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Lifecycle) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Lifecycle"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LifecycleHandler) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LifecycleHandler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitRange) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LimitRange"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitRangeItem) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LimitRangeItem"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitRangeList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LimitRangeList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitRangeSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LimitRangeSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LinuxContainerUser) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LinuxContainerUser"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in List) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.List"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LoadBalancerIngress) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LoadBalancerIngress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LoadBalancerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LoadBalancerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LocalObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LocalObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LocalVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.LocalVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ModifyVolumeStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ModifyVolumeStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NFSVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NFSVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Namespace) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Namespace"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamespaceCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NamespaceCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamespaceList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NamespaceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamespaceSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NamespaceSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NamespaceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NamespaceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Node) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Node"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeAddress) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeAddress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeAffinity) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeAffinity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeConfigSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeConfigSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeConfigStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeConfigStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeDaemonEndpoints) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeDaemonEndpoints"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeFeatures) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeFeatures"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeProxyOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeProxyOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeRuntimeHandler) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeRuntimeHandler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeRuntimeHandlerFeatures) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeRuntimeHandlerFeatures"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSelectorTerm) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSelectorTerm"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSwapStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSwapStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NodeSystemInfo) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.NodeSystemInfo"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectFieldSelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ObjectFieldSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolume) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolume"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaim) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimTemplate) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimTemplate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeClaimVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PersistentVolumeStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PersistentVolumeStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PhotonPersistentDiskVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Pod) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Pod"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodAffinity) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodAffinity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodAffinityTerm) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodAffinityTerm"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodAntiAffinity) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodAntiAffinity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodAttachOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodAttachOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCertificateProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodCertificateProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDNSConfig) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodDNSConfig"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDNSConfigOption) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodDNSConfigOption"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodExecOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodExecOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodExtendedResourceClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodExtendedResourceClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodIP) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodIP"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodLogOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodLogOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodOS) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodOS"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodPortForwardOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodPortForwardOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodProxyOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodProxyOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodReadinessGate) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodReadinessGate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodResourceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodResourceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodResourceClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodResourceClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodSchedulingGate) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodSchedulingGate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodSecurityContext) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodSecurityContext"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodSignature) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodSignature"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodStatusResult) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodStatusResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodTemplate) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodTemplate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodTemplateList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodTemplateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PodTemplateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PortStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PortStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PortworxVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PortworxVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Preconditions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Preconditions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PreferAvoidPodsEntry) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PreferAvoidPodsEntry"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PreferredSchedulingTerm) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.PreferredSchedulingTerm"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Probe) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Probe"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ProbeHandler) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ProbeHandler"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ProjectedVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ProjectedVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QuobyteVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.QuobyteVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RBDPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.RBDPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RBDVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.RBDVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RangeAllocation) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.RangeAllocation"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicationController) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ReplicationController"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicationControllerCondition) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ReplicationControllerCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicationControllerList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ReplicationControllerList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicationControllerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ReplicationControllerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicationControllerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ReplicationControllerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceFieldSelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceFieldSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceHealth) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceHealth"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceQuota) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceQuota"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceQuotaList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceQuotaList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceQuotaSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceQuotaSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceQuotaStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceQuotaStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceRequirements) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceRequirements"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ResourceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SELinuxOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SELinuxOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleIOPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ScaleIOPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleIOVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ScaleIOVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScopeSelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ScopeSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScopedResourceSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ScopedResourceSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SeccompProfile) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SeccompProfile"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Secret) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Secret"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretEnvSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretEnvSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretKeySelector) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretKeySelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecretVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecretVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SecurityContext) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SecurityContext"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SerializedReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SerializedReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Service) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Service"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccount) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceAccount"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceAccountList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountTokenProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceAccountTokenProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceList) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServicePort) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServicePort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceProxyOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceProxyOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceSpec) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.ServiceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SessionAffinityConfig) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SessionAffinityConfig"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SleepAction) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.SleepAction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageOSPersistentVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.StorageOSPersistentVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageOSVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.StorageOSVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Sysctl) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Sysctl"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TCPSocketAction) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TCPSocketAction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Taint) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Taint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Toleration) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Toleration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TopologySelectorLabelRequirement) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TopologySelectorLabelRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TopologySelectorTerm) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TopologySelectorTerm"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TopologySpreadConstraint) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TopologySpreadConstraint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypedLocalObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TypedLocalObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypedObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.TypedObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Volume) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.Volume"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeDevice) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeDevice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeMount) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeMount"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeMountStatus) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeMountStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeNodeAffinity) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeNodeAffinity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeProjection) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeProjection"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeResourceRequirements) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeResourceRequirements"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VsphereVirtualDiskVolumeSource) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WeightedPodAffinityTerm) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.WeightedPodAffinityTerm"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WindowsSecurityContextOptions) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.WindowsSecurityContextOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WorkloadReference) OpenAPIModelName() string {
+ return "io.k8s.api.core.v1.WorkloadReference"
+}
diff --git a/operator/vendor/k8s.io/api/discovery/v1/doc.go b/operator/vendor/k8s.io/api/discovery/v1/doc.go
index 43e30b7f..2f4d40a4 100644
--- a/operator/vendor/k8s.io/api/discovery/v1/doc.go
+++ b/operator/vendor/k8s.io/api/discovery/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.discovery.v1
+
// +groupName=discovery.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/discovery/v1/generated.pb.go b/operator/vendor/k8s.io/api/discovery/v1/generated.pb.go
index 443ff8f8..39c1db71 100644
--- a/operator/vendor/k8s.io/api/discovery/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/discovery/v1/generated.pb.go
@@ -23,329 +23,31 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Endpoint) Reset() { *m = Endpoint{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
-func (m *Endpoint) Reset() { *m = Endpoint{} }
-func (*Endpoint) ProtoMessage() {}
-func (*Endpoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{0}
-}
-func (m *Endpoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Endpoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Endpoint.Merge(m, src)
-}
-func (m *Endpoint) XXX_Size() int {
- return m.Size()
-}
-func (m *Endpoint) XXX_DiscardUnknown() {
- xxx_messageInfo_Endpoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Endpoint proto.InternalMessageInfo
-
-func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
-func (*EndpointConditions) ProtoMessage() {}
-func (*EndpointConditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{1}
-}
-func (m *EndpointConditions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointConditions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointConditions.Merge(m, src)
-}
-func (m *EndpointConditions) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointConditions) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointConditions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo
-
-func (m *EndpointHints) Reset() { *m = EndpointHints{} }
-func (*EndpointHints) ProtoMessage() {}
-func (*EndpointHints) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{2}
-}
-func (m *EndpointHints) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointHints) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointHints.Merge(m, src)
-}
-func (m *EndpointHints) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointHints) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointHints.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointHints proto.InternalMessageInfo
-
-func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-func (*EndpointPort) ProtoMessage() {}
-func (*EndpointPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{3}
-}
-func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointPort.Merge(m, src)
-}
-func (m *EndpointPort) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointPort) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointPort.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
-
-func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
-func (*EndpointSlice) ProtoMessage() {}
-func (*EndpointSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{4}
-}
-func (m *EndpointSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointSlice.Merge(m, src)
-}
-func (m *EndpointSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointSlice.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo
-
-func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
-func (*EndpointSliceList) ProtoMessage() {}
-func (*EndpointSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{5}
-}
-func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointSliceList.Merge(m, src)
-}
-func (m *EndpointSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointSliceList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
-
-func (m *ForNode) Reset() { *m = ForNode{} }
-func (*ForNode) ProtoMessage() {}
-func (*ForNode) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{6}
-}
-func (m *ForNode) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ForNode) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ForNode.Merge(m, src)
-}
-func (m *ForNode) XXX_Size() int {
- return m.Size()
-}
-func (m *ForNode) XXX_DiscardUnknown() {
- xxx_messageInfo_ForNode.DiscardUnknown(m)
-}
+func (m *EndpointHints) Reset() { *m = EndpointHints{} }
-var xxx_messageInfo_ForNode proto.InternalMessageInfo
-
-func (m *ForZone) Reset() { *m = ForZone{} }
-func (*ForZone) ProtoMessage() {}
-func (*ForZone) Descriptor() ([]byte, []int) {
- return fileDescriptor_2237b452324cf77e, []int{7}
-}
-func (m *ForZone) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ForZone) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ForZone.Merge(m, src)
-}
-func (m *ForZone) XXX_Size() int {
- return m.Size()
-}
-func (m *ForZone) XXX_DiscardUnknown() {
- xxx_messageInfo_ForZone.DiscardUnknown(m)
-}
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-var xxx_messageInfo_ForZone proto.InternalMessageInfo
+func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
-func init() {
- proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1.Endpoint")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1.Endpoint.DeprecatedTopologyEntry")
- proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1.EndpointConditions")
- proto.RegisterType((*EndpointHints)(nil), "k8s.io.api.discovery.v1.EndpointHints")
- proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1.EndpointPort")
- proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1.EndpointSlice")
- proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1.EndpointSliceList")
- proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1.ForNode")
- proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1.ForZone")
-}
+func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
-func init() {
- proto.RegisterFile("k8s.io/api/discovery/v1/generated.proto", fileDescriptor_2237b452324cf77e)
-}
+func (m *ForNode) Reset() { *m = ForNode{} }
-var fileDescriptor_2237b452324cf77e = []byte{
- // 902 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0x8e, 0x9b, 0x9a, 0xda, 0xe3, 0x56, 0xec, 0x8e, 0x90, 0x6a, 0x05, 0x64, 0x07, 0xa3, 0x85,
- 0x48, 0x15, 0x0e, 0xad, 0x10, 0x5a, 0x90, 0x38, 0xd4, 0x6c, 0xd9, 0xe5, 0x57, 0xa9, 0x66, 0x7b,
- 0x5a, 0x21, 0x81, 0x6b, 0xbf, 0x3a, 0xa6, 0x8d, 0xc7, 0xf2, 0x4c, 0x22, 0x85, 0x13, 0x17, 0xce,
- 0xf0, 0x9f, 0xf0, 0x1f, 0x70, 0x44, 0x3d, 0xee, 0x8d, 0x3d, 0x59, 0xd4, 0xfc, 0x0b, 0x9c, 0xf6,
- 0x84, 0x66, 0xfc, 0x33, 0xa4, 0x51, 0xf6, 0xe6, 0xf9, 0xe6, 0x7b, 0xdf, 0x7b, 0xf3, 0xcd, 0x7b,
- 0x23, 0xa3, 0xf7, 0xae, 0x1e, 0x32, 0x37, 0xa6, 0x63, 0x3f, 0x8d, 0xc7, 0x61, 0xcc, 0x02, 0x3a,
- 0x87, 0x6c, 0x31, 0x9e, 0x1f, 0x8e, 0x23, 0x48, 0x20, 0xf3, 0x39, 0x84, 0x6e, 0x9a, 0x51, 0x4e,
- 0xf1, 0x7e, 0x49, 0x74, 0xfd, 0x34, 0x76, 0x1b, 0xa2, 0x3b, 0x3f, 0x1c, 0xbc, 0x1f, 0xc5, 0x7c,
- 0x32, 0xbb, 0x70, 0x03, 0x3a, 0x1d, 0x47, 0x34, 0xa2, 0x63, 0xc9, 0xbf, 0x98, 0x5d, 0xca, 0x95,
- 0x5c, 0xc8, 0xaf, 0x52, 0x67, 0xe0, 0x74, 0x12, 0x06, 0x34, 0x83, 0x3b, 0x72, 0x0d, 0x3e, 0x6c,
- 0x39, 0x53, 0x3f, 0x98, 0xc4, 0x89, 0xa8, 0x29, 0xbd, 0x8a, 0x04, 0xc0, 0xc6, 0x53, 0xe0, 0xfe,
- 0x5d, 0x51, 0xe3, 0x75, 0x51, 0xd9, 0x2c, 0xe1, 0xf1, 0x14, 0x56, 0x02, 0x3e, 0xda, 0x14, 0xc0,
- 0x82, 0x09, 0x4c, 0xfd, 0xff, 0xc7, 0x39, 0xff, 0x6e, 0x23, 0xed, 0x24, 0x09, 0x53, 0x1a, 0x27,
- 0x1c, 0x1f, 0x20, 0xdd, 0x0f, 0xc3, 0x0c, 0x18, 0x03, 0x66, 0x2a, 0xc3, 0xfe, 0x48, 0xf7, 0xf6,
- 0x8a, 0xdc, 0xd6, 0x8f, 0x6b, 0x90, 0xb4, 0xfb, 0xf8, 0x7b, 0x84, 0x02, 0x9a, 0x84, 0x31, 0x8f,
- 0x69, 0xc2, 0xcc, 0xad, 0xa1, 0x32, 0x32, 0x8e, 0x0e, 0xdc, 0x35, 0xce, 0xba, 0x75, 0x8e, 0xcf,
- 0x9a, 0x10, 0x0f, 0xdf, 0xe4, 0x76, 0xaf, 0xc8, 0x6d, 0xd4, 0x62, 0xa4, 0x23, 0x89, 0x47, 0x48,
- 0x9b, 0x50, 0xc6, 0x13, 0x7f, 0x0a, 0x66, 0x7f, 0xa8, 0x8c, 0x74, 0x6f, 0xb7, 0xc8, 0x6d, 0xed,
- 0x49, 0x85, 0x91, 0x66, 0x17, 0x9f, 0x21, 0x9d, 0xfb, 0x59, 0x04, 0x9c, 0xc0, 0xa5, 0xb9, 0x2d,
- 0x2b, 0x79, 0xa7, 0x5b, 0x89, 0xb8, 0x1b, 0x51, 0xc4, 0xb7, 0x17, 0x3f, 0x42, 0x20, 0x48, 0x90,
- 0x41, 0x12, 0x40, 0x79, 0xb8, 0xf3, 0x3a, 0x92, 0xb4, 0x22, 0xf8, 0x17, 0x05, 0xe1, 0x10, 0xd2,
- 0x0c, 0x02, 0xe1, 0xd5, 0x39, 0x4d, 0xe9, 0x35, 0x8d, 0x16, 0xa6, 0x3a, 0xec, 0x8f, 0x8c, 0xa3,
- 0x8f, 0x37, 0x9e, 0xd2, 0x7d, 0xb4, 0x12, 0x7b, 0x92, 0xf0, 0x6c, 0xe1, 0x0d, 0xaa, 0x33, 0xe3,
- 0x55, 0x02, 0xb9, 0x23, 0xa1, 0xf0, 0x20, 0xa1, 0x21, 0x9c, 0x0a, 0x0f, 0x5e, 0x6b, 0x3d, 0x38,
- 0xad, 0x30, 0xd2, 0xec, 0xe2, 0xb7, 0xd0, 0xf6, 0x4f, 0x34, 0x01, 0x73, 0x47, 0xb2, 0xb4, 0x22,
- 0xb7, 0xb7, 0x9f, 0xd1, 0x04, 0x88, 0x44, 0xf1, 0x63, 0xa4, 0x4e, 0xe2, 0x84, 0x33, 0x53, 0x93,
- 0xee, 0xbc, 0xbb, 0xf1, 0x04, 0x4f, 0x04, 0xdb, 0xd3, 0x8b, 0xdc, 0x56, 0xe5, 0x27, 0x29, 0xe3,
- 0x07, 0x27, 0x68, 0x7f, 0xcd, 0xd9, 0xf0, 0x3d, 0xd4, 0xbf, 0x82, 0x85, 0xa9, 0x88, 0x02, 0x88,
- 0xf8, 0xc4, 0x6f, 0x20, 0x75, 0xee, 0x5f, 0xcf, 0x40, 0x76, 0x87, 0x4e, 0xca, 0xc5, 0x27, 0x5b,
- 0x0f, 0x15, 0xe7, 0x57, 0x05, 0xe1, 0xd5, 0x96, 0xc0, 0x36, 0x52, 0x33, 0xf0, 0xc3, 0x52, 0x44,
- 0x2b, 0xd3, 0x13, 0x01, 0x90, 0x12, 0xc7, 0x0f, 0xd0, 0x0e, 0x83, 0x6c, 0x1e, 0x27, 0x91, 0xd4,
- 0xd4, 0x3c, 0xa3, 0xc8, 0xed, 0x9d, 0xa7, 0x25, 0x44, 0xea, 0x3d, 0x7c, 0x88, 0x0c, 0x0e, 0xd9,
- 0x34, 0x4e, 0x7c, 0x2e, 0xa8, 0x7d, 0x49, 0x7d, 0xbd, 0xc8, 0x6d, 0xe3, 0xbc, 0x85, 0x49, 0x97,
- 0xe3, 0xfc, 0xae, 0xa0, 0xbd, 0xa5, 0xc3, 0xe3, 0x53, 0xa4, 0x5d, 0xd2, 0x4c, 0x98, 0x58, 0x0e,
- 0x83, 0x71, 0x34, 0x5c, 0x6b, 0xdb, 0xe7, 0x25, 0xd1, 0xbb, 0x57, 0xdd, 0xaf, 0x56, 0x01, 0x8c,
- 0x34, 0x1a, 0x95, 0x9e, 0xb8, 0x3a, 0x31, 0x2e, 0x1b, 0xf5, 0x04, 0x71, 0x49, 0x4f, 0x46, 0x92,
- 0x46, 0xc3, 0xf9, 0x53, 0x41, 0xbb, 0x75, 0xc5, 0x67, 0x34, 0xe3, 0xa2, 0x05, 0xe4, 0xb0, 0x28,
- 0x6d, 0x0b, 0xc8, 0x26, 0x91, 0x28, 0x7e, 0x8c, 0x34, 0x39, 0xf2, 0x01, 0xbd, 0x2e, 0xef, 0xc3,
- 0x3b, 0x10, 0xc2, 0x67, 0x15, 0xf6, 0x32, 0xb7, 0xdf, 0x5c, 0x7d, 0xce, 0xdc, 0x7a, 0x9b, 0x34,
- 0xc1, 0x22, 0x4d, 0x4a, 0x33, 0x2e, 0x5d, 0x55, 0xcb, 0x34, 0x22, 0x3d, 0x91, 0xa8, 0xb0, 0xde,
- 0x4f, 0xd3, 0x3a, 0x4c, 0x4e, 0xa3, 0x5e, 0x5a, 0x7f, 0xdc, 0xc2, 0xa4, 0xcb, 0x71, 0xfe, 0xda,
- 0x6a, 0xad, 0x7f, 0x7a, 0x1d, 0x07, 0x80, 0x7f, 0x40, 0x9a, 0x78, 0x19, 0x43, 0x9f, 0xfb, 0xf2,
- 0x34, 0xc6, 0xd1, 0x07, 0x1d, 0xab, 0x9a, 0x07, 0xce, 0x4d, 0xaf, 0x22, 0x01, 0x30, 0x57, 0xb0,
- 0xdb, 0x09, 0xff, 0x06, 0xb8, 0xdf, 0x3e, 0x2f, 0x2d, 0x46, 0x1a, 0x55, 0xfc, 0x08, 0x19, 0xd5,
- 0x53, 0x76, 0xbe, 0x48, 0xa1, 0x2a, 0xd3, 0xa9, 0x42, 0x8c, 0xe3, 0x76, 0xeb, 0xe5, 0xf2, 0x92,
- 0x74, 0xc3, 0x30, 0x41, 0x3a, 0x54, 0x85, 0xd7, 0x77, 0xfa, 0xf6, 0xc6, 0xd1, 0xf2, 0xee, 0x57,
- 0x69, 0xf4, 0x1a, 0x61, 0xa4, 0x95, 0xc1, 0x5f, 0x22, 0x55, 0x18, 0xc9, 0xcc, 0xbe, 0xd4, 0x7b,
- 0xb0, 0x51, 0x4f, 0x98, 0xef, 0xed, 0x55, 0x9a, 0xaa, 0x58, 0x31, 0x52, 0x4a, 0x38, 0x7f, 0x28,
- 0xe8, 0xfe, 0x92, 0xb3, 0x5f, 0xc7, 0x8c, 0xe3, 0xef, 0x56, 0xdc, 0x75, 0x5f, 0xcd, 0x5d, 0x11,
- 0x2d, 0xbd, 0x6d, 0xda, 0xb2, 0x46, 0x3a, 0xce, 0x7e, 0x85, 0xd4, 0x98, 0xc3, 0xb4, 0xf6, 0x63,
- 0xf3, 0x53, 0x23, 0x0b, 0x6b, 0x0f, 0xf0, 0x85, 0x08, 0x26, 0xa5, 0x86, 0x73, 0x80, 0x76, 0xaa,
- 0xce, 0xc7, 0xc3, 0xa5, 0xee, 0xde, 0xad, 0xe8, 0x9d, 0x0e, 0xaf, 0xc8, 0x62, 0xd8, 0x36, 0x93,
- 0xbd, 0x4f, 0x6f, 0x6e, 0xad, 0xde, 0xf3, 0x5b, 0xab, 0xf7, 0xe2, 0xd6, 0xea, 0xfd, 0x5c, 0x58,
- 0xca, 0x4d, 0x61, 0x29, 0xcf, 0x0b, 0x4b, 0x79, 0x51, 0x58, 0xca, 0xdf, 0x85, 0xa5, 0xfc, 0xf6,
- 0x8f, 0xd5, 0x7b, 0xb6, 0xbf, 0xe6, 0x97, 0xe2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xfc,
- 0xbe, 0xad, 0x6c, 0x08, 0x00, 0x00,
-}
+func (m *ForZone) Reset() { *m = ForZone{} }
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -398,7 +100,7 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.DeprecatedTopology {
keysForDeprecatedTopology = append(keysForDeprecatedTopology, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
+ sort.Strings(keysForDeprecatedTopology)
for iNdEx := len(keysForDeprecatedTopology) - 1; iNdEx >= 0; iNdEx-- {
v := m.DeprecatedTopology[string(keysForDeprecatedTopology[iNdEx])]
baseI := i
@@ -977,7 +679,7 @@ func (this *Endpoint) String() string {
for k := range this.DeprecatedTopology {
keysForDeprecatedTopology = append(keysForDeprecatedTopology, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
+ sort.Strings(keysForDeprecatedTopology)
mapStringForDeprecatedTopology := "map[string]string{"
for _, k := range keysForDeprecatedTopology {
mapStringForDeprecatedTopology += fmt.Sprintf("%v: %v,", k, this.DeprecatedTopology[k])
diff --git a/operator/vendor/k8s.io/api/discovery/v1/generated.proto b/operator/vendor/k8s.io/api/discovery/v1/generated.proto
index 569d8a91..97582a1b 100644
--- a/operator/vendor/k8s.io/api/discovery/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/discovery/v1/generated.proto
@@ -114,8 +114,6 @@ message EndpointHints {
// forNodes indicates the node(s) this endpoint should be consumed by when
// using topology aware routing. May contain a maximum of 8 entries.
- // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
- // feature gate is enabled.
// +listType=atomic
repeated ForNode forNodes = 2;
}
diff --git a/operator/vendor/k8s.io/api/discovery/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/discovery/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..9e246b87
--- /dev/null
+++ b/operator/vendor/k8s.io/api/discovery/v1/generated.protomessage.pb.go
@@ -0,0 +1,38 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*Endpoint) ProtoMessage() {}
+
+func (*EndpointConditions) ProtoMessage() {}
+
+func (*EndpointHints) ProtoMessage() {}
+
+func (*EndpointPort) ProtoMessage() {}
+
+func (*EndpointSlice) ProtoMessage() {}
+
+func (*EndpointSliceList) ProtoMessage() {}
+
+func (*ForNode) ProtoMessage() {}
+
+func (*ForZone) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/discovery/v1/types.go b/operator/vendor/k8s.io/api/discovery/v1/types.go
index 6f269531..ca78ce38 100644
--- a/operator/vendor/k8s.io/api/discovery/v1/types.go
+++ b/operator/vendor/k8s.io/api/discovery/v1/types.go
@@ -166,8 +166,6 @@ type EndpointHints struct {
// forNodes indicates the node(s) this endpoint should be consumed by when
// using topology aware routing. May contain a maximum of 8 entries.
- // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
- // feature gate is enabled.
// +listType=atomic
ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
}
diff --git a/operator/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
index ac5b853b..ba8b0363 100644
--- a/operator/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/discovery/v1/types_swagger_doc_generated.go
@@ -57,7 +57,7 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
var map_EndpointHints = map[string]string{
"": "EndpointHints provides hints describing how an endpoint should be consumed.",
"forZones": "forZones indicates the zone(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries.",
- "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
+ "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries.",
}
func (EndpointHints) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/discovery/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/discovery/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..2053c561
--- /dev/null
+++ b/operator/vendor/k8s.io/api/discovery/v1/zz_generated.model_name.go
@@ -0,0 +1,62 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Endpoint) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.Endpoint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointConditions) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.EndpointConditions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointHints) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.EndpointHints"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointPort) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.EndpointPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointSlice) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.EndpointSlice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointSliceList) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.EndpointSliceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ForNode) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.ForNode"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ForZone) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1.ForZone"
+}
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/doc.go b/operator/vendor/k8s.io/api/discovery/v1beta1/doc.go
index f12087ef..aa5ba744 100644
--- a/operator/vendor/k8s.io/api/discovery/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.discovery.v1beta1
+
// +groupName=discovery.k8s.io
package v1beta1
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
index de325778..7a7613d0 100644
--- a/operator/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
@@ -23,327 +23,31 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Endpoint) Reset() { *m = Endpoint{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
-func (m *Endpoint) Reset() { *m = Endpoint{} }
-func (*Endpoint) ProtoMessage() {}
-func (*Endpoint) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{0}
-}
-func (m *Endpoint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Endpoint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Endpoint.Merge(m, src)
-}
-func (m *Endpoint) XXX_Size() int {
- return m.Size()
-}
-func (m *Endpoint) XXX_DiscardUnknown() {
- xxx_messageInfo_Endpoint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Endpoint proto.InternalMessageInfo
-
-func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
-func (*EndpointConditions) ProtoMessage() {}
-func (*EndpointConditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{1}
-}
-func (m *EndpointConditions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointConditions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointConditions.Merge(m, src)
-}
-func (m *EndpointConditions) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointConditions) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointConditions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo
-
-func (m *EndpointHints) Reset() { *m = EndpointHints{} }
-func (*EndpointHints) ProtoMessage() {}
-func (*EndpointHints) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{2}
-}
-func (m *EndpointHints) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointHints) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointHints.Merge(m, src)
-}
-func (m *EndpointHints) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointHints) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointHints.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointHints proto.InternalMessageInfo
-
-func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-func (*EndpointPort) ProtoMessage() {}
-func (*EndpointPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{3}
-}
-func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointPort.Merge(m, src)
-}
-func (m *EndpointPort) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointPort) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointPort.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
-
-func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
-func (*EndpointSlice) ProtoMessage() {}
-func (*EndpointSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{4}
-}
-func (m *EndpointSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointSlice.Merge(m, src)
-}
-func (m *EndpointSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointSlice.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo
-
-func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
-func (*EndpointSliceList) ProtoMessage() {}
-func (*EndpointSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{5}
-}
-func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EndpointSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EndpointSliceList.Merge(m, src)
-}
-func (m *EndpointSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *EndpointSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_EndpointSliceList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
-
-func (m *ForNode) Reset() { *m = ForNode{} }
-func (*ForNode) ProtoMessage() {}
-func (*ForNode) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{6}
-}
-func (m *ForNode) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ForNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ForNode) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ForNode.Merge(m, src)
-}
-func (m *ForNode) XXX_Size() int {
- return m.Size()
-}
-func (m *ForNode) XXX_DiscardUnknown() {
- xxx_messageInfo_ForNode.DiscardUnknown(m)
-}
+func (m *EndpointHints) Reset() { *m = EndpointHints{} }
-var xxx_messageInfo_ForNode proto.InternalMessageInfo
-
-func (m *ForZone) Reset() { *m = ForZone{} }
-func (*ForZone) ProtoMessage() {}
-func (*ForZone) Descriptor() ([]byte, []int) {
- return fileDescriptor_6555bad15de200e0, []int{7}
-}
-func (m *ForZone) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ForZone) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ForZone.Merge(m, src)
-}
-func (m *ForZone) XXX_Size() int {
- return m.Size()
-}
-func (m *ForZone) XXX_DiscardUnknown() {
- xxx_messageInfo_ForZone.DiscardUnknown(m)
-}
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
-var xxx_messageInfo_ForZone proto.InternalMessageInfo
+func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
-func init() {
- proto.RegisterType((*Endpoint)(nil), "k8s.io.api.discovery.v1beta1.Endpoint")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.discovery.v1beta1.Endpoint.TopologyEntry")
- proto.RegisterType((*EndpointConditions)(nil), "k8s.io.api.discovery.v1beta1.EndpointConditions")
- proto.RegisterType((*EndpointHints)(nil), "k8s.io.api.discovery.v1beta1.EndpointHints")
- proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.discovery.v1beta1.EndpointPort")
- proto.RegisterType((*EndpointSlice)(nil), "k8s.io.api.discovery.v1beta1.EndpointSlice")
- proto.RegisterType((*EndpointSliceList)(nil), "k8s.io.api.discovery.v1beta1.EndpointSliceList")
- proto.RegisterType((*ForNode)(nil), "k8s.io.api.discovery.v1beta1.ForNode")
- proto.RegisterType((*ForZone)(nil), "k8s.io.api.discovery.v1beta1.ForZone")
-}
+func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
-func init() {
- proto.RegisterFile("k8s.io/api/discovery/v1beta1/generated.proto", fileDescriptor_6555bad15de200e0)
-}
+func (m *ForNode) Reset() { *m = ForNode{} }
-var fileDescriptor_6555bad15de200e0 = []byte{
- // 877 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x4f, 0x6f, 0xe4, 0x34,
- 0x1c, 0x9d, 0x74, 0x1a, 0x9a, 0x78, 0x5a, 0xb1, 0x6b, 0x71, 0x18, 0x95, 0x2a, 0x19, 0x05, 0x2d,
- 0x1a, 0x51, 0x48, 0x68, 0xb5, 0x42, 0x2b, 0x38, 0x35, 0xb0, 0xb0, 0x48, 0xcb, 0x6e, 0xe5, 0x56,
- 0x42, 0x5a, 0x71, 0xc0, 0x93, 0xb8, 0x19, 0xd3, 0x26, 0x8e, 0x62, 0x77, 0xa4, 0xb9, 0xf1, 0x0d,
- 0xe0, 0xb3, 0x70, 0xe3, 0x8c, 0x84, 0x7a, 0xdc, 0xe3, 0x9e, 0x22, 0x1a, 0xbe, 0xc5, 0x9e, 0x90,
- 0x1d, 0xe7, 0xcf, 0x30, 0xd0, 0xce, 0x2d, 0x7e, 0x7e, 0xef, 0xfd, 0xfe, 0xd9, 0x56, 0xc0, 0xc7,
- 0x97, 0x4f, 0xb8, 0x4f, 0x59, 0x80, 0x73, 0x1a, 0xc4, 0x94, 0x47, 0x6c, 0x41, 0x8a, 0x65, 0xb0,
- 0x38, 0x9a, 0x11, 0x81, 0x8f, 0x82, 0x84, 0x64, 0xa4, 0xc0, 0x82, 0xc4, 0x7e, 0x5e, 0x30, 0xc1,
- 0xe0, 0x41, 0xcd, 0xf6, 0x71, 0x4e, 0xfd, 0x96, 0xed, 0x6b, 0xf6, 0xfe, 0x27, 0x09, 0x15, 0xf3,
- 0xeb, 0x99, 0x1f, 0xb1, 0x34, 0x48, 0x58, 0xc2, 0x02, 0x25, 0x9a, 0x5d, 0x5f, 0xa8, 0x95, 0x5a,
- 0xa8, 0xaf, 0xda, 0x6c, 0xdf, 0xeb, 0x85, 0x8e, 0x58, 0x41, 0x82, 0xc5, 0x5a, 0xc0, 0xfd, 0xc7,
- 0x1d, 0x27, 0xc5, 0xd1, 0x9c, 0x66, 0x32, 0xbb, 0xfc, 0x32, 0x91, 0x00, 0x0f, 0x52, 0x22, 0xf0,
- 0x7f, 0xa9, 0x82, 0xff, 0x53, 0x15, 0xd7, 0x99, 0xa0, 0x29, 0x59, 0x13, 0x7c, 0x76, 0x9f, 0x80,
- 0x47, 0x73, 0x92, 0xe2, 0x7f, 0xeb, 0xbc, 0xdf, 0xb6, 0x81, 0xf5, 0x34, 0x8b, 0x73, 0x46, 0x33,
- 0x01, 0x0f, 0x81, 0x8d, 0xe3, 0xb8, 0x20, 0x9c, 0x13, 0x3e, 0x36, 0x26, 0xc3, 0xa9, 0x1d, 0xee,
- 0x55, 0xa5, 0x6b, 0x9f, 0x34, 0x20, 0xea, 0xf6, 0x61, 0x0c, 0x40, 0xc4, 0xb2, 0x98, 0x0a, 0xca,
- 0x32, 0x3e, 0xde, 0x9a, 0x18, 0xd3, 0xd1, 0xf1, 0xa7, 0xfe, 0x5d, 0xed, 0xf5, 0x9b, 0x40, 0x5f,
- 0xb6, 0xba, 0x10, 0xde, 0x94, 0xee, 0xa0, 0x2a, 0x5d, 0xd0, 0x61, 0xa8, 0xe7, 0x0b, 0xa7, 0xc0,
- 0x9a, 0x33, 0x2e, 0x32, 0x9c, 0x92, 0xf1, 0x70, 0x62, 0x4c, 0xed, 0x70, 0xb7, 0x2a, 0x5d, 0xeb,
- 0x99, 0xc6, 0x50, 0xbb, 0x0b, 0x4f, 0x81, 0x2d, 0x70, 0x91, 0x10, 0x81, 0xc8, 0xc5, 0x78, 0x5b,
- 0xa5, 0xf3, 0x41, 0x3f, 0x1d, 0x39, 0x20, 0x7f, 0x71, 0xe4, 0xbf, 0x9c, 0xfd, 0x44, 0x22, 0x49,
- 0x22, 0x05, 0xc9, 0x22, 0x52, 0x57, 0x78, 0xde, 0x28, 0x51, 0x67, 0x02, 0x67, 0xc0, 0x12, 0x2c,
- 0x67, 0x57, 0x2c, 0x59, 0x8e, 0xcd, 0xc9, 0x70, 0x3a, 0x3a, 0x7e, 0xbc, 0x59, 0x7d, 0xfe, 0xb9,
- 0x96, 0x3d, 0xcd, 0x44, 0xb1, 0x0c, 0x1f, 0xe8, 0x1a, 0xad, 0x06, 0x46, 0xad, 0xaf, 0xac, 0x2f,
- 0x63, 0x31, 0x79, 0x21, 0xeb, 0x7b, 0xa7, 0xab, 0xef, 0x85, 0xc6, 0x50, 0xbb, 0x0b, 0x9f, 0x03,
- 0x73, 0x4e, 0x33, 0xc1, 0xc7, 0x3b, 0xaa, 0xb6, 0xc3, 0xcd, 0x52, 0x79, 0x26, 0x25, 0xa1, 0x5d,
- 0x95, 0xae, 0xa9, 0x3e, 0x51, 0x6d, 0xb2, 0xff, 0x05, 0xd8, 0x5b, 0x49, 0x12, 0x3e, 0x00, 0xc3,
- 0x4b, 0xb2, 0x1c, 0x1b, 0x32, 0x07, 0x24, 0x3f, 0xe1, 0x7b, 0xc0, 0x5c, 0xe0, 0xab, 0x6b, 0xa2,
- 0x66, 0x6b, 0xa3, 0x7a, 0xf1, 0xf9, 0xd6, 0x13, 0xc3, 0xfb, 0xc5, 0x00, 0x70, 0x7d, 0x96, 0xd0,
- 0x05, 0x66, 0x41, 0x70, 0x5c, 0x9b, 0x58, 0x75, 0x50, 0x24, 0x01, 0x54, 0xe3, 0xf0, 0x11, 0xd8,
- 0xe1, 0xa4, 0x58, 0xd0, 0x2c, 0x51, 0x9e, 0x56, 0x38, 0xaa, 0x4a, 0x77, 0xe7, 0xac, 0x86, 0x50,
- 0xb3, 0x07, 0x8f, 0xc0, 0x48, 0x90, 0x22, 0xa5, 0x19, 0x16, 0x92, 0x3a, 0x54, 0xd4, 0x77, 0xab,
- 0xd2, 0x1d, 0x9d, 0x77, 0x30, 0xea, 0x73, 0xbc, 0xdf, 0x0d, 0xb0, 0xb7, 0x52, 0x32, 0x3c, 0x03,
- 0xd6, 0x05, 0x2b, 0x5e, 0xb1, 0x4c, 0x1f, 0xe5, 0xd1, 0xf1, 0xa3, 0xbb, 0x3b, 0xf6, 0x75, 0xcd,
- 0xee, 0xa6, 0xa5, 0x01, 0x8e, 0x5a, 0x23, 0x6d, 0x2a, 0x87, 0x23, 0x4f, 0xfc, 0x66, 0xa6, 0x92,
- 0xbd, 0x62, 0xaa, 0xe4, 0xa8, 0x35, 0xf2, 0xfe, 0x34, 0xc0, 0x6e, 0x93, 0xfb, 0x29, 0x2b, 0x04,
- 0x3c, 0x00, 0xdb, 0xea, 0xbc, 0xab, 0x59, 0x84, 0x56, 0x55, 0xba, 0xdb, 0xea, 0x2c, 0x28, 0x14,
- 0x7e, 0x03, 0x2c, 0x75, 0x75, 0x23, 0x76, 0x55, 0x4f, 0x26, 0x3c, 0x94, 0xc6, 0xa7, 0x1a, 0x7b,
- 0x5b, 0xba, 0xef, 0xaf, 0x3f, 0x4b, 0x7e, 0xb3, 0x8d, 0x5a, 0xb1, 0x0c, 0x93, 0xb3, 0x42, 0xa8,
- 0xfe, 0x9a, 0x75, 0x18, 0x19, 0x1e, 0x29, 0x54, 0x0e, 0x01, 0xe7, 0x79, 0x23, 0x53, 0x17, 0xca,
- 0xae, 0x87, 0x70, 0xd2, 0xc1, 0xa8, 0xcf, 0xf1, 0x6e, 0xb7, 0xba, 0x21, 0x9c, 0x5d, 0xd1, 0x88,
- 0xc0, 0x1f, 0x81, 0x25, 0x5f, 0xb8, 0x18, 0x0b, 0xac, 0xaa, 0x59, 0x7d, 0x21, 0xda, 0x87, 0xca,
- 0xcf, 0x2f, 0x13, 0x09, 0x70, 0x5f, 0xb2, 0xbb, 0x4b, 0xfa, 0x1d, 0x11, 0xb8, 0x7b, 0x21, 0x3a,
- 0x0c, 0xb5, 0xae, 0xf0, 0x2b, 0x30, 0xd2, 0x4f, 0xd2, 0xf9, 0x32, 0x27, 0x3a, 0x4d, 0x4f, 0x4b,
- 0x46, 0x27, 0xdd, 0xd6, 0xdb, 0xd5, 0x25, 0xea, 0xcb, 0xe0, 0xf7, 0xc0, 0x26, 0x3a, 0xf1, 0x66,
- 0xb0, 0x1f, 0x6e, 0x76, 0xbf, 0xc2, 0x87, 0x3a, 0x96, 0xdd, 0x20, 0x1c, 0x75, 0x5e, 0xf0, 0x25,
- 0x30, 0x65, 0x37, 0xf9, 0x78, 0xa8, 0x4c, 0x3f, 0xda, 0xcc, 0x54, 0x8e, 0x21, 0xdc, 0xd3, 0xc6,
- 0xa6, 0x5c, 0x71, 0x54, 0xfb, 0x78, 0x7f, 0x18, 0xe0, 0xe1, 0x4a, 0x8f, 0x9f, 0x53, 0x2e, 0xe0,
- 0x0f, 0x6b, 0x7d, 0xf6, 0x37, 0xeb, 0xb3, 0x54, 0xab, 0x2e, 0xb7, 0x07, 0xb4, 0x41, 0x7a, 0x3d,
- 0x3e, 0x05, 0x26, 0x15, 0x24, 0x6d, 0x3a, 0xb3, 0xe1, 0xcb, 0xa3, 0xb2, 0xeb, 0xaa, 0xf8, 0x56,
- 0x3a, 0xa0, 0xda, 0xc8, 0x3b, 0x04, 0x3b, 0xfa, 0x22, 0xc0, 0xc9, 0xca, 0x61, 0xdf, 0xd5, 0xf4,
- 0xde, 0x81, 0xd7, 0x64, 0x79, 0x01, 0xef, 0x27, 0x87, 0xe1, 0xcd, 0xad, 0x33, 0x78, 0x7d, 0xeb,
- 0x0c, 0xde, 0xdc, 0x3a, 0x83, 0x9f, 0x2b, 0xc7, 0xb8, 0xa9, 0x1c, 0xe3, 0x75, 0xe5, 0x18, 0x6f,
- 0x2a, 0xc7, 0xf8, 0xab, 0x72, 0x8c, 0x5f, 0xff, 0x76, 0x06, 0xaf, 0x0e, 0xee, 0xfa, 0x67, 0xf8,
- 0x27, 0x00, 0x00, 0xff, 0xff, 0x76, 0x8e, 0x48, 0x7e, 0x52, 0x08, 0x00, 0x00,
-}
+func (m *ForZone) Reset() { *m = ForZone{} }
func (m *Endpoint) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -389,7 +93,7 @@ func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Topology {
keysForTopology = append(keysForTopology, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForTopology)
+ sort.Strings(keysForTopology)
for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- {
v := m.Topology[string(keysForTopology[iNdEx])]
baseI := i
@@ -964,7 +668,7 @@ func (this *Endpoint) String() string {
for k := range this.Topology {
keysForTopology = append(keysForTopology, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForTopology)
+ sort.Strings(keysForTopology)
mapStringForTopology := "map[string]string{"
for _, k := range keysForTopology {
mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k])
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.proto
index 907050da..7b9d983b 100644
--- a/operator/vendor/k8s.io/api/discovery/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.proto
@@ -117,8 +117,6 @@ message EndpointHints {
// forNodes indicates the node(s) this endpoint should be consumed by when
// using topology aware routing. May contain a maximum of 8 entries.
- // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
- // feature gate is enabled.
// +listType=atomic
repeated ForNode forNodes = 2;
}
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..6e60bb13
--- /dev/null
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,38 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*Endpoint) ProtoMessage() {}
+
+func (*EndpointConditions) ProtoMessage() {}
+
+func (*EndpointHints) ProtoMessage() {}
+
+func (*EndpointPort) ProtoMessage() {}
+
+func (*EndpointSlice) ProtoMessage() {}
+
+func (*EndpointSliceList) ProtoMessage() {}
+
+func (*ForNode) ProtoMessage() {}
+
+func (*ForZone) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/types.go b/operator/vendor/k8s.io/api/discovery/v1beta1/types.go
index fa9d1eae..11ec1b39 100644
--- a/operator/vendor/k8s.io/api/discovery/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/types.go
@@ -164,8 +164,6 @@ type EndpointHints struct {
// forNodes indicates the node(s) this endpoint should be consumed by when
// using topology aware routing. May contain a maximum of 8 entries.
- // This is an Alpha feature and is only used when the PreferSameTrafficDistribution
- // feature gate is enabled.
// +listType=atomic
ForNodes []ForNode `json:"forNodes,omitempty" protobuf:"bytes,2,name=forNodes"`
}
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
index 72aa0cb9..acc83837 100644
--- a/operator/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
@@ -56,7 +56,7 @@ func (EndpointConditions) SwaggerDoc() map[string]string {
var map_EndpointHints = map[string]string{
"": "EndpointHints provides hints describing how an endpoint should be consumed.",
"forZones": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing. May contain a maximum of 8 entries.",
- "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries. This is an Alpha feature and is only used when the PreferSameTrafficDistribution feature gate is enabled.",
+ "forNodes": "forNodes indicates the node(s) this endpoint should be consumed by when using topology aware routing. May contain a maximum of 8 entries.",
}
func (EndpointHints) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/discovery/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/discovery/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..531c6672
--- /dev/null
+++ b/operator/vendor/k8s.io/api/discovery/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,62 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Endpoint) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.Endpoint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointConditions) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.EndpointConditions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointHints) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.EndpointHints"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointPort) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.EndpointPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointSlice) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.EndpointSlice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EndpointSliceList) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.EndpointSliceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ForNode) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.ForNode"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ForZone) OpenAPIModelName() string {
+ return "io.k8s.api.discovery.v1beta1.ForZone"
+}
diff --git a/operator/vendor/k8s.io/api/events/v1/doc.go b/operator/vendor/k8s.io/api/events/v1/doc.go
index 91163904..49e9733f 100644
--- a/operator/vendor/k8s.io/api/events/v1/doc.go
+++ b/operator/vendor/k8s.io/api/events/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.events.v1
+
// +groupName=events.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/events/v1/generated.pb.go b/operator/vendor/k8s.io/api/events/v1/generated.pb.go
index 96a6047e..82d88d95 100644
--- a/operator/vendor/k8s.io/api/events/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/events/v1/generated.pb.go
@@ -24,171 +24,18 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v11 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Event) Reset() { *m = Event{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *EventList) Reset() { *m = EventList{} }
-func (m *Event) Reset() { *m = Event{} }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_d3a3e1495c224e47, []int{0}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func (m *EventList) Reset() { *m = EventList{} }
-func (*EventList) ProtoMessage() {}
-func (*EventList) Descriptor() ([]byte, []int) {
- return fileDescriptor_d3a3e1495c224e47, []int{1}
-}
-func (m *EventList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventList.Merge(m, src)
-}
-func (m *EventList) XXX_Size() int {
- return m.Size()
-}
-func (m *EventList) XXX_DiscardUnknown() {
- xxx_messageInfo_EventList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventList proto.InternalMessageInfo
-
-func (m *EventSeries) Reset() { *m = EventSeries{} }
-func (*EventSeries) ProtoMessage() {}
-func (*EventSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_d3a3e1495c224e47, []int{2}
-}
-func (m *EventSeries) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventSeries.Merge(m, src)
-}
-func (m *EventSeries) XXX_Size() int {
- return m.Size()
-}
-func (m *EventSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_EventSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventSeries proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1.Event")
- proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1.EventList")
- proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1.EventSeries")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/events/v1/generated.proto", fileDescriptor_d3a3e1495c224e47)
-}
-
-var fileDescriptor_d3a3e1495c224e47 = []byte{
- // 759 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x4f, 0xdb, 0x48,
- 0x14, 0x8f, 0x81, 0x04, 0x32, 0xe1, 0x4f, 0x18, 0x90, 0x98, 0x05, 0xc9, 0xc9, 0x86, 0xd5, 0x2a,
- 0x5a, 0x69, 0xed, 0x05, 0xad, 0x56, 0xab, 0x3d, 0x2d, 0x26, 0xec, 0x8a, 0x0a, 0x8a, 0x34, 0x70,
- 0xaa, 0x7a, 0x60, 0xe2, 0x3c, 0x8c, 0x4b, 0xec, 0xb1, 0xc6, 0x93, 0x48, 0xdc, 0x7a, 0xa9, 0xd4,
- 0x63, 0xbf, 0x40, 0x3f, 0x40, 0xd5, 0x2f, 0xc2, 0x91, 0x23, 0xa7, 0xa8, 0xb8, 0x5f, 0xa4, 0xf2,
- 0xd8, 0x89, 0x43, 0xfe, 0xb4, 0xa9, 0x7a, 0xf3, 0xbc, 0xf7, 0xfb, 0xf3, 0xde, 0xcc, 0xcb, 0x0b,
- 0xfa, 0xe5, 0xe6, 0xef, 0xd0, 0x70, 0xb9, 0xc9, 0x02, 0xd7, 0x84, 0x2e, 0xf8, 0x32, 0x34, 0xbb,
- 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e, 0x37, 0x13, 0x94, 0xc1,
- 0x02, 0xd7, 0x48, 0x50, 0x46, 0x77, 0x6f, 0xfb, 0x77, 0xc7, 0x95, 0xd7, 0x9d, 0xa6, 0x61, 0x73,
- 0xcf, 0x74, 0xb8, 0xc3, 0x4d, 0x05, 0x6e, 0x76, 0xae, 0xd4, 0x49, 0x1d, 0xd4, 0x57, 0x22, 0xb2,
- 0x5d, 0x1b, 0xb2, 0xb2, 0xb9, 0x80, 0x09, 0x46, 0xdb, 0x7f, 0x66, 0x18, 0x8f, 0xd9, 0xd7, 0xae,
- 0x0f, 0xe2, 0xd6, 0x0c, 0x6e, 0x9c, 0x38, 0x10, 0x9a, 0x1e, 0x48, 0x36, 0x89, 0x65, 0x4e, 0x63,
- 0x89, 0x8e, 0x2f, 0x5d, 0x0f, 0xc6, 0x08, 0x7f, 0x7d, 0x8b, 0x10, 0xda, 0xd7, 0xe0, 0xb1, 0x51,
- 0x5e, 0xed, 0x7d, 0x11, 0xe5, 0x8f, 0xe2, 0xfe, 0xf1, 0x25, 0x5a, 0x8a, 0xab, 0x69, 0x31, 0xc9,
- 0x88, 0x56, 0xd5, 0xea, 0xa5, 0xfd, 0x3f, 0x8c, 0xec, 0x92, 0x06, 0xa2, 0x46, 0x70, 0xe3, 0xc4,
- 0x81, 0xd0, 0x88, 0xd1, 0x46, 0x77, 0xcf, 0x38, 0x6b, 0xbe, 0x02, 0x5b, 0x9e, 0x82, 0x64, 0x16,
- 0xbe, 0xeb, 0x55, 0x72, 0x51, 0xaf, 0x82, 0xb2, 0x18, 0x1d, 0xa8, 0xe2, 0x4b, 0x54, 0x54, 0x57,
- 0x7d, 0xe1, 0x7a, 0x40, 0xe6, 0x94, 0x85, 0x39, 0x9b, 0xc5, 0xa9, 0x6b, 0x0b, 0x1e, 0xd3, 0xac,
- 0xf5, 0xd4, 0xa1, 0x78, 0xd4, 0x57, 0xa2, 0x99, 0x28, 0x3e, 0x42, 0x85, 0x10, 0x84, 0x0b, 0x21,
- 0x99, 0x57, 0xf2, 0x3f, 0x1b, 0x93, 0x9e, 0xd9, 0x50, 0xdc, 0x73, 0x05, 0xb4, 0x50, 0xd4, 0xab,
- 0x14, 0x92, 0x6f, 0x9a, 0x92, 0xf1, 0x29, 0xda, 0x10, 0x10, 0x70, 0x21, 0x5d, 0xdf, 0x39, 0xe4,
- 0xbe, 0x14, 0xbc, 0xdd, 0x06, 0x41, 0x16, 0xaa, 0x5a, 0xbd, 0x68, 0xed, 0xa4, 0x15, 0x6c, 0xd0,
- 0x71, 0x08, 0x9d, 0xc4, 0xc3, 0xff, 0xa3, 0xf5, 0x41, 0xf8, 0xd8, 0x0f, 0x25, 0xf3, 0x6d, 0x20,
- 0x79, 0x25, 0xf6, 0x53, 0x2a, 0xb6, 0x4e, 0x47, 0x01, 0x74, 0x9c, 0x83, 0x7f, 0x45, 0x05, 0x66,
- 0x4b, 0x97, 0xfb, 0xa4, 0xa0, 0xd8, 0xab, 0x29, 0xbb, 0x70, 0xa0, 0xa2, 0x34, 0xcd, 0xc6, 0x38,
- 0x01, 0x2c, 0xe4, 0x3e, 0x59, 0x7c, 0x8a, 0xa3, 0x2a, 0x4a, 0xd3, 0x2c, 0xbe, 0x40, 0x45, 0x01,
- 0x0e, 0x13, 0x2d, 0xd7, 0x77, 0xc8, 0x92, 0xba, 0xb1, 0xdd, 0xe1, 0x1b, 0x8b, 0x67, 0x3a, 0x7b,
- 0x61, 0x0a, 0x57, 0x20, 0xc0, 0xb7, 0x87, 0x1e, 0x81, 0xf6, 0xd9, 0x34, 0x13, 0xc2, 0xcf, 0xd0,
- 0xa2, 0x80, 0x76, 0x3c, 0x63, 0xa4, 0x38, 0xbb, 0x66, 0x29, 0xea, 0x55, 0x16, 0x69, 0xc2, 0xa3,
- 0x7d, 0x01, 0x5c, 0x45, 0x0b, 0x3e, 0x97, 0x40, 0x90, 0xea, 0x63, 0x39, 0xf5, 0x5d, 0x78, 0xce,
- 0x25, 0x50, 0x95, 0x89, 0x11, 0xf2, 0x36, 0x00, 0x52, 0x7a, 0x8a, 0xb8, 0xb8, 0x0d, 0x80, 0xaa,
- 0x0c, 0x06, 0x54, 0x6e, 0x41, 0x20, 0xc0, 0x8e, 0x15, 0xcf, 0x79, 0x47, 0xd8, 0x40, 0x96, 0x55,
- 0x61, 0x95, 0x49, 0x85, 0x25, 0xc3, 0xa1, 0x60, 0x16, 0x49, 0xe5, 0xca, 0x8d, 0x11, 0x01, 0x3a,
- 0x26, 0x89, 0xdf, 0x6a, 0x88, 0x64, 0xc1, 0xff, 0x5c, 0x11, 0xaa, 0x99, 0x0c, 0x25, 0xf3, 0x02,
- 0xb2, 0xa2, 0xfc, 0x7e, 0x9b, 0x6d, 0xda, 0xd5, 0xa0, 0x57, 0x53, 0x6b, 0xd2, 0x98, 0xa2, 0x49,
- 0xa7, 0xba, 0xe1, 0x37, 0x1a, 0xda, 0xca, 0x92, 0x27, 0x6c, 0xb8, 0x92, 0xd5, 0xef, 0xae, 0xa4,
- 0x92, 0x56, 0xb2, 0xd5, 0x98, 0x2c, 0x49, 0xa7, 0x79, 0xe1, 0x03, 0xb4, 0x96, 0xa5, 0x0e, 0x79,
- 0xc7, 0x97, 0x64, 0xad, 0xaa, 0xd5, 0xf3, 0xd6, 0x56, 0x2a, 0xb9, 0xd6, 0x78, 0x9a, 0xa6, 0xa3,
- 0xf8, 0xda, 0x47, 0x0d, 0x25, 0x3f, 0xf5, 0x13, 0x37, 0x94, 0xf8, 0xe5, 0xd8, 0x8e, 0x32, 0x66,
- 0x6b, 0x24, 0x66, 0xab, 0x0d, 0x55, 0x4e, 0x9d, 0x97, 0xfa, 0x91, 0xa1, 0xfd, 0xf4, 0x2f, 0xca,
- 0xbb, 0x12, 0xbc, 0x90, 0xcc, 0x55, 0xe7, 0xeb, 0xa5, 0xfd, 0x9d, 0xaf, 0x2c, 0x0f, 0x6b, 0x25,
- 0xd5, 0xc9, 0x1f, 0xc7, 0x0c, 0x9a, 0x10, 0x6b, 0x1f, 0x34, 0x54, 0x1a, 0x5a, 0x2e, 0x78, 0x17,
- 0xe5, 0x6d, 0xd5, 0xb6, 0xa6, 0xda, 0x1e, 0x90, 0x92, 0x66, 0x93, 0x1c, 0xee, 0xa0, 0x72, 0x9b,
- 0x85, 0xf2, 0xac, 0x19, 0x82, 0xe8, 0x42, 0xeb, 0x47, 0xb6, 0xe3, 0x60, 0x5e, 0x4f, 0x46, 0x04,
- 0xe9, 0x98, 0x85, 0xf5, 0xcf, 0xdd, 0xa3, 0x9e, 0xbb, 0x7f, 0xd4, 0x73, 0x0f, 0x8f, 0x7a, 0xee,
- 0x75, 0xa4, 0x6b, 0x77, 0x91, 0xae, 0xdd, 0x47, 0xba, 0xf6, 0x10, 0xe9, 0xda, 0xa7, 0x48, 0xd7,
- 0xde, 0x7d, 0xd6, 0x73, 0x2f, 0x36, 0x27, 0xfd, 0x9b, 0x7e, 0x09, 0x00, 0x00, 0xff, 0xff, 0x6f,
- 0x4f, 0x7a, 0xe4, 0x64, 0x07, 0x00, 0x00,
-}
+func (m *EventSeries) Reset() { *m = EventSeries{} }
func (m *Event) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/events/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/events/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..09ff231d
--- /dev/null
+++ b/operator/vendor/k8s.io/api/events/v1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*Event) ProtoMessage() {}
+
+func (*EventList) ProtoMessage() {}
+
+func (*EventSeries) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/events/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/events/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..ac580c79
--- /dev/null
+++ b/operator/vendor/k8s.io/api/events/v1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Event) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1.Event"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventList) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1.EventList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventSeries) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1.EventSeries"
+}
diff --git a/operator/vendor/k8s.io/api/events/v1beta1/doc.go b/operator/vendor/k8s.io/api/events/v1beta1/doc.go
index e4864294..24023038 100644
--- a/operator/vendor/k8s.io/api/events/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/events/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.events.v1beta1
// +groupName=events.k8s.io
diff --git a/operator/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/events/v1beta1/generated.pb.go
index 5d7881e8..70eee4ab 100644
--- a/operator/vendor/k8s.io/api/events/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/events/v1beta1/generated.pb.go
@@ -24,171 +24,18 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v11 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Event) Reset() { *m = Event{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *EventList) Reset() { *m = EventList{} }
-func (m *Event) Reset() { *m = Event{} }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_99027a32dee7673b, []int{0}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func (m *EventList) Reset() { *m = EventList{} }
-func (*EventList) ProtoMessage() {}
-func (*EventList) Descriptor() ([]byte, []int) {
- return fileDescriptor_99027a32dee7673b, []int{1}
-}
-func (m *EventList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventList.Merge(m, src)
-}
-func (m *EventList) XXX_Size() int {
- return m.Size()
-}
-func (m *EventList) XXX_DiscardUnknown() {
- xxx_messageInfo_EventList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventList proto.InternalMessageInfo
-
-func (m *EventSeries) Reset() { *m = EventSeries{} }
-func (*EventSeries) ProtoMessage() {}
-func (*EventSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_99027a32dee7673b, []int{2}
-}
-func (m *EventSeries) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EventSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EventSeries.Merge(m, src)
-}
-func (m *EventSeries) XXX_Size() int {
- return m.Size()
-}
-func (m *EventSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_EventSeries.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EventSeries proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event")
- proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList")
- proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_99027a32dee7673b)
-}
-
-var fileDescriptor_99027a32dee7673b = []byte{
- // 764 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0xcf, 0x4f, 0xdb, 0x48,
- 0x14, 0xc7, 0x63, 0x20, 0x81, 0x4c, 0xf8, 0x11, 0x86, 0x03, 0x03, 0x2b, 0x39, 0x51, 0x90, 0x50,
- 0x76, 0xa5, 0xb5, 0x17, 0xb4, 0x5a, 0xed, 0x6d, 0x85, 0x09, 0x5b, 0x81, 0xa0, 0x48, 0x03, 0xa7,
- 0xaa, 0x07, 0x26, 0xce, 0xc3, 0xb8, 0xc4, 0x1e, 0x6b, 0x3c, 0x89, 0xc4, 0xad, 0x97, 0x4a, 0x3d,
- 0xf6, 0x6f, 0xe8, 0xad, 0xb7, 0xfe, 0x19, 0x1c, 0x39, 0x72, 0x8a, 0x8a, 0xfb, 0x8f, 0x54, 0x1e,
- 0x3b, 0x71, 0xc8, 0x0f, 0x91, 0xaa, 0x37, 0xfb, 0xbd, 0xef, 0xf7, 0xf3, 0xde, 0x8c, 0x5f, 0x5e,
- 0xd0, 0xef, 0xb7, 0xff, 0x86, 0x86, 0xcb, 0x4d, 0x16, 0xb8, 0x26, 0x74, 0xc1, 0x97, 0xa1, 0xd9,
- 0xdd, 0x6b, 0x82, 0x64, 0x7b, 0xa6, 0x03, 0x3e, 0x08, 0x26, 0xa1, 0x65, 0x04, 0x82, 0x4b, 0x8e,
- 0xb7, 0x12, 0xa9, 0xc1, 0x02, 0xd7, 0x48, 0xa4, 0x46, 0x2a, 0xdd, 0xfe, 0xd3, 0x71, 0xe5, 0x4d,
- 0xa7, 0x69, 0xd8, 0xdc, 0x33, 0x1d, 0xee, 0x70, 0x53, 0x39, 0x9a, 0x9d, 0x6b, 0xf5, 0xa6, 0x5e,
- 0xd4, 0x53, 0x42, 0xda, 0xae, 0x0d, 0x15, 0xb5, 0xb9, 0x00, 0xb3, 0x3b, 0x56, 0x6d, 0xfb, 0xef,
- 0x4c, 0xe3, 0x31, 0xfb, 0xc6, 0xf5, 0x41, 0xdc, 0x99, 0xc1, 0xad, 0x13, 0x07, 0x42, 0xd3, 0x03,
- 0xc9, 0x26, 0xb9, 0xcc, 0x69, 0x2e, 0xd1, 0xf1, 0xa5, 0xeb, 0xc1, 0x98, 0xe1, 0x9f, 0x97, 0x0c,
- 0xa1, 0x7d, 0x03, 0x1e, 0x1b, 0xf5, 0xd5, 0x3e, 0x17, 0x51, 0xfe, 0x28, 0xbe, 0x04, 0x7c, 0x85,
- 0x96, 0xe2, 0x6e, 0x5a, 0x4c, 0x32, 0xa2, 0x55, 0xb5, 0x7a, 0x69, 0xff, 0x2f, 0x23, 0xbb, 0xa9,
- 0x01, 0xd4, 0x08, 0x6e, 0x9d, 0x38, 0x10, 0x1a, 0xb1, 0xda, 0xe8, 0xee, 0x19, 0xe7, 0xcd, 0x77,
- 0x60, 0xcb, 0x33, 0x90, 0xcc, 0xc2, 0xf7, 0xbd, 0x4a, 0x2e, 0xea, 0x55, 0x50, 0x16, 0xa3, 0x03,
- 0x2a, 0xbe, 0x42, 0x45, 0x75, 0xdf, 0x97, 0xae, 0x07, 0x64, 0x4e, 0x95, 0x30, 0x67, 0x2b, 0x71,
- 0xe6, 0xda, 0x82, 0xc7, 0x36, 0x6b, 0x3d, 0xad, 0x50, 0x3c, 0xea, 0x93, 0x68, 0x06, 0xc5, 0x27,
- 0xa8, 0x10, 0x82, 0x70, 0x21, 0x24, 0xf3, 0x0a, 0xbf, 0x6b, 0x4c, 0xfd, 0xd6, 0x86, 0x02, 0x5c,
- 0x28, 0xb5, 0x85, 0xa2, 0x5e, 0xa5, 0x90, 0x3c, 0xd3, 0x94, 0x80, 0xcf, 0xd0, 0x86, 0x80, 0x80,
- 0x0b, 0xe9, 0xfa, 0xce, 0x21, 0xf7, 0xa5, 0xe0, 0xed, 0x36, 0x08, 0xb2, 0x50, 0xd5, 0xea, 0x45,
- 0xeb, 0xb7, 0xb4, 0x8d, 0x0d, 0x3a, 0x2e, 0xa1, 0x93, 0x7c, 0xf8, 0x15, 0x5a, 0x1f, 0x84, 0x8f,
- 0xfd, 0x50, 0x32, 0xdf, 0x06, 0x92, 0x57, 0xb0, 0xad, 0x14, 0xb6, 0x4e, 0x47, 0x05, 0x74, 0xdc,
- 0x83, 0x77, 0x51, 0x81, 0xd9, 0xd2, 0xe5, 0x3e, 0x29, 0x28, 0xf7, 0x6a, 0xea, 0x2e, 0x1c, 0xa8,
- 0x28, 0x4d, 0xb3, 0xb1, 0x4e, 0x00, 0x0b, 0xb9, 0x4f, 0x16, 0x9f, 0xeb, 0xa8, 0x8a, 0xd2, 0x34,
- 0x8b, 0x2f, 0x51, 0x51, 0x80, 0xc3, 0x44, 0xcb, 0xf5, 0x1d, 0xb2, 0xa4, 0xae, 0x6d, 0x67, 0xf8,
- 0xda, 0xe2, 0xc1, 0xce, 0x3e, 0x33, 0x85, 0x6b, 0x10, 0xe0, 0xdb, 0x43, 0x5f, 0x82, 0xf6, 0xdd,
- 0x34, 0x03, 0xe1, 0x13, 0xb4, 0x28, 0xa0, 0x1d, 0x0f, 0x1a, 0x29, 0xce, 0xce, 0x2c, 0x45, 0xbd,
- 0xca, 0x22, 0x4d, 0x7c, 0xb4, 0x0f, 0xc0, 0x55, 0xb4, 0xe0, 0x73, 0x09, 0x04, 0xa9, 0x73, 0x2c,
- 0xa7, 0x75, 0x17, 0x5e, 0x73, 0x09, 0x54, 0x65, 0x62, 0x85, 0xbc, 0x0b, 0x80, 0x94, 0x9e, 0x2b,
- 0x2e, 0xef, 0x02, 0xa0, 0x2a, 0x83, 0x01, 0x95, 0x5b, 0x10, 0x08, 0xb0, 0x63, 0xe2, 0x05, 0xef,
- 0x08, 0x1b, 0xc8, 0xb2, 0x6a, 0xac, 0x32, 0xa9, 0xb1, 0x64, 0x38, 0x94, 0xcc, 0x22, 0x29, 0xae,
- 0xdc, 0x18, 0x01, 0xd0, 0x31, 0x24, 0xfe, 0xa8, 0x21, 0x92, 0x05, 0xff, 0x77, 0x45, 0xa8, 0x06,
- 0x33, 0x94, 0xcc, 0x0b, 0xc8, 0x8a, 0xaa, 0xf7, 0xc7, 0x6c, 0x23, 0xaf, 0xa6, 0xbd, 0x9a, 0x96,
- 0x26, 0x8d, 0x29, 0x4c, 0x3a, 0xb5, 0x1a, 0xfe, 0xa0, 0xa1, 0xcd, 0x2c, 0x79, 0xca, 0x86, 0x3b,
- 0x59, 0xfd, 0xe9, 0x4e, 0x2a, 0x69, 0x27, 0x9b, 0x8d, 0xc9, 0x48, 0x3a, 0xad, 0x16, 0x3e, 0x40,
- 0x6b, 0x59, 0xea, 0x90, 0x77, 0x7c, 0x49, 0xd6, 0xaa, 0x5a, 0x3d, 0x6f, 0x6d, 0xa6, 0xc8, 0xb5,
- 0xc6, 0xf3, 0x34, 0x1d, 0xd5, 0xd7, 0xbe, 0x6a, 0x28, 0xf9, 0xbd, 0x9f, 0xba, 0xa1, 0xc4, 0x6f,
- 0xc7, 0x16, 0x95, 0x31, 0xdb, 0x41, 0x62, 0xb7, 0x5a, 0x53, 0xe5, 0xb4, 0xf2, 0x52, 0x3f, 0x32,
- 0xb4, 0xa4, 0x8e, 0x50, 0xde, 0x95, 0xe0, 0x85, 0x64, 0xae, 0x3a, 0x5f, 0x2f, 0xed, 0x57, 0x5f,
- 0xda, 0x20, 0xd6, 0x4a, 0x0a, 0xcb, 0x1f, 0xc7, 0x36, 0x9a, 0xb8, 0x6b, 0x5f, 0x34, 0x54, 0x1a,
- 0xda, 0x30, 0x78, 0x07, 0xe5, 0x6d, 0x75, 0x76, 0x4d, 0x9d, 0x7d, 0x60, 0x4a, 0x4e, 0x9c, 0xe4,
- 0x70, 0x07, 0x95, 0xdb, 0x2c, 0x94, 0xe7, 0xcd, 0x10, 0x44, 0x17, 0x5a, 0xbf, 0xb2, 0x27, 0x07,
- 0x43, 0x7b, 0x3a, 0x02, 0xa4, 0x63, 0x25, 0xac, 0xff, 0xee, 0x9f, 0xf4, 0xdc, 0xc3, 0x93, 0x9e,
- 0x7b, 0x7c, 0xd2, 0x73, 0xef, 0x23, 0x5d, 0xbb, 0x8f, 0x74, 0xed, 0x21, 0xd2, 0xb5, 0xc7, 0x48,
- 0xd7, 0xbe, 0x45, 0xba, 0xf6, 0xe9, 0xbb, 0x9e, 0x7b, 0xb3, 0x35, 0xf5, 0x1f, 0xf6, 0x47, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0x2b, 0xc1, 0x64, 0x36, 0x7d, 0x07, 0x00, 0x00,
-}
+func (m *EventSeries) Reset() { *m = EventSeries{} }
func (m *Event) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/events/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/events/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a70924a2
--- /dev/null
+++ b/operator/vendor/k8s.io/api/events/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*Event) ProtoMessage() {}
+
+func (*EventList) ProtoMessage() {}
+
+func (*EventSeries) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/events/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/events/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..189a20ad
--- /dev/null
+++ b/operator/vendor/k8s.io/api/events/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,37 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Event) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1beta1.Event"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventList) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1beta1.EventList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in EventSeries) OpenAPIModelName() string {
+ return "io.k8s.api.events.v1beta1.EventSeries"
+}
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/doc.go b/operator/vendor/k8s.io/api/extensions/v1beta1/doc.go
index be710973..bf39ca3e 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.extensions.v1beta1
// +k8s:validation-gen=TypeMeta
// +k8s:validation-gen-input=k8s.io/api/extensions/v1beta1
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
index 35b9a4ff..ca947739 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
@@ -23,14 +23,12 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -38,1514 +36,95 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-func (m *DaemonSet) Reset() { *m = DaemonSet{} }
-func (*DaemonSet) ProtoMessage() {}
-func (*DaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{0}
-}
-func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSet.Merge(m, src)
-}
-func (m *DaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
-
-func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
-func (*DaemonSetCondition) ProtoMessage() {}
-func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{1}
-}
-func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetCondition.Merge(m, src)
-}
-func (m *DaemonSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
-
-func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-func (*DaemonSetList) ProtoMessage() {}
-func (*DaemonSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{2}
-}
-func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetList.Merge(m, src)
-}
-func (m *DaemonSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
-
-func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-func (*DaemonSetSpec) ProtoMessage() {}
-func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{3}
-}
-func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetSpec.Merge(m, src)
-}
-func (m *DaemonSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
-
-func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-func (*DaemonSetStatus) ProtoMessage() {}
-func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{4}
-}
-func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetStatus.Merge(m, src)
-}
-func (m *DaemonSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
-
-func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-func (*DaemonSetUpdateStrategy) ProtoMessage() {}
-func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{5}
-}
-func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src)
-}
-func (m *DaemonSetUpdateStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
-
-func (m *Deployment) Reset() { *m = Deployment{} }
-func (*Deployment) ProtoMessage() {}
-func (*Deployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{6}
-}
-func (m *Deployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Deployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Deployment.Merge(m, src)
-}
-func (m *Deployment) XXX_Size() int {
- return m.Size()
-}
-func (m *Deployment) XXX_DiscardUnknown() {
- xxx_messageInfo_Deployment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Deployment proto.InternalMessageInfo
-
-func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (*DeploymentCondition) ProtoMessage() {}
-func (*DeploymentCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{7}
-}
-func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentCondition.Merge(m, src)
-}
-func (m *DeploymentCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
-
-func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-func (*DeploymentList) ProtoMessage() {}
-func (*DeploymentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{8}
-}
-func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentList.Merge(m, src)
-}
-func (m *DeploymentList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
-
-func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} }
-func (*DeploymentRollback) ProtoMessage() {}
-func (*DeploymentRollback) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{9}
-}
-func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentRollback) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentRollback.Merge(m, src)
-}
-func (m *DeploymentRollback) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentRollback) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentRollback.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo
-
-func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-func (*DeploymentSpec) ProtoMessage() {}
-func (*DeploymentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{10}
-}
-func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentSpec.Merge(m, src)
-}
-func (m *DeploymentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
-
-func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (*DeploymentStatus) ProtoMessage() {}
-func (*DeploymentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{11}
-}
-func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStatus.Merge(m, src)
-}
-func (m *DeploymentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
-
-func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-func (*DeploymentStrategy) ProtoMessage() {}
-func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{12}
-}
-func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeploymentStrategy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeploymentStrategy.Merge(m, src)
-}
-func (m *DeploymentStrategy) XXX_Size() int {
- return m.Size()
-}
-func (m *DeploymentStrategy) XXX_DiscardUnknown() {
- xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
-
-func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-func (*HTTPIngressPath) ProtoMessage() {}
-func (*HTTPIngressPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{13}
-}
-func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressPath) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressPath.Merge(m, src)
-}
-func (m *HTTPIngressPath) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressPath) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo
-
-func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-func (*HTTPIngressRuleValue) ProtoMessage() {}
-func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{14}
-}
-func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src)
-}
-func (m *HTTPIngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
-
-func (m *IPBlock) Reset() { *m = IPBlock{} }
-func (*IPBlock) ProtoMessage() {}
-func (*IPBlock) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{15}
-}
-func (m *IPBlock) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPBlock) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPBlock.Merge(m, src)
-}
-func (m *IPBlock) XXX_Size() int {
- return m.Size()
-}
-func (m *IPBlock) XXX_DiscardUnknown() {
- xxx_messageInfo_IPBlock.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPBlock proto.InternalMessageInfo
-
-func (m *Ingress) Reset() { *m = Ingress{} }
-func (*Ingress) ProtoMessage() {}
-func (*Ingress) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{16}
-}
-func (m *Ingress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Ingress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Ingress.Merge(m, src)
-}
-func (m *Ingress) XXX_Size() int {
- return m.Size()
-}
-func (m *Ingress) XXX_DiscardUnknown() {
- xxx_messageInfo_Ingress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Ingress proto.InternalMessageInfo
-
-func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-func (*IngressBackend) ProtoMessage() {}
-func (*IngressBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{17}
-}
-func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressBackend) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressBackend.Merge(m, src)
-}
-func (m *IngressBackend) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressBackend) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressBackend.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
-
-func (m *IngressList) Reset() { *m = IngressList{} }
-func (*IngressList) ProtoMessage() {}
-func (*IngressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{18}
-}
-func (m *IngressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressList.Merge(m, src)
-}
-func (m *IngressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressList proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-func (*IngressLoadBalancerIngress) ProtoMessage() {}
-func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{19}
-}
-func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src)
-}
-func (m *IngressLoadBalancerIngress) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-func (*IngressLoadBalancerStatus) ProtoMessage() {}
-func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{20}
-}
-func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src)
-}
-func (m *IngressLoadBalancerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
-
-func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-func (*IngressPortStatus) ProtoMessage() {}
-func (*IngressPortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{21}
-}
-func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressPortStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressPortStatus.Merge(m, src)
-}
-func (m *IngressPortStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressPortStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressPortStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
-
-func (m *IngressRule) Reset() { *m = IngressRule{} }
-func (*IngressRule) ProtoMessage() {}
-func (*IngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{22}
-}
-func (m *IngressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRule.Merge(m, src)
-}
-func (m *IngressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressRule proto.InternalMessageInfo
+func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
-func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-func (*IngressRuleValue) ProtoMessage() {}
-func (*IngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{23}
-}
-func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRuleValue.Merge(m, src)
-}
-func (m *IngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRuleValue.DiscardUnknown(m)
-}
+func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
-var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
-
-func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-func (*IngressSpec) ProtoMessage() {}
-func (*IngressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{24}
-}
-func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressSpec.Merge(m, src)
-}
-func (m *IngressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressSpec.DiscardUnknown(m)
-}
+func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
-var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
+func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
-func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-func (*IngressStatus) ProtoMessage() {}
-func (*IngressStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{25}
-}
-func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressStatus.Merge(m, src)
-}
-func (m *IngressStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressStatus.DiscardUnknown(m)
-}
+func (m *Deployment) Reset() { *m = Deployment{} }
-var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
+func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
-func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-func (*IngressTLS) ProtoMessage() {}
-func (*IngressTLS) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{26}
-}
-func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressTLS) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressTLS.Merge(m, src)
-}
-func (m *IngressTLS) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressTLS) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressTLS.DiscardUnknown(m)
-}
+func (m *DeploymentList) Reset() { *m = DeploymentList{} }
-var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
+func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} }
-func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
-func (*NetworkPolicy) ProtoMessage() {}
-func (*NetworkPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{27}
-}
-func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicy.Merge(m, src)
-}
-func (m *NetworkPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicy.DiscardUnknown(m)
-}
+func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
-var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
+func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
-func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
-func (*NetworkPolicyEgressRule) ProtoMessage() {}
-func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{28}
-}
-func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src)
-}
-func (m *NetworkPolicyEgressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m)
-}
+func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
-var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
+func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
-func (*NetworkPolicyIngressRule) ProtoMessage() {}
-func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{29}
-}
-func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src)
-}
-func (m *NetworkPolicyIngressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m)
-}
+func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
+func (m *IPBlock) Reset() { *m = IPBlock{} }
-func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
-func (*NetworkPolicyList) ProtoMessage() {}
-func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{30}
-}
-func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyList.Merge(m, src)
-}
-func (m *NetworkPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m)
-}
+func (m *Ingress) Reset() { *m = Ingress{} }
-var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
+func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
-func (*NetworkPolicyPeer) ProtoMessage() {}
-func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{31}
-}
-func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyPeer.Merge(m, src)
-}
-func (m *NetworkPolicyPeer) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyPeer) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m)
-}
+func (m *IngressList) Reset() { *m = IngressList{} }
-var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
+func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
-func (*NetworkPolicyPort) ProtoMessage() {}
-func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{32}
-}
-func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyPort.Merge(m, src)
-}
-func (m *NetworkPolicyPort) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyPort) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m)
-}
+func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
+func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
-func (*NetworkPolicySpec) ProtoMessage() {}
-func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{33}
-}
-func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicySpec.Merge(m, src)
-}
-func (m *NetworkPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m)
-}
+func (m *IngressRule) Reset() { *m = IngressRule{} }
-var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
+func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-func (*ReplicaSet) ProtoMessage() {}
-func (*ReplicaSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{34}
-}
-func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSet.Merge(m, src)
-}
-func (m *ReplicaSet) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSet) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSet.DiscardUnknown(m)
-}
+func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
+func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-func (*ReplicaSetCondition) ProtoMessage() {}
-func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{35}
-}
-func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetCondition.Merge(m, src)
-}
-func (m *ReplicaSetCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m)
-}
+func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
+func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
-func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-func (*ReplicaSetList) ProtoMessage() {}
-func (*ReplicaSetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{36}
-}
-func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetList.Merge(m, src)
-}
-func (m *ReplicaSetList) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetList) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetList.DiscardUnknown(m)
-}
+func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
-var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
+func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
-func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-func (*ReplicaSetSpec) ProtoMessage() {}
-func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{37}
-}
-func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetSpec.Merge(m, src)
-}
-func (m *ReplicaSetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m)
-}
+func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
-var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
+func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
-func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-func (*ReplicaSetStatus) ProtoMessage() {}
-func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{38}
-}
-func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ReplicaSetStatus.Merge(m, src)
-}
-func (m *ReplicaSetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m)
-}
+func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
-var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
+func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
-func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
-func (*RollbackConfig) ProtoMessage() {}
-func (*RollbackConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{39}
-}
-func (m *RollbackConfig) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollbackConfig) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollbackConfig.Merge(m, src)
-}
-func (m *RollbackConfig) XXX_Size() int {
- return m.Size()
-}
-func (m *RollbackConfig) XXX_DiscardUnknown() {
- xxx_messageInfo_RollbackConfig.DiscardUnknown(m)
-}
+func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
-var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo
+func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
-func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-func (*RollingUpdateDaemonSet) ProtoMessage() {}
-func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{40}
-}
-func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src)
-}
-func (m *RollingUpdateDaemonSet) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m)
-}
+func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
-var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
+func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
-func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (*RollingUpdateDeployment) ProtoMessage() {}
-func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{41}
-}
-func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RollingUpdateDeployment.Merge(m, src)
-}
-func (m *RollingUpdateDeployment) XXX_Size() int {
- return m.Size()
-}
-func (m *RollingUpdateDeployment) XXX_DiscardUnknown() {
- xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m)
-}
+func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
-var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
+func (m *RollbackConfig) Reset() { *m = RollbackConfig{} }
-func (m *Scale) Reset() { *m = Scale{} }
-func (*Scale) ProtoMessage() {}
-func (*Scale) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{42}
-}
-func (m *Scale) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scale) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scale.Merge(m, src)
-}
-func (m *Scale) XXX_Size() int {
- return m.Size()
-}
-func (m *Scale) XXX_DiscardUnknown() {
- xxx_messageInfo_Scale.DiscardUnknown(m)
-}
+func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
-var xxx_messageInfo_Scale proto.InternalMessageInfo
+func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
-func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (*ScaleSpec) ProtoMessage() {}
-func (*ScaleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{43}
-}
-func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleSpec.Merge(m, src)
-}
-func (m *ScaleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleSpec.DiscardUnknown(m)
-}
+func (m *Scale) Reset() { *m = Scale{} }
-var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
+func (m *ScaleSpec) Reset() { *m = ScaleSpec{} }
-func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
-func (*ScaleStatus) ProtoMessage() {}
-func (*ScaleStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_90a532284de28347, []int{44}
-}
-func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ScaleStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ScaleStatus.Merge(m, src)
-}
-func (m *ScaleStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ScaleStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ScaleStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet")
- proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition")
- proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList")
- proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec")
- proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus")
- proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy")
- proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment")
- proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition")
- proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList")
- proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry")
- proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec")
- proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus")
- proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy")
- proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath")
- proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue")
- proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock")
- proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress")
- proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend")
- proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList")
- proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerIngress")
- proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressLoadBalancerStatus")
- proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressPortStatus")
- proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule")
- proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue")
- proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec")
- proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus")
- proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS")
- proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy")
- proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule")
- proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule")
- proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList")
- proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer")
- proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort")
- proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec")
- proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet")
- proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition")
- proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList")
- proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec")
- proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus")
- proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig")
- proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet")
- proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment")
- proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale")
- proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec")
- proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_90a532284de28347)
-}
-
-var fileDescriptor_90a532284de28347 = []byte{
- // 2875 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcf, 0x6f, 0x24, 0x47,
- 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0xe3, 0xe7, 0xb5, 0xbd, 0x5b, 0xeb, 0xac, 0x1d, 0xef, 0x37, 0x76,
- 0xd4, 0x5f, 0x11, 0x36, 0x61, 0x77, 0x86, 0xdd, 0x24, 0x4b, 0x7e, 0x48, 0x09, 0x3b, 0xde, 0x4d,
- 0xd6, 0x89, 0x7f, 0x4c, 0x6a, 0xc6, 0x09, 0x8a, 0x08, 0xd0, 0xee, 0x29, 0x8f, 0x3b, 0xee, 0xe9,
- 0x1e, 0x75, 0xd7, 0x98, 0xf5, 0x0d, 0x04, 0x97, 0x9c, 0x40, 0x42, 0x21, 0x1c, 0x91, 0x90, 0xb8,
- 0x72, 0xe5, 0x10, 0x22, 0x10, 0x41, 0x8a, 0x38, 0x45, 0xe2, 0x40, 0x4e, 0x16, 0x71, 0x4e, 0x88,
- 0x7f, 0x00, 0xed, 0x09, 0xd5, 0x8f, 0xae, 0xfe, 0x6d, 0xf7, 0x0c, 0x5e, 0x8b, 0x20, 0x4e, 0xeb,
- 0xa9, 0xf7, 0xde, 0xa7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0x53, 0x55, 0xbd, 0x70, 0x7d, 0xef, 0x39,
- 0xbf, 0x66, 0xb9, 0x75, 0xa3, 0x6f, 0xd5, 0xc9, 0x7d, 0x4a, 0x1c, 0xdf, 0x72, 0x1d, 0xbf, 0xbe,
- 0x7f, 0x63, 0x9b, 0x50, 0xe3, 0x46, 0xbd, 0x4b, 0x1c, 0xe2, 0x19, 0x94, 0x74, 0x6a, 0x7d, 0xcf,
- 0xa5, 0x2e, 0x7a, 0x4c, 0xa8, 0xd7, 0x8c, 0xbe, 0x55, 0x0b, 0xd5, 0x6b, 0x52, 0x7d, 0xf1, 0x7a,
- 0xd7, 0xa2, 0xbb, 0x83, 0xed, 0x9a, 0xe9, 0xf6, 0xea, 0x5d, 0xb7, 0xeb, 0xd6, 0xb9, 0xd5, 0xf6,
- 0x60, 0x87, 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x02, 0x6d, 0x51, 0x8f, 0x74, 0x6e, 0xba, 0x1e, 0xa9,
- 0xef, 0xa7, 0x7a, 0x5c, 0x7c, 0x26, 0xd4, 0xe9, 0x19, 0xe6, 0xae, 0xe5, 0x10, 0xef, 0xa0, 0xde,
- 0xdf, 0xeb, 0xb2, 0x06, 0xbf, 0xde, 0x23, 0xd4, 0xc8, 0xb2, 0xaa, 0xe7, 0x59, 0x79, 0x03, 0x87,
- 0x5a, 0x3d, 0x92, 0x32, 0xb8, 0x75, 0x92, 0x81, 0x6f, 0xee, 0x92, 0x9e, 0x91, 0xb2, 0x7b, 0x3a,
- 0xcf, 0x6e, 0x40, 0x2d, 0xbb, 0x6e, 0x39, 0xd4, 0xa7, 0x5e, 0xd2, 0x48, 0x7f, 0xbf, 0x04, 0x93,
- 0x77, 0x0c, 0xd2, 0x73, 0x9d, 0x16, 0xa1, 0xe8, 0x7b, 0x50, 0x65, 0xc3, 0xe8, 0x18, 0xd4, 0x58,
- 0xd0, 0x1e, 0xd7, 0xae, 0x4e, 0xdd, 0xfc, 0x7a, 0x2d, 0x9c, 0x66, 0x85, 0x5a, 0xeb, 0xef, 0x75,
- 0x59, 0x83, 0x5f, 0x63, 0xda, 0xb5, 0xfd, 0x1b, 0xb5, 0xcd, 0xed, 0x77, 0x89, 0x49, 0xd7, 0x09,
- 0x35, 0x1a, 0xe8, 0x93, 0xc3, 0xe5, 0x73, 0x47, 0x87, 0xcb, 0x10, 0xb6, 0x61, 0x85, 0x8a, 0x36,
- 0x60, 0xcc, 0xef, 0x13, 0x73, 0xa1, 0xc4, 0xd1, 0xaf, 0xd5, 0x8e, 0x5d, 0xc4, 0x9a, 0xf2, 0xac,
- 0xd5, 0x27, 0x66, 0xe3, 0xbc, 0x44, 0x1e, 0x63, 0xbf, 0x30, 0xc7, 0x41, 0x6f, 0xc2, 0xb8, 0x4f,
- 0x0d, 0x3a, 0xf0, 0x17, 0xca, 0x1c, 0xb1, 0x56, 0x18, 0x91, 0x5b, 0x35, 0x66, 0x24, 0xe6, 0xb8,
- 0xf8, 0x8d, 0x25, 0x9a, 0xfe, 0xf7, 0x12, 0x20, 0xa5, 0xbb, 0xe2, 0x3a, 0x1d, 0x8b, 0x5a, 0xae,
- 0x83, 0x5e, 0x80, 0x31, 0x7a, 0xd0, 0x27, 0x7c, 0x72, 0x26, 0x1b, 0x4f, 0x04, 0x0e, 0xb5, 0x0f,
- 0xfa, 0xe4, 0xc1, 0xe1, 0xf2, 0xe5, 0xb4, 0x05, 0x93, 0x60, 0x6e, 0x83, 0xd6, 0x94, 0xab, 0x25,
- 0x6e, 0xfd, 0x4c, 0xbc, 0xeb, 0x07, 0x87, 0xcb, 0x19, 0x41, 0x58, 0x53, 0x48, 0x71, 0x07, 0xd1,
- 0x3e, 0x20, 0xdb, 0xf0, 0x69, 0xdb, 0x33, 0x1c, 0x5f, 0xf4, 0x64, 0xf5, 0x88, 0x9c, 0x84, 0xa7,
- 0x8a, 0x2d, 0x1a, 0xb3, 0x68, 0x2c, 0x4a, 0x2f, 0xd0, 0x5a, 0x0a, 0x0d, 0x67, 0xf4, 0x80, 0x9e,
- 0x80, 0x71, 0x8f, 0x18, 0xbe, 0xeb, 0x2c, 0x8c, 0xf1, 0x51, 0xa8, 0x09, 0xc4, 0xbc, 0x15, 0x4b,
- 0x29, 0x7a, 0x12, 0x26, 0x7a, 0xc4, 0xf7, 0x8d, 0x2e, 0x59, 0xa8, 0x70, 0xc5, 0x59, 0xa9, 0x38,
- 0xb1, 0x2e, 0x9a, 0x71, 0x20, 0xd7, 0x3f, 0xd4, 0x60, 0x5a, 0xcd, 0xdc, 0x9a, 0xe5, 0x53, 0xf4,
- 0xed, 0x54, 0x1c, 0xd6, 0x8a, 0x0d, 0x89, 0x59, 0xf3, 0x28, 0xbc, 0x20, 0x7b, 0xab, 0x06, 0x2d,
- 0x91, 0x18, 0x5c, 0x87, 0x8a, 0x45, 0x49, 0x8f, 0xad, 0x43, 0xf9, 0xea, 0xd4, 0xcd, 0xab, 0x45,
- 0x43, 0xa6, 0x31, 0x2d, 0x41, 0x2b, 0xab, 0xcc, 0x1c, 0x0b, 0x14, 0xfd, 0xe7, 0x63, 0x11, 0xf7,
- 0x59, 0x68, 0xa2, 0x77, 0xa0, 0xea, 0x13, 0x9b, 0x98, 0xd4, 0xf5, 0xa4, 0xfb, 0x4f, 0x17, 0x74,
- 0xdf, 0xd8, 0x26, 0x76, 0x4b, 0x9a, 0x36, 0xce, 0x33, 0xff, 0x83, 0x5f, 0x58, 0x41, 0xa2, 0x37,
- 0xa0, 0x4a, 0x49, 0xaf, 0x6f, 0x1b, 0x94, 0xc8, 0x7d, 0xf4, 0xff, 0xd1, 0x21, 0xb0, 0xc8, 0x61,
- 0x60, 0x4d, 0xb7, 0xd3, 0x96, 0x6a, 0x7c, 0xfb, 0xa8, 0x29, 0x09, 0x5a, 0xb1, 0x82, 0x41, 0xfb,
- 0x30, 0x33, 0xe8, 0x77, 0x98, 0x26, 0x65, 0xd9, 0xa1, 0x7b, 0x20, 0x23, 0xe9, 0x56, 0xd1, 0xb9,
- 0xd9, 0x8a, 0x59, 0x37, 0x2e, 0xcb, 0xbe, 0x66, 0xe2, 0xed, 0x38, 0xd1, 0x0b, 0xba, 0x0d, 0xb3,
- 0x3d, 0xcb, 0xc1, 0xc4, 0xe8, 0x1c, 0xb4, 0x88, 0xe9, 0x3a, 0x1d, 0x9f, 0x87, 0x55, 0xa5, 0x31,
- 0x2f, 0x01, 0x66, 0xd7, 0xe3, 0x62, 0x9c, 0xd4, 0x47, 0xaf, 0x01, 0x0a, 0x86, 0xf1, 0xaa, 0x48,
- 0x6e, 0x96, 0xeb, 0xf0, 0x98, 0x2b, 0x87, 0xc1, 0xdd, 0x4e, 0x69, 0xe0, 0x0c, 0x2b, 0xb4, 0x06,
- 0x73, 0x1e, 0xd9, 0xb7, 0xd8, 0x18, 0xef, 0x59, 0x3e, 0x75, 0xbd, 0x83, 0x35, 0xab, 0x67, 0xd1,
- 0x85, 0x71, 0xee, 0xd3, 0xc2, 0xd1, 0xe1, 0xf2, 0x1c, 0xce, 0x90, 0xe3, 0x4c, 0x2b, 0xfd, 0x83,
- 0x71, 0x98, 0x4d, 0xe4, 0x1b, 0xf4, 0x26, 0x5c, 0x36, 0x07, 0x9e, 0x47, 0x1c, 0xba, 0x31, 0xe8,
- 0x6d, 0x13, 0xaf, 0x65, 0xee, 0x92, 0xce, 0xc0, 0x26, 0x1d, 0x1e, 0x28, 0x95, 0xc6, 0x92, 0xf4,
- 0xf8, 0xf2, 0x4a, 0xa6, 0x16, 0xce, 0xb1, 0x66, 0xb3, 0xe0, 0xf0, 0xa6, 0x75, 0xcb, 0xf7, 0x15,
- 0x66, 0x89, 0x63, 0xaa, 0x59, 0xd8, 0x48, 0x69, 0xe0, 0x0c, 0x2b, 0xe6, 0x63, 0x87, 0xf8, 0x96,
- 0x47, 0x3a, 0x49, 0x1f, 0xcb, 0x71, 0x1f, 0xef, 0x64, 0x6a, 0xe1, 0x1c, 0x6b, 0xf4, 0x2c, 0x4c,
- 0x89, 0xde, 0xf8, 0xfa, 0xc9, 0x85, 0xbe, 0x24, 0xc1, 0xa6, 0x36, 0x42, 0x11, 0x8e, 0xea, 0xb1,
- 0xa1, 0xb9, 0xdb, 0x3e, 0xf1, 0xf6, 0x49, 0x27, 0x7f, 0x81, 0x37, 0x53, 0x1a, 0x38, 0xc3, 0x8a,
- 0x0d, 0x4d, 0x44, 0x60, 0x6a, 0x68, 0xe3, 0xf1, 0xa1, 0x6d, 0x65, 0x6a, 0xe1, 0x1c, 0x6b, 0x16,
- 0xc7, 0xc2, 0xe5, 0xdb, 0xfb, 0x86, 0x65, 0x1b, 0xdb, 0x36, 0x59, 0x98, 0x88, 0xc7, 0xf1, 0x46,
- 0x5c, 0x8c, 0x93, 0xfa, 0xe8, 0x55, 0xb8, 0x28, 0x9a, 0xb6, 0x1c, 0x43, 0x81, 0x54, 0x39, 0xc8,
- 0xa3, 0x12, 0xe4, 0xe2, 0x46, 0x52, 0x01, 0xa7, 0x6d, 0xd0, 0x0b, 0x30, 0x63, 0xba, 0xb6, 0xcd,
- 0xe3, 0x71, 0xc5, 0x1d, 0x38, 0x74, 0x61, 0x92, 0xa3, 0x20, 0xb6, 0x1f, 0x57, 0x62, 0x12, 0x9c,
- 0xd0, 0x44, 0x04, 0xc0, 0x0c, 0x0a, 0x8e, 0xbf, 0x00, 0x3c, 0x3f, 0xde, 0x28, 0x9a, 0x03, 0x54,
- 0xa9, 0x0a, 0x39, 0x80, 0x6a, 0xf2, 0x71, 0x04, 0x58, 0xff, 0xb3, 0x06, 0xf3, 0x39, 0xa9, 0x03,
- 0xbd, 0x1c, 0x2b, 0xb1, 0x5f, 0x4b, 0x94, 0xd8, 0x2b, 0x39, 0x66, 0x91, 0x3a, 0xeb, 0xc0, 0xb4,
- 0xc7, 0x46, 0xe5, 0x74, 0x85, 0x8a, 0xcc, 0x91, 0xcf, 0x9e, 0x30, 0x0c, 0x1c, 0xb5, 0x09, 0x73,
- 0xfe, 0xc5, 0xa3, 0xc3, 0xe5, 0xe9, 0x98, 0x0c, 0xc7, 0xe1, 0xf5, 0x5f, 0x94, 0x00, 0xee, 0x90,
- 0xbe, 0xed, 0x1e, 0xf4, 0x88, 0x73, 0x16, 0x1c, 0x6a, 0x33, 0xc6, 0xa1, 0xae, 0x9f, 0xb4, 0x3c,
- 0xca, 0xb5, 0x5c, 0x12, 0xf5, 0x56, 0x82, 0x44, 0xd5, 0x8b, 0x43, 0x1e, 0xcf, 0xa2, 0xfe, 0x5a,
- 0x86, 0x4b, 0xa1, 0x72, 0x48, 0xa3, 0x5e, 0x8c, 0xad, 0xf1, 0x57, 0x13, 0x6b, 0x3c, 0x9f, 0x61,
- 0xf2, 0xd0, 0x78, 0xd4, 0xbb, 0x30, 0xc3, 0x58, 0x8e, 0x58, 0x4b, 0xce, 0xa1, 0xc6, 0x87, 0xe6,
- 0x50, 0xaa, 0xda, 0xad, 0xc5, 0x90, 0x70, 0x02, 0x39, 0x87, 0xb3, 0x4d, 0x7c, 0x19, 0x39, 0xdb,
- 0x47, 0x1a, 0xcc, 0x84, 0xcb, 0x74, 0x06, 0xa4, 0x6d, 0x23, 0x4e, 0xda, 0x9e, 0x2c, 0x1c, 0xa2,
- 0x39, 0xac, 0xed, 0x9f, 0x8c, 0xe0, 0x2b, 0x25, 0xb6, 0xc1, 0xb7, 0x0d, 0x73, 0x0f, 0x3d, 0x0e,
- 0x63, 0x8e, 0xd1, 0x0b, 0x22, 0x53, 0x6d, 0x96, 0x0d, 0xa3, 0x47, 0x30, 0x97, 0xa0, 0xf7, 0x35,
- 0x40, 0xb2, 0x0a, 0xdc, 0x76, 0x1c, 0x97, 0x1a, 0x22, 0x57, 0x0a, 0xb7, 0x56, 0x0b, 0xbb, 0x15,
- 0xf4, 0x58, 0xdb, 0x4a, 0x61, 0xdd, 0x75, 0xa8, 0x77, 0x10, 0x2e, 0x72, 0x5a, 0x01, 0x67, 0x38,
- 0x80, 0x0c, 0x00, 0x4f, 0x62, 0xb6, 0x5d, 0xb9, 0x91, 0xaf, 0x17, 0xc8, 0x79, 0xcc, 0x60, 0xc5,
- 0x75, 0x76, 0xac, 0x6e, 0x98, 0x76, 0xb0, 0x02, 0xc2, 0x11, 0xd0, 0xc5, 0xbb, 0x30, 0x9f, 0xe3,
- 0x2d, 0xba, 0x00, 0xe5, 0x3d, 0x72, 0x20, 0xa6, 0x0d, 0xb3, 0x3f, 0xd1, 0x1c, 0x54, 0xf6, 0x0d,
- 0x7b, 0x20, 0xd2, 0xef, 0x24, 0x16, 0x3f, 0x5e, 0x28, 0x3d, 0xa7, 0xe9, 0x1f, 0x56, 0xa2, 0xb1,
- 0xc3, 0x19, 0xf3, 0x55, 0xa8, 0x7a, 0xa4, 0x6f, 0x5b, 0xa6, 0xe1, 0x4b, 0x22, 0xc4, 0xc9, 0x2f,
- 0x96, 0x6d, 0x58, 0x49, 0x63, 0xdc, 0xba, 0xf4, 0x70, 0xb9, 0x75, 0xf9, 0x74, 0xb8, 0xf5, 0x77,
- 0xa1, 0xea, 0x07, 0xac, 0x7a, 0x8c, 0x43, 0xde, 0x18, 0x22, 0xbf, 0x4a, 0x42, 0xad, 0x3a, 0x50,
- 0x54, 0x5a, 0x81, 0x66, 0x91, 0xe8, 0xca, 0x90, 0x24, 0xfa, 0x54, 0x89, 0x2f, 0xcb, 0x37, 0x7d,
- 0x63, 0xe0, 0x93, 0x0e, 0xcf, 0x6d, 0xd5, 0x30, 0xdf, 0x34, 0x79, 0x2b, 0x96, 0x52, 0xf4, 0x4e,
- 0x2c, 0x64, 0xab, 0xa3, 0x84, 0xec, 0x4c, 0x7e, 0xb8, 0xa2, 0x2d, 0x98, 0xef, 0x7b, 0x6e, 0xd7,
- 0x23, 0xbe, 0x7f, 0x87, 0x18, 0x1d, 0xdb, 0x72, 0x48, 0x30, 0x3f, 0x82, 0x11, 0x5d, 0x39, 0x3a,
- 0x5c, 0x9e, 0x6f, 0x66, 0xab, 0xe0, 0x3c, 0x5b, 0xfd, 0x67, 0x15, 0xb8, 0x90, 0xac, 0x80, 0x39,
- 0x24, 0x55, 0x1b, 0x89, 0xa4, 0x5e, 0x8b, 0x6c, 0x06, 0xc1, 0xe0, 0xd5, 0xea, 0x67, 0x6c, 0x88,
- 0xdb, 0x30, 0x2b, 0xb3, 0x41, 0x20, 0x94, 0x34, 0x5d, 0xad, 0xfe, 0x56, 0x5c, 0x8c, 0x93, 0xfa,
- 0xe8, 0x45, 0x98, 0xf6, 0x38, 0xef, 0x0e, 0x00, 0x04, 0x77, 0x7d, 0x44, 0x02, 0x4c, 0xe3, 0xa8,
- 0x10, 0xc7, 0x75, 0x19, 0x6f, 0x0d, 0xe9, 0x68, 0x00, 0x30, 0x16, 0xe7, 0xad, 0xb7, 0x93, 0x0a,
- 0x38, 0x6d, 0x83, 0xd6, 0xe1, 0xd2, 0xc0, 0x49, 0x43, 0x89, 0x50, 0xbe, 0x22, 0xa1, 0x2e, 0x6d,
- 0xa5, 0x55, 0x70, 0x96, 0x1d, 0x5a, 0x85, 0x4b, 0x94, 0x78, 0x3d, 0xcb, 0x31, 0xa8, 0xe5, 0x74,
- 0x15, 0x9c, 0x58, 0xf9, 0x79, 0x06, 0xd5, 0x4e, 0x8b, 0x71, 0x96, 0x0d, 0xda, 0x89, 0xb1, 0xe2,
- 0x71, 0x9e, 0xe9, 0x6f, 0x16, 0xde, 0xc3, 0x85, 0x69, 0x71, 0x06, 0x73, 0xaf, 0x16, 0x65, 0xee,
- 0xfa, 0x1f, 0xb4, 0x68, 0x3d, 0x53, 0x6c, 0xfa, 0xa4, 0x0b, 0xab, 0x94, 0x45, 0x84, 0x68, 0xb9,
- 0xd9, 0x44, 0xfa, 0xd6, 0x50, 0x44, 0x3a, 0xac, 0xc3, 0x27, 0x33, 0xe9, 0x3f, 0x6a, 0x30, 0x7b,
- 0xaf, 0xdd, 0x6e, 0xae, 0x3a, 0x7c, 0xe3, 0x35, 0x0d, 0xba, 0xcb, 0x0a, 0x72, 0xdf, 0xa0, 0xbb,
- 0xc9, 0x82, 0xcc, 0x64, 0x98, 0x4b, 0xd0, 0x33, 0x50, 0x65, 0xff, 0x32, 0xc7, 0x79, 0xe4, 0x4f,
- 0xf2, 0x7c, 0x55, 0x6d, 0xca, 0xb6, 0x07, 0x91, 0xbf, 0xb1, 0xd2, 0x44, 0xdf, 0x82, 0x09, 0x96,
- 0x26, 0x88, 0xd3, 0x29, 0xc8, 0xa3, 0xa5, 0x53, 0x0d, 0x61, 0x14, 0x52, 0x23, 0xd9, 0x80, 0x03,
- 0x38, 0x7d, 0x0f, 0xe6, 0x22, 0x83, 0xc0, 0x03, 0x9b, 0xbc, 0xc9, 0x4a, 0x1f, 0x6a, 0x41, 0x85,
- 0xf5, 0xce, 0x0a, 0x5c, 0xb9, 0xc0, 0x4d, 0x65, 0x62, 0x22, 0x42, 0x1a, 0xc3, 0x7e, 0xf9, 0x58,
- 0x60, 0xe9, 0x9b, 0x30, 0xb1, 0xda, 0x6c, 0xd8, 0xae, 0xa0, 0x2e, 0xa6, 0xd5, 0xf1, 0x92, 0x33,
- 0xb5, 0xb2, 0x7a, 0x07, 0x63, 0x2e, 0x41, 0x3a, 0x8c, 0x93, 0xfb, 0x26, 0xe9, 0x53, 0xce, 0x56,
- 0x26, 0x1b, 0xc0, 0x72, 0xf2, 0x5d, 0xde, 0x82, 0xa5, 0x44, 0xff, 0x49, 0x09, 0x26, 0x64, 0xb7,
- 0x67, 0x70, 0x94, 0x59, 0x8b, 0x1d, 0x65, 0x9e, 0x2a, 0xb6, 0x04, 0xb9, 0xe7, 0x98, 0x76, 0xe2,
- 0x1c, 0x73, 0xad, 0x20, 0xde, 0xf1, 0x87, 0x98, 0xf7, 0x4a, 0x30, 0x13, 0x5f, 0x7c, 0xf4, 0x2c,
- 0x4c, 0xb1, 0xac, 0x6d, 0x99, 0x64, 0x23, 0x24, 0x8b, 0xea, 0x26, 0xa3, 0x15, 0x8a, 0x70, 0x54,
- 0x0f, 0x75, 0x95, 0x59, 0xd3, 0xf5, 0xa8, 0x1c, 0x74, 0xfe, 0x94, 0x0e, 0xa8, 0x65, 0xd7, 0xc4,
- 0xbd, 0x7d, 0x6d, 0xd5, 0xa1, 0x9b, 0x5e, 0x8b, 0x7a, 0x96, 0xd3, 0x4d, 0x75, 0xc4, 0xc0, 0x70,
- 0x14, 0x19, 0xbd, 0xc5, 0x2a, 0x88, 0xef, 0x0e, 0x3c, 0x93, 0x64, 0x31, 0xc1, 0x80, 0xc5, 0xb0,
- 0x8d, 0xd0, 0x59, 0x73, 0x4d, 0xc3, 0x16, 0x8b, 0x83, 0xc9, 0x0e, 0xf1, 0x88, 0x63, 0x92, 0x80,
- 0x7d, 0x09, 0x08, 0xac, 0xc0, 0xf4, 0xdf, 0x6a, 0x30, 0x25, 0xe7, 0xe2, 0x0c, 0x38, 0xff, 0xeb,
- 0x71, 0xce, 0xff, 0x44, 0xc1, 0x1d, 0x9a, 0x4d, 0xf8, 0x7f, 0xa7, 0xc1, 0x62, 0xe0, 0xba, 0x6b,
- 0x74, 0x1a, 0x86, 0x6d, 0x38, 0x26, 0xf1, 0x82, 0x58, 0x5f, 0x84, 0x92, 0xd5, 0x97, 0x2b, 0x09,
- 0x12, 0xa0, 0xb4, 0xda, 0xc4, 0x25, 0xab, 0xcf, 0x0a, 0xf2, 0xae, 0xeb, 0x53, 0x7e, 0x30, 0x10,
- 0x67, 0x4e, 0xe5, 0xf5, 0x3d, 0xd9, 0x8e, 0x95, 0x06, 0xda, 0x82, 0x4a, 0xdf, 0xf5, 0x28, 0x2b,
- 0x82, 0xe5, 0xc4, 0xfa, 0x1e, 0xe3, 0x35, 0x5b, 0x37, 0x19, 0x88, 0xe1, 0x4e, 0x67, 0x30, 0x58,
- 0xa0, 0xe9, 0x3f, 0xd4, 0xe0, 0xd1, 0x0c, 0xff, 0x25, 0xff, 0xe8, 0xc0, 0x84, 0x25, 0x84, 0x32,
- 0xbd, 0x3c, 0x5f, 0xac, 0xdb, 0x8c, 0xa9, 0x08, 0x53, 0x5b, 0x90, 0xc2, 0x02, 0x68, 0xfd, 0x57,
- 0x1a, 0x5c, 0x4c, 0xf9, 0xcb, 0x53, 0x34, 0x8b, 0x67, 0x49, 0xdc, 0x55, 0x8a, 0x66, 0x61, 0xc9,
- 0x25, 0xe8, 0x75, 0xa8, 0xf2, 0xe7, 0x26, 0xd3, 0xb5, 0xe5, 0x04, 0xd6, 0x83, 0x09, 0x6c, 0xca,
- 0xf6, 0x07, 0x87, 0xcb, 0x57, 0x32, 0x8e, 0xed, 0x81, 0x18, 0x2b, 0x00, 0xb4, 0x0c, 0x15, 0xe2,
- 0x79, 0xae, 0x27, 0x93, 0xfd, 0x24, 0x9b, 0xa9, 0xbb, 0xac, 0x01, 0x8b, 0x76, 0xfd, 0xd7, 0x61,
- 0x90, 0xb2, 0xec, 0xcb, 0xfc, 0x63, 0x8b, 0x93, 0x4c, 0x8c, 0x6c, 0xe9, 0x30, 0x97, 0xa0, 0x01,
- 0x5c, 0xb0, 0x12, 0xe9, 0x5a, 0xee, 0xce, 0x7a, 0xb1, 0x69, 0x54, 0x66, 0x8d, 0x05, 0x09, 0x7f,
- 0x21, 0x29, 0xc1, 0xa9, 0x2e, 0x74, 0x02, 0x29, 0x2d, 0xf4, 0x06, 0x8c, 0xed, 0x52, 0xda, 0xcf,
- 0x78, 0x37, 0x38, 0xa1, 0x48, 0x84, 0x2e, 0x54, 0xf9, 0xe8, 0xda, 0xed, 0x26, 0xe6, 0x50, 0xfa,
- 0xef, 0x4b, 0x6a, 0x3e, 0xf8, 0x61, 0xeb, 0x9b, 0x6a, 0xb4, 0x2b, 0xb6, 0xe1, 0xfb, 0x3c, 0x85,
- 0x89, 0x8b, 0x81, 0xb9, 0x88, 0xe3, 0x4a, 0x86, 0x53, 0xda, 0xa8, 0x1d, 0x16, 0x4f, 0x6d, 0x94,
- 0xe2, 0x39, 0x95, 0x55, 0x38, 0xd1, 0x3d, 0x28, 0x53, 0xbb, 0xe8, 0x01, 0x5f, 0x22, 0xb6, 0xd7,
- 0x5a, 0x8d, 0x29, 0x39, 0xe5, 0xe5, 0xf6, 0x5a, 0x0b, 0x33, 0x08, 0xb4, 0x09, 0x15, 0x6f, 0x60,
- 0x13, 0x56, 0x07, 0xca, 0xc5, 0xeb, 0x0a, 0x9b, 0xc1, 0x70, 0xf3, 0xb1, 0x5f, 0x3e, 0x16, 0x38,
- 0xfa, 0x8f, 0x34, 0x98, 0x8e, 0x55, 0x0b, 0xe4, 0xc1, 0x79, 0x3b, 0xb2, 0x77, 0xe4, 0x3c, 0x3c,
- 0x37, 0xfc, 0xae, 0x93, 0x9b, 0x7e, 0x4e, 0xf6, 0x7b, 0x3e, 0x2a, 0xc3, 0xb1, 0x3e, 0x74, 0x03,
- 0x20, 0x1c, 0x36, 0xdb, 0x07, 0x2c, 0x78, 0xc5, 0x86, 0x97, 0xfb, 0x80, 0xc5, 0xb4, 0x8f, 0x45,
- 0x3b, 0xba, 0x09, 0xe0, 0x13, 0xd3, 0x23, 0x74, 0x23, 0x4c, 0x5c, 0xaa, 0x1c, 0xb7, 0x94, 0x04,
- 0x47, 0xb4, 0xf4, 0x3f, 0x69, 0x30, 0xbd, 0x41, 0xe8, 0xf7, 0x5d, 0x6f, 0xaf, 0xe9, 0xda, 0x96,
- 0x79, 0x70, 0x06, 0x24, 0x00, 0xc7, 0x48, 0xc0, 0x49, 0xf9, 0x32, 0xe6, 0x5d, 0x1e, 0x15, 0xd0,
- 0x3f, 0xd2, 0x60, 0x3e, 0xa6, 0x79, 0x37, 0xcc, 0x07, 0x2a, 0x41, 0x6b, 0x85, 0x12, 0x74, 0x0c,
- 0x86, 0x25, 0xb5, 0xec, 0x04, 0x8d, 0xd6, 0xa0, 0x44, 0x5d, 0x19, 0xbd, 0xc3, 0x61, 0x12, 0xe2,
- 0x85, 0x35, 0xa7, 0xed, 0xe2, 0x12, 0x75, 0xd9, 0x42, 0x2c, 0xc4, 0xb4, 0xa2, 0x19, 0xed, 0x21,
- 0x8d, 0x00, 0xc3, 0xd8, 0x8e, 0xe7, 0xf6, 0x46, 0x1e, 0x83, 0x5a, 0x88, 0x57, 0x3c, 0xb7, 0x87,
- 0x39, 0x96, 0xfe, 0xb1, 0x06, 0x17, 0x63, 0x9a, 0x67, 0xc0, 0x1b, 0xde, 0x88, 0xf3, 0x86, 0x6b,
- 0xc3, 0x0c, 0x24, 0x87, 0x3d, 0x7c, 0x5c, 0x4a, 0x0c, 0x83, 0x0d, 0x18, 0xed, 0xc0, 0x54, 0xdf,
- 0xed, 0xb4, 0x4e, 0xe1, 0xad, 0x77, 0x96, 0xf1, 0xb9, 0x66, 0x88, 0x85, 0xa3, 0xc0, 0xe8, 0x3e,
- 0x5c, 0x64, 0xd4, 0xc2, 0xef, 0x1b, 0x26, 0x69, 0x9d, 0xc2, 0xed, 0xd7, 0x23, 0xfc, 0x31, 0x29,
- 0x89, 0x88, 0xd3, 0x9d, 0xa0, 0x75, 0x98, 0xb0, 0xfa, 0xfc, 0x7c, 0x21, 0x89, 0xe4, 0x89, 0x24,
- 0x4c, 0x9c, 0x46, 0x44, 0x8a, 0x97, 0x3f, 0x70, 0x80, 0xa1, 0xff, 0x25, 0x19, 0x0d, 0x9c, 0xae,
- 0xbe, 0x1a, 0xa1, 0x07, 0xf2, 0xd9, 0x67, 0x34, 0x6a, 0xb0, 0x21, 0x99, 0xc8, 0xa8, 0xcc, 0xba,
- 0x9a, 0xe0, 0x2d, 0x5f, 0x81, 0x09, 0xe2, 0x74, 0x38, 0x59, 0x17, 0x77, 0x2a, 0x7c, 0x54, 0x77,
- 0x45, 0x13, 0x0e, 0x64, 0xfa, 0x8f, 0xcb, 0x89, 0x51, 0xf1, 0x32, 0xfb, 0xee, 0xa9, 0x05, 0x87,
- 0x22, 0xfc, 0xb9, 0x01, 0xb2, 0x1d, 0xd2, 0x3f, 0x11, 0xf3, 0xdf, 0x18, 0x26, 0xe6, 0xa3, 0xf5,
- 0x2f, 0x97, 0xfc, 0xa1, 0xef, 0xc0, 0x38, 0x11, 0x5d, 0x88, 0xaa, 0x7a, 0x6b, 0x98, 0x2e, 0xc2,
- 0xf4, 0x1b, 0x9e, 0xb3, 0x64, 0x9b, 0x44, 0x45, 0x2f, 0xb3, 0xf9, 0x62, 0xba, 0xec, 0x58, 0x22,
- 0xd8, 0xf3, 0x64, 0xe3, 0x31, 0x31, 0x6c, 0xd5, 0xfc, 0xe0, 0x70, 0x19, 0xc2, 0x9f, 0x38, 0x6a,
- 0xc1, 0x1f, 0xe2, 0xe4, 0x9d, 0xcd, 0xd9, 0x7c, 0xcc, 0x34, 0xdc, 0x43, 0x5c, 0xe8, 0xda, 0xa9,
- 0x3d, 0xc4, 0x45, 0x20, 0x8f, 0x3f, 0xc3, 0xfe, 0xa3, 0x04, 0x97, 0x42, 0xe5, 0xc2, 0x0f, 0x71,
- 0x19, 0x26, 0xff, 0xfb, 0xa0, 0xa9, 0xd8, 0xe3, 0x58, 0x38, 0x75, 0xff, 0x79, 0x8f, 0x63, 0xa1,
- 0x6f, 0x39, 0xd5, 0xee, 0x37, 0xa5, 0xe8, 0x00, 0x86, 0x7c, 0xa1, 0x39, 0x85, 0x6f, 0x7a, 0xbe,
- 0x74, 0x8f, 0x3c, 0xfa, 0x07, 0x63, 0x70, 0x21, 0xb9, 0x1b, 0x63, 0x17, 0xf9, 0xda, 0x89, 0x17,
- 0xf9, 0x4d, 0x98, 0xdb, 0x19, 0xd8, 0xf6, 0x01, 0x1f, 0x43, 0xe4, 0x36, 0x5f, 0x3c, 0x01, 0xfc,
- 0x9f, 0xb4, 0x9c, 0x7b, 0x25, 0x43, 0x07, 0x67, 0x5a, 0xa6, 0xef, 0xf5, 0xc7, 0xfe, 0xdd, 0x7b,
- 0xfd, 0xca, 0x08, 0xf7, 0xfa, 0x39, 0x17, 0xf1, 0x13, 0x23, 0x5c, 0xc4, 0x67, 0xbf, 0xb2, 0x94,
- 0x47, 0x7a, 0x65, 0x19, 0xe5, 0x52, 0x3f, 0x23, 0x1f, 0x9e, 0xf8, 0xad, 0xcb, 0x4b, 0x30, 0x13,
- 0x7f, 0xb3, 0x12, 0x61, 0x21, 0x9e, 0xcd, 0xe4, 0x0b, 0x51, 0x24, 0x2c, 0x44, 0x3b, 0x56, 0x1a,
- 0xfa, 0x91, 0x06, 0x97, 0xb3, 0xbf, 0x4d, 0x41, 0x36, 0xcc, 0xf4, 0x8c, 0xfb, 0xd1, 0xef, 0x85,
- 0xb4, 0x11, 0x89, 0x0f, 0x7f, 0x61, 0x58, 0x8f, 0x61, 0xe1, 0x04, 0x36, 0x7a, 0x1b, 0xaa, 0x3d,
- 0xe3, 0x7e, 0x6b, 0xe0, 0x75, 0xc9, 0xc8, 0x04, 0x8b, 0xef, 0xc8, 0x75, 0x89, 0x82, 0x15, 0x9e,
- 0xfe, 0x85, 0x06, 0xf3, 0x39, 0xef, 0x06, 0xff, 0x45, 0xa3, 0x7c, 0xaf, 0x04, 0x95, 0x96, 0x69,
- 0xd8, 0xe4, 0x0c, 0xb8, 0xc9, 0x6b, 0x31, 0x6e, 0x72, 0xd2, 0x37, 0xae, 0xdc, 0xab, 0x5c, 0x5a,
- 0x82, 0x13, 0xb4, 0xe4, 0xa9, 0x42, 0x68, 0xc7, 0x33, 0x92, 0xe7, 0x61, 0x52, 0x75, 0x3a, 0x5c,
- 0xa2, 0xd4, 0x7f, 0x59, 0x82, 0xa9, 0x48, 0x17, 0x43, 0xa6, 0xd9, 0x9d, 0x58, 0x6d, 0x29, 0x17,
- 0xb8, 0xb4, 0x89, 0xf4, 0x55, 0x0b, 0xaa, 0x89, 0xf8, 0x46, 0x23, 0x7c, 0x95, 0x4f, 0x17, 0x99,
- 0x97, 0x60, 0x86, 0x1a, 0x5e, 0x97, 0x50, 0x75, 0x02, 0x10, 0xf7, 0x95, 0xea, 0x63, 0xa1, 0x76,
- 0x4c, 0x8a, 0x13, 0xda, 0x8b, 0x2f, 0xc2, 0x74, 0xac, 0xb3, 0x61, 0x3e, 0xb1, 0x68, 0xac, 0x7c,
- 0xf2, 0xf9, 0xd2, 0xb9, 0x4f, 0x3f, 0x5f, 0x3a, 0xf7, 0xd9, 0xe7, 0x4b, 0xe7, 0x7e, 0x70, 0xb4,
- 0xa4, 0x7d, 0x72, 0xb4, 0xa4, 0x7d, 0x7a, 0xb4, 0xa4, 0x7d, 0x76, 0xb4, 0xa4, 0xfd, 0xed, 0x68,
- 0x49, 0xfb, 0xe9, 0x17, 0x4b, 0xe7, 0xde, 0x7e, 0xec, 0xd8, 0xff, 0x71, 0xf1, 0xaf, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x6a, 0x79, 0xb9, 0xab, 0x91, 0x31, 0x00, 0x00,
-}
+func (m *ScaleStatus) Reset() { *m = ScaleStatus{} }
func (m *DaemonSet) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -2070,7 +649,7 @@ func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.UpdatedAnnotations {
keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ sort.Strings(keysForUpdatedAnnotations)
for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])]
baseI := i
@@ -3759,7 +2338,7 @@ func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Selector {
keysForSelector = append(keysForSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.Selector[string(keysForSelector[iNdEx])]
baseI := i
@@ -4761,7 +3340,7 @@ func (this *DeploymentRollback) String() string {
for k := range this.UpdatedAnnotations {
keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations)
+ sort.Strings(keysForUpdatedAnnotations)
mapStringForUpdatedAnnotations := "map[string]string{"
for _, k := range keysForUpdatedAnnotations {
mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k])
@@ -5268,7 +3847,7 @@ func (this *ScaleStatus) String() string {
for k := range this.Selector {
keysForSelector = append(keysForSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+ sort.Strings(keysForSelector)
mapStringForSelector := "map[string]string{"
for _, k := range keysForSelector {
mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
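
The extensions/v1beta1 hunks above strip the legacy gogo-protobuf plumbing (Descriptor, the XXX_* helpers, proto.RegisterType calls, and the embedded gzipped file descriptor), keeping only the Reset() stubs, and they switch map-key sorting from github.com/gogo/protobuf/sortkeys to the standard library. The sortkeys helper is essentially a thin wrapper over the sort package, so marshalling output should stay deterministic. A minimal sketch of the sort-then-reverse-iterate pattern the generated MarshalToSizedBuffer uses (the map contents here are illustrative):

package main

import (
	"fmt"
	"sort"
)

func main() {
	selector := map[string]string{"app": "demo", "env": "dev", "tier": "web"}

	// Collect and sort the keys so output ordering is deterministic,
	// mirroring the sort.Strings call in the generated marshaller.
	keys := make([]string, 0, len(selector))
	for k := range selector {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	// The size-buffer marshaller writes back to front, so it walks the
	// sorted keys in reverse.
	for i := len(keys) - 1; i >= 0; i-- {
		fmt.Printf("%s=%s\n", keys[i], selector[keys[i]])
	}
}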
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index fed0b483..c664c71b 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -345,7 +345,7 @@ message DeploymentStatus {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 9;
@@ -924,7 +924,7 @@ message ReplicaSetStatus {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
optional int32 terminatingReplicas = 7;
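
The comment changes above track the DeploymentReplicaSetTerminatingReplicas feature gate graduating from alpha to beta (enabled by default), so terminatingReplicas is populated without extra cluster configuration wherever the gate is on. The field remains optional, so consumers should still guard against it being unset. A small sketch using the vendored types (the sample values are illustrative):

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

// printTerminating reads the optional field: it is a *int32 and stays nil
// when the API server does not report it (for example when the
// DeploymentReplicaSetTerminatingReplicas gate is disabled).
func printTerminating(status extensionsv1beta1.DeploymentStatus) {
	if status.TerminatingReplicas == nil {
		fmt.Println("terminatingReplicas not reported")
		return
	}
	fmt.Printf("terminatingReplicas: %d\n", *status.TerminatingReplicas)
}

func main() {
	n := int32(2)
	printTerminating(extensionsv1beta1.DeploymentStatus{TerminatingReplicas: &n})
	printTerminating(extensionsv1beta1.DeploymentStatus{})
}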
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..ef18ae29
--- /dev/null
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,112 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*DaemonSet) ProtoMessage() {}
+
+func (*DaemonSetCondition) ProtoMessage() {}
+
+func (*DaemonSetList) ProtoMessage() {}
+
+func (*DaemonSetSpec) ProtoMessage() {}
+
+func (*DaemonSetStatus) ProtoMessage() {}
+
+func (*DaemonSetUpdateStrategy) ProtoMessage() {}
+
+func (*Deployment) ProtoMessage() {}
+
+func (*DeploymentCondition) ProtoMessage() {}
+
+func (*DeploymentList) ProtoMessage() {}
+
+func (*DeploymentRollback) ProtoMessage() {}
+
+func (*DeploymentSpec) ProtoMessage() {}
+
+func (*DeploymentStatus) ProtoMessage() {}
+
+func (*DeploymentStrategy) ProtoMessage() {}
+
+func (*HTTPIngressPath) ProtoMessage() {}
+
+func (*HTTPIngressRuleValue) ProtoMessage() {}
+
+func (*IPBlock) ProtoMessage() {}
+
+func (*Ingress) ProtoMessage() {}
+
+func (*IngressBackend) ProtoMessage() {}
+
+func (*IngressList) ProtoMessage() {}
+
+func (*IngressLoadBalancerIngress) ProtoMessage() {}
+
+func (*IngressLoadBalancerStatus) ProtoMessage() {}
+
+func (*IngressPortStatus) ProtoMessage() {}
+
+func (*IngressRule) ProtoMessage() {}
+
+func (*IngressRuleValue) ProtoMessage() {}
+
+func (*IngressSpec) ProtoMessage() {}
+
+func (*IngressStatus) ProtoMessage() {}
+
+func (*IngressTLS) ProtoMessage() {}
+
+func (*NetworkPolicy) ProtoMessage() {}
+
+func (*NetworkPolicyEgressRule) ProtoMessage() {}
+
+func (*NetworkPolicyIngressRule) ProtoMessage() {}
+
+func (*NetworkPolicyList) ProtoMessage() {}
+
+func (*NetworkPolicyPeer) ProtoMessage() {}
+
+func (*NetworkPolicyPort) ProtoMessage() {}
+
+func (*NetworkPolicySpec) ProtoMessage() {}
+
+func (*ReplicaSet) ProtoMessage() {}
+
+func (*ReplicaSetCondition) ProtoMessage() {}
+
+func (*ReplicaSetList) ProtoMessage() {}
+
+func (*ReplicaSetSpec) ProtoMessage() {}
+
+func (*ReplicaSetStatus) ProtoMessage() {}
+
+func (*RollbackConfig) ProtoMessage() {}
+
+func (*RollingUpdateDaemonSet) ProtoMessage() {}
+
+func (*RollingUpdateDeployment) ProtoMessage() {}
+
+func (*Scale) ProtoMessage() {}
+
+func (*ScaleSpec) ProtoMessage() {}
+
+func (*ScaleStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/types.go b/operator/vendor/k8s.io/api/extensions/v1beta1/types.go
index c7b50e05..c0d8b4f9 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -274,7 +274,7 @@ type DeploymentStatus struct {
// Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
// .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,9,opt,name=terminatingReplicas"`
@@ -1006,7 +1006,7 @@ type ReplicaSetStatus struct {
// The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
// and have not yet reached the Failed or Succeeded .status.phase.
//
- // This is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
// +optional
TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty" protobuf:"varint,7,opt,name=terminatingReplicas"`
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 8a158233..ce64d7a2 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -174,7 +174,7 @@ var map_DeploymentStatus = map[string]string{
"readyReplicas": "Total number of non-terminating pods targeted by this Deployment with a Ready Condition.",
"availableReplicas": "Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.",
"unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.",
- "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "Total number of terminating pods targeted by this deployment. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"conditions": "Represents the latest available observations of a deployment's current state.",
"collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.",
}
@@ -461,7 +461,7 @@ var map_ReplicaSetStatus = map[string]string{
"fullyLabeledReplicas": "The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.",
"readyReplicas": "The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.",
"availableReplicas": "The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.",
- "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is an alpha field. Enable DeploymentReplicaSetTerminatingReplicas to be able to use this field.",
+ "terminatingReplicas": "The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.\n\nThis is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).",
"observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.",
"conditions": "Represents the latest available observations of a replica set's current state.",
}
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..bba6d5d9
--- /dev/null
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,247 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetList) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DaemonSetUpdateStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DaemonSetUpdateStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Deployment) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.Deployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentCondition) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentList) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentRollback) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentRollback"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeploymentStrategy) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.DeploymentStrategy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressPath) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.HTTPIngressPath"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.HTTPIngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPBlock) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IPBlock"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Ingress) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.Ingress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressBackend) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressBackend"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressList) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerIngress) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressLoadBalancerIngress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressLoadBalancerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressPortStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressPortStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRule) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressSpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressTLS) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.IngressTLS"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyEgressRule) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicyEgressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyIngressRule) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicyIngressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyPeer) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicyPeer"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyPort) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicyPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.NetworkPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSet) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ReplicaSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetCondition) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ReplicaSetCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetList) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ReplicaSetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ReplicaSetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ReplicaSetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ReplicaSetStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollbackConfig) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.RollbackConfig"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDaemonSet) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.RollingUpdateDaemonSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RollingUpdateDeployment) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.RollingUpdateDeployment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scale) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.Scale"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ScaleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ScaleStatus) OpenAPIModelName() string {
+ return "io.k8s.api.extensions.v1beta1.ScaleStatus"
+}
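
The new zz_generated.model_name.go file gives every type in the package an OpenAPIModelName() accessor that returns its canonical OpenAPI model name. A minimal usage sketch (the main package wrapper is illustrative):

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	// Each generated accessor returns the model name shown in the diff,
	// e.g. "io.k8s.api.extensions.v1beta1.DaemonSet".
	fmt.Println(extensionsv1beta1.DaemonSet{}.OpenAPIModelName())
	fmt.Println(extensionsv1beta1.ScaleStatus{}.OpenAPIModelName())
}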
diff --git a/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go b/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
index 6d2a1666..f4f0e317 100644
--- a/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
+++ b/operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.validations.go
@@ -37,6 +37,7 @@ func init() { localSchemeBuilder.Register(RegisterValidations) }
// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterValidations(scheme *runtime.Scheme) error {
+ // type Scale
scheme.AddValidationFunc((*Scale)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
switch op.Request.SubresourcePath() {
case "/scale":
@@ -47,32 +48,43 @@ func RegisterValidations(scheme *runtime.Scheme) error {
return nil
}
+// Validate_Scale validates an instance of Scale according
+// to declarative validation rules in the API schema.
func Validate_Scale(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *Scale) (errs field.ErrorList) {
// field Scale.TypeMeta has no validation
// field Scale.ObjectMeta has no validation
// field Scale.Spec
errs = append(errs,
- func(fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
+ func(fldPath *field.Path, obj, oldObj *ScaleSpec, oldValueCorrelated bool) (errs field.ErrorList) {
+ // don't revalidate unchanged data
+ if oldValueCorrelated && op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
+ return nil
+ }
+ // call the type's validation function
errs = append(errs, Validate_ScaleSpec(ctx, op, fldPath, obj, oldObj)...)
return
- }(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }))...)
+ }(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *Scale) *ScaleSpec { return &oldObj.Spec }), oldObj != nil)...)
// field Scale.Status has no validation
return errs
}
+// Validate_ScaleSpec validates an instance of ScaleSpec according
+// to declarative validation rules in the API schema.
func Validate_ScaleSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *ScaleSpec) (errs field.ErrorList) {
// field ScaleSpec.Replicas
errs = append(errs,
- func(fldPath *field.Path, obj, oldObj *int32) (errs field.ErrorList) {
+ func(fldPath *field.Path, obj, oldObj *int32, oldValueCorrelated bool) (errs field.ErrorList) {
// optional value-type fields with zero-value defaults are purely documentation
- if op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
- return nil // no changes
+ // don't revalidate unchanged data
+ if oldValueCorrelated && op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
+ return nil
}
+ // call field-attached validations
errs = append(errs, validate.Minimum(ctx, op, fldPath, obj, oldObj, 0)...)
return
- }(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }))...)
+ }(fldPath.Child("replicas"), &obj.Replicas, safe.Field(oldObj, func(oldObj *ScaleSpec) *int32 { return &oldObj.Replicas }), oldObj != nil)...)
return errs
}
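
The validation hunk above threads a new oldValueCorrelated flag through the generated closures so that, on update, a field whose old value can be correlated and is unchanged is not revalidated. A stripped-down sketch of that short-circuit, with the Kubernetes operation and field-path machinery replaced by plain types for illustration:

package main

import "fmt"

// validateReplicas mirrors the generated pattern: skip revalidation when the
// request is an update, the old value could be correlated, and nothing changed.
func validateReplicas(isUpdate, oldValueCorrelated bool, obj, oldObj *int32) []string {
	if oldValueCorrelated && isUpdate && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
		return nil // unchanged data, nothing to revalidate
	}
	if obj != nil && *obj < 0 {
		return []string{"replicas must be greater than or equal to 0"}
	}
	return nil
}

func main() {
	bad := int32(-1)
	// On update, an unchanged (even previously invalid) value returns early.
	fmt.Println(validateReplicas(true, true, &bad, &bad))
	// On create there is no old value to correlate, so validation runs.
	fmt.Println(validateReplicas(false, false, &bad, nil))
}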
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1/doc.go b/operator/vendor/k8s.io/api/flowcontrol/v1/doc.go
index ad5f4579..b1367b92 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1/doc.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.flowcontrol.v1
// +groupName=flowcontrol.apiserver.k8s.io
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go
index b342445f..d1f31868 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1/generated.pb.go
@@ -24,801 +24,56 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
-func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{0}
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
-}
+func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (*FlowDistinguisherMethod) ProtoMessage() {}
-func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{1}
-}
-func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src)
-}
-func (m *FlowDistinguisherMethod) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m)
-}
+func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
+func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-func (*FlowSchema) ProtoMessage() {}
-func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{2}
-}
-func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchema.Merge(m, src)
-}
-func (m *FlowSchema) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchema.DiscardUnknown(m)
-}
+func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
+func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (*FlowSchemaCondition) ProtoMessage() {}
-func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{3}
-}
-func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaCondition.Merge(m, src)
-}
-func (m *FlowSchemaCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m)
-}
+func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
+func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-func (*FlowSchemaList) ProtoMessage() {}
-func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{4}
-}
-func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaList.Merge(m, src)
-}
-func (m *FlowSchemaList) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaList) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaList.DiscardUnknown(m)
-}
+func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
+func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (*FlowSchemaSpec) ProtoMessage() {}
-func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{5}
-}
-func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaSpec.Merge(m, src)
-}
-func (m *FlowSchemaSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-func (*FlowSchemaStatus) ProtoMessage() {}
-func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{6}
-}
-func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaStatus.Merge(m, src)
-}
-func (m *FlowSchemaStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (*GroupSubject) ProtoMessage() {}
-func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{7}
-}
-func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupSubject.Merge(m, src)
-}
-func (m *GroupSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupSubject.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-func (*LimitResponse) ProtoMessage() {}
-func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{8}
-}
-func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitResponse.Merge(m, src)
-}
-func (m *LimitResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitResponse.DiscardUnknown(m)
-}
+func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
+func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
-func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{9}
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-func (*NonResourcePolicyRule) ProtoMessage() {}
-func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{10}
-}
-func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourcePolicyRule.Merge(m, src)
-}
-func (m *NonResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
-
-func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (*PolicyRulesWithSubjects) ProtoMessage() {}
-func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{11}
-}
-func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src)
-}
-func (m *PolicyRulesWithSubjects) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m)
-}
+func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
+func (m *Subject) Reset() { *m = Subject{} }
-func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-func (*PriorityLevelConfiguration) ProtoMessage() {}
-func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{12}
-}
-func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src)
-}
-func (m *PriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
-func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{13}
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-func (*PriorityLevelConfigurationList) ProtoMessage() {}
-func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{14}
-}
-func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (*PriorityLevelConfigurationReference) ProtoMessage() {}
-func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{15}
-}
-func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
-func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{16}
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
-func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{17}
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
-
-func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-func (*QueuingConfiguration) ProtoMessage() {}
-func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{18}
-}
-func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *QueuingConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueuingConfiguration.Merge(m, src)
-}
-func (m *QueuingConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *QueuingConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
-
-func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (*ResourcePolicyRule) ProtoMessage() {}
-func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{19}
-}
-func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePolicyRule.Merge(m, src)
-}
-func (m *ResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
-
-func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-func (*ServiceAccountSubject) ProtoMessage() {}
-func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{20}
-}
-func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountSubject.Merge(m, src)
-}
-func (m *ServiceAccountSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{21}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func (m *UserSubject) Reset() { *m = UserSubject{} }
-func (*UserSubject) ProtoMessage() {}
-func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_5d08a1401821035d, []int{22}
-}
-func (m *UserSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserSubject.Merge(m, src)
-}
-func (m *UserSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *UserSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_UserSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserSubject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.ExemptPriorityLevelConfiguration")
- proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1.FlowDistinguisherMethod")
- proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1.FlowSchema")
- proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaCondition")
- proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaList")
- proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaSpec")
- proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1.FlowSchemaStatus")
- proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1.GroupSubject")
- proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1.LimitResponse")
- proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.LimitedPriorityLevelConfiguration")
- proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.NonResourcePolicyRule")
- proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1.PolicyRulesWithSubjects")
- proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfiguration")
- proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationCondition")
- proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationList")
- proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationReference")
- proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationSpec")
- proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1.PriorityLevelConfigurationStatus")
- proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1.QueuingConfiguration")
- proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1.ResourcePolicyRule")
- proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1.ServiceAccountSubject")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1.Subject")
- proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1.UserSubject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/flowcontrol/v1/generated.proto", fileDescriptor_5d08a1401821035d)
-}
-
-var fileDescriptor_5d08a1401821035d = []byte{
- // 1575 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0xdb, 0x56,
- 0x16, 0x36, 0x65, 0xc9, 0xb6, 0x8e, 0x9f, 0xb9, 0x8e, 0x61, 0xc5, 0x19, 0x48, 0x0e, 0x07, 0x93,
- 0xc7, 0x64, 0x42, 0x25, 0xc6, 0x64, 0x26, 0x41, 0x66, 0x26, 0x08, 0x93, 0x4c, 0x5e, 0xb6, 0xe3,
- 0x5c, 0xe5, 0x51, 0xa4, 0x05, 0x5a, 0x9a, 0xba, 0x96, 0x18, 0x8b, 0x8f, 0xf2, 0x92, 0x72, 0x5d,
- 0xa0, 0x40, 0x7f, 0x42, 0x56, 0x5d, 0x76, 0xd1, 0xfe, 0x83, 0xae, 0x8a, 0x76, 0xd3, 0x65, 0x76,
- 0xcd, 0x32, 0xed, 0x42, 0x68, 0xd4, 0xbf, 0xd0, 0x45, 0x9b, 0x55, 0x71, 0x2f, 0x2f, 0x49, 0x51,
- 0x12, 0x69, 0xc1, 0x8b, 0x74, 0xd3, 0x9d, 0x79, 0xce, 0x77, 0xbe, 0x73, 0xef, 0xb9, 0xe7, 0x25,
- 0xc3, 0x99, 0xdd, 0x4b, 0x54, 0x31, 0xec, 0xaa, 0xe6, 0x18, 0xd5, 0x9d, 0x96, 0xbd, 0xa7, 0xdb,
- 0x96, 0xe7, 0xda, 0xad, 0x6a, 0xfb, 0x42, 0xb5, 0x41, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9, 0x2b, 0x8e,
- 0x6b, 0x7b, 0x36, 0x3a, 0x16, 0x40, 0x15, 0xcd, 0x31, 0x94, 0x1e, 0xa8, 0xd2, 0xbe, 0xb0, 0x72,
- 0xae, 0x61, 0x78, 0x4d, 0x7f, 0x5b, 0xd1, 0x6d, 0xb3, 0xda, 0xb0, 0x1b, 0x76, 0x95, 0x5b, 0x6c,
- 0xfb, 0x3b, 0xfc, 0x8b, 0x7f, 0xf0, 0xbf, 0x02, 0xa6, 0x95, 0x7f, 0xc6, 0x4e, 0x4d, 0x4d, 0x6f,
- 0x1a, 0x16, 0x71, 0xf7, 0xab, 0xce, 0x6e, 0x83, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x36, 0xc4, 0xff,
- 0x4a, 0x35, 0xcd, 0xca, 0xf5, 0x2d, 0xcf, 0x30, 0xc9, 0x80, 0xc1, 0xbf, 0x0e, 0x32, 0xa0, 0x7a,
- 0x93, 0x98, 0x5a, 0xbf, 0x9d, 0xfc, 0xad, 0x04, 0xab, 0x37, 0x3f, 0x22, 0xa6, 0xe3, 0x6d, 0xb9,
- 0x86, 0xed, 0x1a, 0xde, 0xfe, 0x3a, 0x69, 0x93, 0xd6, 0x75, 0xdb, 0xda, 0x31, 0x1a, 0xbe, 0xab,
- 0x79, 0x86, 0x6d, 0xa1, 0x77, 0xa0, 0x64, 0xd9, 0xa6, 0x61, 0x69, 0x4c, 0xae, 0xfb, 0xae, 0x4b,
- 0x2c, 0x7d, 0xbf, 0xd6, 0xd4, 0x5c, 0x42, 0x4b, 0xd2, 0xaa, 0x74, 0xba, 0xa0, 0xfe, 0xa5, 0xdb,
- 0xa9, 0x94, 0x36, 0x53, 0x30, 0x38, 0xd5, 0x1a, 0xfd, 0x17, 0xe6, 0x5b, 0xc4, 0xaa, 0x6b, 0xdb,
- 0x2d, 0xb2, 0x45, 0x5c, 0x9d, 0x58, 0x5e, 0x29, 0xc7, 0x09, 0x17, 0xbb, 0x9d, 0xca, 0xfc, 0x7a,
- 0x52, 0x85, 0xfb, 0xb1, 0xf2, 0x53, 0x58, 0xfe, 0x7f, 0xcb, 0xde, 0xbb, 0x61, 0x50, 0xcf, 0xb0,
- 0x1a, 0xbe, 0x41, 0x9b, 0xc4, 0xdd, 0x20, 0x5e, 0xd3, 0xae, 0xa3, 0xab, 0x90, 0xf7, 0xf6, 0x1d,
- 0xc2, 0xcf, 0x57, 0x54, 0xcf, 0xbe, 0xe8, 0x54, 0xc6, 0xba, 0x9d, 0x4a, 0xfe, 0xe1, 0xbe, 0x43,
- 0xde, 0x74, 0x2a, 0xc7, 0x53, 0xcc, 0x98, 0x1a, 0x73, 0x43, 0xf9, 0x79, 0x0e, 0x80, 0xa1, 0x6a,
- 0x3c, 0x70, 0xe8, 0x03, 0x98, 0x62, 0x8f, 0x55, 0xd7, 0x3c, 0x8d, 0x73, 0x4e, 0xaf, 0x9d, 0x57,
- 0xe2, 0x24, 0x89, 0x62, 0xae, 0x38, 0xbb, 0x0d, 0x26, 0xa0, 0x0a, 0x43, 0x2b, 0xed, 0x0b, 0xca,
- 0xfd, 0xed, 0x67, 0x44, 0xf7, 0x36, 0x88, 0xa7, 0xa9, 0x48, 0x9c, 0x02, 0x62, 0x19, 0x8e, 0x58,
- 0xd1, 0x3d, 0xc8, 0x53, 0x87, 0xe8, 0x3c, 0x00, 0xd3, 0x6b, 0x67, 0x94, 0xd4, 0x14, 0x54, 0xe2,
- 0x63, 0xd5, 0x1c, 0xa2, 0xab, 0x33, 0xe1, 0xe5, 0xd8, 0x17, 0xe6, 0x24, 0xa8, 0x06, 0x13, 0xd4,
- 0xd3, 0x3c, 0x9f, 0x96, 0xc6, 0x39, 0xdd, 0xd9, 0xd1, 0xe8, 0xb8, 0x89, 0x3a, 0x27, 0x08, 0x27,
- 0x82, 0x6f, 0x2c, 0xa8, 0xe4, 0x57, 0x39, 0x58, 0x8c, 0xc1, 0xd7, 0x6d, 0xab, 0x6e, 0xf0, 0xfc,
- 0xb8, 0x92, 0x88, 0xf5, 0xa9, 0xbe, 0x58, 0x2f, 0x0f, 0x31, 0x89, 0xe3, 0x8c, 0x2e, 0x47, 0x27,
- 0xcd, 0x71, 0xf3, 0x13, 0x49, 0xe7, 0x6f, 0x3a, 0x95, 0xf9, 0xc8, 0x2c, 0x79, 0x1e, 0xd4, 0x06,
- 0xd4, 0xd2, 0xa8, 0xf7, 0xd0, 0xd5, 0x2c, 0x1a, 0xd0, 0x1a, 0x26, 0x11, 0x17, 0xfe, 0xfb, 0x68,
- 0xaf, 0xc3, 0x2c, 0xd4, 0x15, 0xe1, 0x12, 0xad, 0x0f, 0xb0, 0xe1, 0x21, 0x1e, 0xd0, 0x49, 0x98,
- 0x70, 0x89, 0x46, 0x6d, 0xab, 0x94, 0xe7, 0x47, 0x8e, 0xe2, 0x85, 0xb9, 0x14, 0x0b, 0x2d, 0x3a,
- 0x03, 0x93, 0x26, 0xa1, 0x54, 0x6b, 0x90, 0x52, 0x81, 0x03, 0xe7, 0x05, 0x70, 0x72, 0x23, 0x10,
- 0xe3, 0x50, 0x2f, 0x7f, 0x23, 0xc1, 0x5c, 0x1c, 0xa7, 0x75, 0x83, 0x7a, 0xe8, 0xbd, 0x81, 0x8c,
- 0x53, 0x46, 0xbb, 0x13, 0xb3, 0xe6, 0xf9, 0xb6, 0x20, 0xdc, 0x4d, 0x85, 0x92, 0x9e, 0x6c, 0xbb,
- 0x0b, 0x05, 0xc3, 0x23, 0x26, 0x8b, 0xfa, 0xf8, 0xe9, 0xe9, 0xb5, 0xbf, 0x8d, 0x94, 0x1f, 0xea,
- 0xac, 0x60, 0x2c, 0xdc, 0x61, 0xb6, 0x38, 0xa0, 0x90, 0x7f, 0x18, 0xef, 0x3d, 0x3c, 0xcb, 0x42,
- 0xf4, 0x85, 0x04, 0x2b, 0x4e, 0x6a, 0x47, 0x11, 0xf7, 0xf9, 0x5f, 0x86, 0xd3, 0xf4, 0x76, 0x84,
- 0xc9, 0x0e, 0x61, 0x3d, 0x84, 0xa8, 0xb2, 0x38, 0xcd, 0x4a, 0x06, 0x38, 0xe3, 0x14, 0xe8, 0x2e,
- 0x20, 0x53, 0xf3, 0x58, 0x1c, 0x1b, 0x5b, 0x2e, 0xd1, 0x49, 0x9d, 0xb1, 0x8a, 0x06, 0x14, 0xe5,
- 0xc4, 0xc6, 0x00, 0x02, 0x0f, 0xb1, 0x42, 0x9f, 0xc0, 0x62, 0x7d, 0xb0, 0x9f, 0x88, 0x64, 0x5c,
- 0x3b, 0x20, 0xba, 0x43, 0x3a, 0x91, 0xba, 0xdc, 0xed, 0x54, 0x16, 0x87, 0x28, 0xf0, 0x30, 0x3f,
- 0xe8, 0x09, 0x14, 0x5c, 0xbf, 0x45, 0x68, 0x29, 0xcf, 0x9f, 0x33, 0xcb, 0xe1, 0x96, 0xdd, 0x32,
- 0xf4, 0x7d, 0xcc, 0xd0, 0x4f, 0x0c, 0xaf, 0x59, 0xf3, 0x79, 0x33, 0xa2, 0xf1, 0xdb, 0x72, 0x15,
- 0x0e, 0xf8, 0xe4, 0x36, 0x2c, 0xf4, 0xf7, 0x07, 0xb4, 0x0d, 0xa0, 0x87, 0x25, 0xc9, 0x26, 0xc0,
- 0x78, 0x5f, 0x6e, 0xa6, 0x27, 0x50, 0x54, 0xc9, 0x71, 0x2f, 0x8c, 0x44, 0x14, 0xf7, 0xb0, 0xca,
- 0xe7, 0x61, 0xe6, 0x96, 0x6b, 0xfb, 0x8e, 0x38, 0x1e, 0x5a, 0x85, 0xbc, 0xa5, 0x99, 0x61, 0x8f,
- 0x89, 0x5a, 0xde, 0xa6, 0x66, 0x12, 0xcc, 0x35, 0xf2, 0xe7, 0x12, 0xcc, 0xae, 0x1b, 0xa6, 0xe1,
- 0x61, 0x42, 0x1d, 0xdb, 0xa2, 0x04, 0x5d, 0x4c, 0xf4, 0xa5, 0x13, 0x7d, 0x7d, 0xe9, 0x48, 0x02,
- 0xdc, 0xd3, 0x91, 0x1e, 0xc3, 0xe4, 0x87, 0x3e, 0xf1, 0x0d, 0xab, 0x21, 0x7a, 0x71, 0x35, 0xe3,
- 0x6e, 0x0f, 0x02, 0x64, 0x22, 0xb1, 0xd4, 0x69, 0x56, 0xe3, 0x42, 0x83, 0x43, 0x32, 0xf9, 0x97,
- 0x1c, 0x9c, 0xe0, 0x3e, 0x49, 0xfd, 0x0f, 0x19, 0xb6, 0x04, 0x66, 0x5b, 0xbd, 0x57, 0x16, 0xb7,
- 0x3b, 0x9d, 0x71, 0xbb, 0x44, 0x88, 0xd4, 0x25, 0x11, 0xc1, 0x64, 0x98, 0x71, 0x92, 0x75, 0xd8,
- 0x4c, 0x1f, 0x1f, 0x7d, 0xa6, 0xa3, 0xfb, 0xb0, 0xb4, 0x6d, 0xbb, 0xae, 0xbd, 0x67, 0x58, 0x0d,
- 0xee, 0x27, 0x24, 0xc9, 0x73, 0x92, 0x63, 0xdd, 0x4e, 0x65, 0x49, 0x1d, 0x06, 0xc0, 0xc3, 0xed,
- 0xe4, 0x3d, 0x58, 0xda, 0x64, 0x5d, 0x83, 0xda, 0xbe, 0xab, 0x93, 0x38, 0xfb, 0x51, 0x05, 0x0a,
- 0x6d, 0xe2, 0x6e, 0x07, 0x19, 0x5c, 0x54, 0x8b, 0x2c, 0xf7, 0x1f, 0x33, 0x01, 0x0e, 0xe4, 0xec,
- 0x26, 0x56, 0x6c, 0xf9, 0x08, 0xaf, 0xd3, 0xd2, 0x04, 0x87, 0xf2, 0x9b, 0x6c, 0x26, 0x55, 0xb8,
- 0x1f, 0x2b, 0x7f, 0x9f, 0x83, 0xe5, 0x94, 0x62, 0x43, 0x5b, 0x30, 0x45, 0xc5, 0xdf, 0xa2, 0x80,
- 0xe4, 0x8c, 0x67, 0x10, 0x66, 0x71, 0x43, 0x0f, 0x79, 0x70, 0xc4, 0x82, 0x9e, 0xc1, 0xac, 0x2b,
- 0xbc, 0x73, 0x77, 0xa2, 0xb1, 0x9f, 0xcb, 0xa0, 0x1d, 0x8c, 0x49, 0xfc, 0xc4, 0xb8, 0x97, 0x0b,
- 0x27, 0xa9, 0x51, 0x1b, 0x16, 0x7a, 0x2e, 0x1b, 0xb8, 0x1b, 0xe7, 0xee, 0xce, 0x67, 0xb8, 0x1b,
- 0xfa, 0x0a, 0x6a, 0x49, 0x78, 0x5c, 0xd8, 0xec, 0x63, 0xc4, 0x03, 0x3e, 0xe4, 0xef, 0x72, 0x90,
- 0xd1, 0xeb, 0xdf, 0xc2, 0x8e, 0xf6, 0x6e, 0x62, 0x47, 0xbb, 0x7c, 0xa8, 0xf9, 0x95, 0xba, 0xb3,
- 0xe9, 0x7d, 0x3b, 0xdb, 0x95, 0xc3, 0xd1, 0x67, 0xef, 0x70, 0xbf, 0xe6, 0xe0, 0xaf, 0xe9, 0xc6,
- 0xf1, 0x4e, 0x77, 0x2f, 0xd1, 0x3b, 0xff, 0xdd, 0xd7, 0x3b, 0x4f, 0x8d, 0x40, 0xf1, 0xe7, 0x8e,
- 0xd7, 0xb7, 0xe3, 0xfd, 0x28, 0x41, 0x39, 0x3d, 0x6e, 0x6f, 0x61, 0xe7, 0x7b, 0x9a, 0xdc, 0xf9,
- 0x2e, 0x1e, 0x2a, 0xbf, 0x52, 0x76, 0xc0, 0x5b, 0x59, 0x69, 0x15, 0xad, 0x6c, 0x23, 0x8c, 0xf1,
- 0x2f, 0x73, 0x59, 0x51, 0xe2, 0xcb, 0xe5, 0x01, 0xbf, 0x37, 0x12, 0xd6, 0x37, 0x2d, 0x36, 0x5c,
- 0x4c, 0x36, 0x1f, 0x82, 0x5c, 0xd4, 0x61, 0xb2, 0x15, 0x0c, 0x61, 0x51, 0xc5, 0xff, 0x39, 0x68,
- 0xfe, 0x65, 0x8d, 0xeb, 0x60, 0xd4, 0x0b, 0x18, 0x0e, 0x99, 0xd1, 0xfb, 0x30, 0x41, 0xf8, 0xaf,
- 0xea, 0x11, 0x4a, 0xf9, 0xa0, 0x9f, 0xdf, 0x2a, 0xb0, 0xb4, 0x0b, 0x50, 0x58, 0xd0, 0xca, 0x9f,
- 0x49, 0xb0, 0x7a, 0x50, 0x0f, 0x40, 0xee, 0x90, 0x3d, 0xed, 0x70, 0x3b, 0xf7, 0xe8, 0x7b, 0xdb,
- 0x57, 0x12, 0x1c, 0x1d, 0xb6, 0x13, 0xb1, 0x82, 0x62, 0x8b, 0x50, 0xb4, 0xc5, 0x44, 0x05, 0xf5,
- 0x80, 0x4b, 0xb1, 0xd0, 0xa2, 0x7f, 0xc0, 0x54, 0x53, 0xb3, 0xea, 0x35, 0xe3, 0xe3, 0x70, 0x15,
- 0x8f, 0x52, 0xfa, 0xb6, 0x90, 0xe3, 0x08, 0x81, 0x6e, 0xc0, 0x02, 0xb7, 0x5b, 0x27, 0x56, 0xc3,
- 0x6b, 0xf2, 0x77, 0x10, 0xdb, 0x46, 0x34, 0x57, 0x1e, 0xf4, 0xe9, 0xf1, 0x80, 0x85, 0xfc, 0x9b,
- 0x04, 0xe8, 0x30, 0x0b, 0xc2, 0x59, 0x28, 0x6a, 0x8e, 0xc1, 0xf7, 0xd4, 0xa0, 0xa8, 0x8a, 0xea,
- 0x6c, 0xb7, 0x53, 0x29, 0x5e, 0xdb, 0xba, 0x13, 0x08, 0x71, 0xac, 0x67, 0xe0, 0x70, 0x8a, 0x06,
- 0xd3, 0x52, 0x80, 0x43, 0xc7, 0x14, 0xc7, 0x7a, 0x74, 0x09, 0x66, 0xf4, 0x96, 0x4f, 0x3d, 0xe2,
- 0xd6, 0x74, 0xdb, 0x21, 0xbc, 0x09, 0x4d, 0xa9, 0x47, 0xc5, 0x9d, 0x66, 0xae, 0xf7, 0xe8, 0x70,
- 0x02, 0x89, 0x14, 0x00, 0x56, 0x47, 0xd4, 0xd1, 0x98, 0x9f, 0x02, 0xf7, 0x33, 0xc7, 0x1e, 0x6c,
- 0x33, 0x92, 0xe2, 0x1e, 0x84, 0xfc, 0x0c, 0x96, 0x6a, 0xc4, 0x6d, 0x1b, 0x3a, 0xb9, 0xa6, 0xeb,
- 0xb6, 0x6f, 0x79, 0xe1, 0xc6, 0x5d, 0x85, 0x62, 0x04, 0x13, 0xa5, 0x76, 0x44, 0xf8, 0x2f, 0x46,
- 0x5c, 0x38, 0xc6, 0x44, 0xb5, 0x9d, 0x4b, 0xad, 0xed, 0xaf, 0x73, 0x30, 0x19, 0xd3, 0xe7, 0x77,
- 0x0d, 0xab, 0x2e, 0x98, 0x8f, 0x87, 0xe8, 0x7b, 0x86, 0x55, 0x7f, 0xd3, 0xa9, 0x4c, 0x0b, 0x18,
- 0xfb, 0xc4, 0x1c, 0x88, 0x6e, 0x40, 0xde, 0xa7, 0xc4, 0x15, 0x55, 0x7b, 0x32, 0x23, 0x8f, 0x1f,
- 0x51, 0xe2, 0x86, 0x2b, 0xd3, 0x14, 0x23, 0x65, 0x02, 0xcc, 0xad, 0xd1, 0x6d, 0x28, 0x34, 0xd8,
- 0x7b, 0x88, 0xc2, 0x3c, 0x95, 0x41, 0xd3, 0xfb, 0xfb, 0x23, 0x78, 0x7c, 0x2e, 0xc1, 0x01, 0x01,
- 0x6a, 0xc1, 0x1c, 0x4d, 0x04, 0x8e, 0x3f, 0x52, 0xf6, 0x0a, 0x34, 0x34, 0xd2, 0x2a, 0xea, 0x76,
- 0x2a, 0x73, 0x49, 0x15, 0xee, 0xe3, 0x96, 0xab, 0x30, 0xdd, 0x73, 0xad, 0x83, 0xfb, 0xa8, 0x7a,
- 0xf5, 0xc5, 0xeb, 0xf2, 0xd8, 0xcb, 0xd7, 0xe5, 0xb1, 0x57, 0xaf, 0xcb, 0x63, 0x9f, 0x76, 0xcb,
- 0xd2, 0x8b, 0x6e, 0x59, 0x7a, 0xd9, 0x2d, 0x4b, 0xaf, 0xba, 0x65, 0xe9, 0xa7, 0x6e, 0x59, 0x7a,
- 0xfe, 0x73, 0x79, 0xec, 0xe9, 0xb1, 0xd4, 0xff, 0x89, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0x16,
- 0x4e, 0x14, 0xcf, 0x2f, 0x15, 0x00, 0x00,
-}
+func (m *UserSubject) Reset() { *m = UserSubject{} }
func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..51612dee
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1/generated.protomessage.pb.go
@@ -0,0 +1,68 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*FlowDistinguisherMethod) ProtoMessage() {}
+
+func (*FlowSchema) ProtoMessage() {}
+
+func (*FlowSchemaCondition) ProtoMessage() {}
+
+func (*FlowSchemaList) ProtoMessage() {}
+
+func (*FlowSchemaSpec) ProtoMessage() {}
+
+func (*FlowSchemaStatus) ProtoMessage() {}
+
+func (*GroupSubject) ProtoMessage() {}
+
+func (*LimitResponse) ProtoMessage() {}
+
+func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*NonResourcePolicyRule) ProtoMessage() {}
+
+func (*PolicyRulesWithSubjects) ProtoMessage() {}
+
+func (*PriorityLevelConfiguration) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationList) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationReference) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
+
+func (*QueuingConfiguration) ProtoMessage() {}
+
+func (*ResourcePolicyRule) ProtoMessage() {}
+
+func (*ServiceAccountSubject) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
+
+func (*UserSubject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/flowcontrol/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..a3bfb2c5
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1/zz_generated.model_name.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExemptPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowDistinguisherMethod) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchema) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowSchema"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowSchemaCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowSchemaList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowSchemaSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.FlowSchemaStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.GroupSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitResponse) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.LimitResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitedPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.NonResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRulesWithSubjects) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationReference) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QueuingConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.QueuingConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.ResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.ServiceAccountSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.Subject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1.UserSubject"
+}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
index 20268c1f..e66df16d 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.flowcontrol.v1beta1
// +groupName=flowcontrol.apiserver.k8s.io
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
index 96e368f6..de8950c2 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.pb.go
@@ -24,802 +24,56 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
-func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{0}
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
-}
+func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (*FlowDistinguisherMethod) ProtoMessage() {}
-func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{1}
-}
-func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src)
-}
-func (m *FlowDistinguisherMethod) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m)
-}
+func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
+func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-func (*FlowSchema) ProtoMessage() {}
-func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{2}
-}
-func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchema.Merge(m, src)
-}
-func (m *FlowSchema) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchema.DiscardUnknown(m)
-}
+func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
+func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (*FlowSchemaCondition) ProtoMessage() {}
-func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{3}
-}
-func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaCondition.Merge(m, src)
-}
-func (m *FlowSchemaCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m)
-}
+func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
+func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-func (*FlowSchemaList) ProtoMessage() {}
-func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{4}
-}
-func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaList.Merge(m, src)
-}
-func (m *FlowSchemaList) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaList) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaList.DiscardUnknown(m)
-}
+func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
+func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (*FlowSchemaSpec) ProtoMessage() {}
-func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{5}
-}
-func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaSpec.Merge(m, src)
-}
-func (m *FlowSchemaSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-func (*FlowSchemaStatus) ProtoMessage() {}
-func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{6}
-}
-func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaStatus.Merge(m, src)
-}
-func (m *FlowSchemaStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (*GroupSubject) ProtoMessage() {}
-func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{7}
-}
-func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupSubject.Merge(m, src)
-}
-func (m *GroupSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupSubject.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-func (*LimitResponse) ProtoMessage() {}
-func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{8}
-}
-func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitResponse.Merge(m, src)
-}
-func (m *LimitResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitResponse.DiscardUnknown(m)
-}
+func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
+func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
-func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{9}
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-func (*NonResourcePolicyRule) ProtoMessage() {}
-func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{10}
-}
-func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourcePolicyRule.Merge(m, src)
-}
-func (m *NonResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
-
-func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (*PolicyRulesWithSubjects) ProtoMessage() {}
-func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{11}
-}
-func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src)
-}
-func (m *PolicyRulesWithSubjects) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m)
-}
+func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
+func (m *Subject) Reset() { *m = Subject{} }
-func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-func (*PriorityLevelConfiguration) ProtoMessage() {}
-func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{12}
-}
-func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src)
-}
-func (m *PriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
-func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{13}
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-func (*PriorityLevelConfigurationList) ProtoMessage() {}
-func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{14}
-}
-func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (*PriorityLevelConfigurationReference) ProtoMessage() {}
-func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{15}
-}
-func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
-func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{16}
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
-func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{17}
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
-
-func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-func (*QueuingConfiguration) ProtoMessage() {}
-func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{18}
-}
-func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *QueuingConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueuingConfiguration.Merge(m, src)
-}
-func (m *QueuingConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *QueuingConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
-
-func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (*ResourcePolicyRule) ProtoMessage() {}
-func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{19}
-}
-func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePolicyRule.Merge(m, src)
-}
-func (m *ResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
-
-func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-func (*ServiceAccountSubject) ProtoMessage() {}
-func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{20}
-}
-func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountSubject.Merge(m, src)
-}
-func (m *ServiceAccountSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{21}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func (m *UserSubject) Reset() { *m = UserSubject{} }
-func (*UserSubject) ProtoMessage() {}
-func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_3a5cb22a034fcb2a, []int{22}
-}
-func (m *UserSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserSubject.Merge(m, src)
-}
-func (m *UserSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *UserSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_UserSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserSubject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration")
- proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowDistinguisherMethod")
- proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchema")
- proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaCondition")
- proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaList")
- proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaSpec")
- proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.FlowSchemaStatus")
- proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.GroupSubject")
- proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitResponse")
- proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration")
- proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.NonResourcePolicyRule")
- proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta1.PolicyRulesWithSubjects")
- proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfiguration")
- proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition")
- proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationList")
- proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference")
- proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec")
- proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus")
- proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta1.QueuingConfiguration")
- proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta1.ResourcePolicyRule")
- proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.ServiceAccountSubject")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta1.Subject")
- proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta1.UserSubject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/flowcontrol/v1beta1/generated.proto", fileDescriptor_3a5cb22a034fcb2a)
-}
-
-var fileDescriptor_3a5cb22a034fcb2a = []byte{
- // 1599 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x73, 0xdb, 0xc4,
- 0x17, 0x8f, 0x1c, 0x3b, 0x89, 0x5f, 0x7e, 0x76, 0xd3, 0x4c, 0xfc, 0x4d, 0xbf, 0x63, 0xa7, 0x62,
- 0x86, 0x02, 0x6d, 0xe5, 0xb6, 0xb4, 0xb4, 0xc0, 0xf0, 0x23, 0x4a, 0x4b, 0x29, 0x4d, 0xd2, 0x74,
- 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0xc6, 0x56, 0x63, 0xfd, 0xa8, 0x56, 0x4a, 0x08, 0xbd,
- 0x30, 0xfc, 0x05, 0x9c, 0xe1, 0xc8, 0x81, 0x13, 0x17, 0xae, 0x1c, 0x38, 0xd2, 0xe1, 0xd4, 0x63,
- 0x4f, 0x86, 0x9a, 0x13, 0xff, 0x01, 0x74, 0x86, 0x19, 0x66, 0x57, 0x2b, 0xc9, 0xb2, 0x2d, 0xcb,
- 0xd3, 0xce, 0xf4, 0xc4, 0x2d, 0x7a, 0xfb, 0x79, 0x9f, 0xb7, 0xef, 0xed, 0xfb, 0xe5, 0x80, 0xb2,
- 0x7b, 0x81, 0x2a, 0x86, 0x5d, 0xd5, 0x1c, 0xa3, 0xba, 0xd3, 0xb4, 0xf7, 0x75, 0xdb, 0xf2, 0x5c,
- 0xbb, 0x59, 0xdd, 0x3b, 0xbd, 0x4d, 0x3c, 0xed, 0x74, 0xb5, 0x4e, 0x2c, 0xe2, 0x6a, 0x1e, 0xa9,
- 0x29, 0x8e, 0x6b, 0x7b, 0x36, 0x2a, 0x07, 0x78, 0x45, 0x73, 0x0c, 0xa5, 0x03, 0xaf, 0x08, 0xfc,
- 0xd2, 0xc9, 0xba, 0xe1, 0x35, 0xfc, 0x6d, 0x45, 0xb7, 0xcd, 0x6a, 0xdd, 0xae, 0xdb, 0x55, 0xae,
- 0xb6, 0xed, 0xef, 0xf0, 0x2f, 0xfe, 0xc1, 0xff, 0x0a, 0xe8, 0x96, 0xce, 0xc6, 0xe6, 0x4d, 0x4d,
- 0x6f, 0x18, 0x16, 0x71, 0x0f, 0xaa, 0xce, 0x6e, 0x9d, 0x09, 0x68, 0xd5, 0x24, 0x9e, 0x56, 0xdd,
- 0xeb, 0xb9, 0xc4, 0x52, 0x35, 0x4d, 0xcb, 0xf5, 0x2d, 0xcf, 0x30, 0x49, 0x8f, 0xc2, 0x1b, 0x59,
- 0x0a, 0x54, 0x6f, 0x10, 0x53, 0xeb, 0xd6, 0x93, 0x7f, 0x92, 0x60, 0xf9, 0xd2, 0xe7, 0xc4, 0x74,
- 0xbc, 0x4d, 0xd7, 0xb0, 0x5d, 0xc3, 0x3b, 0x58, 0x23, 0x7b, 0xa4, 0xb9, 0x6a, 0x5b, 0x3b, 0x46,
- 0xdd, 0x77, 0x35, 0xcf, 0xb0, 0x2d, 0x74, 0x0b, 0x4a, 0x96, 0x6d, 0x1a, 0x96, 0xc6, 0xe4, 0xba,
- 0xef, 0xba, 0xc4, 0xd2, 0x0f, 0xb6, 0x1a, 0x9a, 0x4b, 0x68, 0x49, 0x5a, 0x96, 0x5e, 0x29, 0xa8,
- 0xff, 0x6f, 0xb7, 0x2a, 0xa5, 0x8d, 0x14, 0x0c, 0x4e, 0xd5, 0x46, 0xef, 0xc0, 0x6c, 0x93, 0x58,
- 0x35, 0x6d, 0xbb, 0x49, 0x36, 0x89, 0xab, 0x13, 0xcb, 0x2b, 0xe5, 0x38, 0xe1, 0x7c, 0xbb, 0x55,
- 0x99, 0x5d, 0x4b, 0x1e, 0xe1, 0x6e, 0xac, 0x7c, 0x1b, 0x16, 0x3f, 0x68, 0xda, 0xfb, 0x17, 0x0d,
- 0xea, 0x19, 0x56, 0xdd, 0x37, 0x68, 0x83, 0xb8, 0xeb, 0xc4, 0x6b, 0xd8, 0x35, 0xf4, 0x1e, 0xe4,
- 0xbd, 0x03, 0x87, 0xf0, 0xfb, 0x15, 0xd5, 0xe3, 0x0f, 0x5b, 0x95, 0x91, 0x76, 0xab, 0x92, 0xbf,
- 0x71, 0xe0, 0x90, 0xa7, 0xad, 0xca, 0x91, 0x14, 0x35, 0x76, 0x8c, 0xb9, 0xa2, 0xfc, 0x4d, 0x0e,
- 0x80, 0xa1, 0xb6, 0x78, 0xe0, 0xd0, 0x5d, 0x98, 0x60, 0x8f, 0x55, 0xd3, 0x3c, 0x8d, 0x73, 0x4e,
- 0x9e, 0x39, 0xa5, 0xc4, 0x99, 0x12, 0xc5, 0x5c, 0x71, 0x76, 0xeb, 0x4c, 0x40, 0x15, 0x86, 0x56,
- 0xf6, 0x4e, 0x2b, 0xd7, 0xb6, 0xef, 0x11, 0xdd, 0x5b, 0x27, 0x9e, 0xa6, 0x22, 0x71, 0x0b, 0x88,
- 0x65, 0x38, 0x62, 0x45, 0x9b, 0x90, 0xa7, 0x0e, 0xd1, 0x79, 0x00, 0x26, 0xcf, 0x28, 0xca, 0xe0,
- 0x3c, 0x54, 0xe2, 0xbb, 0x6d, 0x39, 0x44, 0x57, 0xa7, 0x42, 0x0f, 0xd9, 0x17, 0xe6, 0x4c, 0xe8,
- 0x16, 0x8c, 0x51, 0x4f, 0xf3, 0x7c, 0x5a, 0x1a, 0xed, 0xb9, 0x71, 0x16, 0x27, 0xd7, 0x53, 0x67,
- 0x04, 0xeb, 0x58, 0xf0, 0x8d, 0x05, 0x9f, 0xfc, 0x38, 0x07, 0xf3, 0x31, 0x78, 0xd5, 0xb6, 0x6a,
- 0x06, 0xcf, 0x94, 0xb7, 0x13, 0x51, 0x3f, 0xd6, 0x15, 0xf5, 0xc5, 0x3e, 0x2a, 0x71, 0xc4, 0xd1,
- 0x9b, 0xd1, 0x75, 0x73, 0x5c, 0xfd, 0x68, 0xd2, 0xf8, 0xd3, 0x56, 0x65, 0x36, 0x52, 0x4b, 0xde,
- 0x07, 0xed, 0x01, 0x6a, 0x6a, 0xd4, 0xbb, 0xe1, 0x6a, 0x16, 0x0d, 0x68, 0x0d, 0x93, 0x08, 0xaf,
- 0x5f, 0x1b, 0xee, 0x9d, 0x98, 0x86, 0xba, 0x24, 0x4c, 0xa2, 0xb5, 0x1e, 0x36, 0xdc, 0xc7, 0x02,
- 0x7a, 0x19, 0xc6, 0x5c, 0xa2, 0x51, 0xdb, 0x2a, 0xe5, 0xf9, 0x95, 0xa3, 0x78, 0x61, 0x2e, 0xc5,
- 0xe2, 0x14, 0xbd, 0x0a, 0xe3, 0x26, 0xa1, 0x54, 0xab, 0x93, 0x52, 0x81, 0x03, 0x67, 0x05, 0x70,
- 0x7c, 0x3d, 0x10, 0xe3, 0xf0, 0x5c, 0xfe, 0x59, 0x82, 0x99, 0x38, 0x4e, 0x6b, 0x06, 0xf5, 0xd0,
- 0x9d, 0x9e, 0xdc, 0x53, 0x86, 0xf3, 0x89, 0x69, 0xf3, 0xcc, 0x9b, 0x13, 0xe6, 0x26, 0x42, 0x49,
- 0x47, 0xde, 0x5d, 0x83, 0x82, 0xe1, 0x11, 0x93, 0x45, 0x7d, 0xb4, 0x2b, 0x5c, 0x19, 0x49, 0xa2,
- 0x4e, 0x0b, 0xda, 0xc2, 0x15, 0x46, 0x80, 0x03, 0x1e, 0xf9, 0xcf, 0xd1, 0x4e, 0x0f, 0x58, 0x3e,
- 0xa2, 0xef, 0x25, 0x58, 0x72, 0x52, 0x1b, 0x8c, 0x70, 0x6a, 0x35, 0xcb, 0x72, 0x7a, 0x8b, 0xc2,
- 0x64, 0x87, 0xb0, 0xbe, 0x42, 0x54, 0x59, 0x5c, 0x69, 0x69, 0x00, 0x78, 0xc0, 0x55, 0xd0, 0x47,
- 0x80, 0x4c, 0xcd, 0x63, 0x11, 0xad, 0x6f, 0xba, 0x44, 0x27, 0x35, 0xc6, 0x2a, 0x9a, 0x52, 0x94,
- 0x1d, 0xeb, 0x3d, 0x08, 0xdc, 0x47, 0x0b, 0x7d, 0x25, 0xc1, 0x7c, 0xad, 0xb7, 0xc9, 0x88, 0xbc,
- 0x3c, 0x3f, 0x4c, 0xa0, 0xfb, 0xf4, 0x28, 0x75, 0xb1, 0xdd, 0xaa, 0xcc, 0xf7, 0x39, 0xc0, 0xfd,
- 0x8c, 0xa1, 0x3b, 0x50, 0x70, 0xfd, 0x26, 0xa1, 0xa5, 0x3c, 0x7f, 0xde, 0x4c, 0xab, 0x9b, 0x76,
- 0xd3, 0xd0, 0x0f, 0x30, 0x53, 0xf9, 0xc4, 0xf0, 0x1a, 0x5b, 0x3e, 0xef, 0x55, 0x34, 0x7e, 0x6b,
- 0x7e, 0x84, 0x03, 0x52, 0xf9, 0x01, 0xcc, 0x75, 0x37, 0x0d, 0x54, 0x07, 0xd0, 0xc3, 0x3a, 0x65,
- 0x03, 0x82, 0x99, 0x7d, 0x7d, 0xf8, 0xac, 0x8a, 0x6a, 0x3c, 0xee, 0x97, 0x91, 0x88, 0xe2, 0x0e,
- 0x6a, 0xf9, 0x14, 0x4c, 0x5d, 0x76, 0x6d, 0xdf, 0x11, 0x77, 0x44, 0xcb, 0x90, 0xb7, 0x34, 0x33,
- 0xec, 0x3e, 0x51, 0x47, 0xdc, 0xd0, 0x4c, 0x82, 0xf9, 0x89, 0xfc, 0x9d, 0x04, 0xd3, 0x6b, 0x86,
- 0x69, 0x78, 0x98, 0x50, 0xc7, 0xb6, 0x28, 0x41, 0xe7, 0x12, 0x1d, 0xeb, 0x68, 0x57, 0xc7, 0x3a,
- 0x94, 0x00, 0x77, 0xf4, 0xaa, 0x4f, 0x61, 0xfc, 0xbe, 0x4f, 0x7c, 0xc3, 0xaa, 0x8b, 0x7e, 0x7d,
- 0x36, 0xcb, 0xc1, 0xeb, 0x01, 0x3c, 0x91, 0x6d, 0xea, 0x24, 0x6b, 0x01, 0xe2, 0x04, 0x87, 0x8c,
- 0xf2, 0x3f, 0x39, 0x38, 0xca, 0x0d, 0x93, 0xda, 0x80, 0xa9, 0x7c, 0x07, 0x4a, 0x1a, 0xa5, 0xbe,
- 0x4b, 0x6a, 0x69, 0x53, 0x79, 0x59, 0x78, 0x53, 0x5a, 0x49, 0xc1, 0xe1, 0x54, 0x06, 0x74, 0x0f,
- 0xa6, 0x9b, 0x9d, 0xbe, 0x0b, 0x37, 0x4f, 0x66, 0xb9, 0x99, 0x08, 0x98, 0xba, 0x20, 0x6e, 0x90,
- 0x0c, 0x3a, 0x4e, 0x52, 0xf7, 0xdb, 0x02, 0x46, 0x87, 0xdf, 0x02, 0xd0, 0x35, 0x58, 0xd8, 0xb6,
- 0x5d, 0xd7, 0xde, 0x37, 0xac, 0x3a, 0xb7, 0x13, 0x92, 0xe4, 0x39, 0xc9, 0xff, 0xda, 0xad, 0xca,
- 0x82, 0xda, 0x0f, 0x80, 0xfb, 0xeb, 0xc9, 0xfb, 0xb0, 0xb0, 0xc1, 0x7a, 0x0a, 0xb5, 0x7d, 0x57,
- 0x27, 0x71, 0x41, 0xa0, 0x0a, 0x14, 0xf6, 0x88, 0xbb, 0x1d, 0x24, 0x75, 0x51, 0x2d, 0xb2, 0x72,
- 0xf8, 0x98, 0x09, 0x70, 0x20, 0x67, 0x9e, 0x58, 0xb1, 0xe6, 0x4d, 0xbc, 0x46, 0x4b, 0x63, 0x1c,
- 0xca, 0x3d, 0xd9, 0x48, 0x1e, 0xe1, 0x6e, 0xac, 0xdc, 0xca, 0xc1, 0x62, 0x4a, 0xfd, 0xa1, 0x9b,
- 0x30, 0x41, 0xc5, 0xdf, 0xa2, 0xa6, 0x8e, 0x65, 0xbd, 0x85, 0xd0, 0x8d, 0xbb, 0x7f, 0x48, 0x86,
- 0x23, 0x2a, 0x64, 0xc3, 0xb4, 0x2b, 0xae, 0xc0, 0x6d, 0x8a, 0x29, 0x70, 0x26, 0x8b, 0xbb, 0x37,
- 0x3a, 0xf1, 0x63, 0xe3, 0x4e, 0x42, 0x9c, 0xe4, 0x47, 0x0f, 0x60, 0xae, 0xc3, 0xed, 0xc0, 0xe6,
- 0x28, 0xb7, 0x79, 0x2e, 0xcb, 0x66, 0xdf, 0x47, 0x51, 0x4b, 0xc2, 0xec, 0xdc, 0x46, 0x17, 0x2d,
- 0xee, 0x31, 0x24, 0xff, 0x9a, 0x83, 0x01, 0x83, 0xe1, 0x05, 0x2c, 0x79, 0x77, 0x13, 0x4b, 0xde,
- 0xbb, 0xcf, 0x3e, 0xf1, 0x52, 0x97, 0xbe, 0x46, 0xd7, 0xd2, 0xf7, 0xfe, 0x73, 0xd8, 0x18, 0xbc,
- 0x04, 0xfe, 0x95, 0x83, 0x97, 0xd2, 0x95, 0xe3, 0xa5, 0xf0, 0x6a, 0xa2, 0xc5, 0x9e, 0xef, 0x6a,
- 0xb1, 0xc7, 0x86, 0xa0, 0xf8, 0x6f, 0x49, 0xec, 0x5a, 0x12, 0x7f, 0x93, 0xa0, 0x9c, 0x1e, 0xb7,
- 0x17, 0xb0, 0x34, 0x7e, 0x96, 0x5c, 0x1a, 0xdf, 0x7a, 0xf6, 0x24, 0x4b, 0x59, 0x22, 0x2f, 0x0f,
- 0xca, 0xad, 0x68, 0xdd, 0x1b, 0x62, 0xe4, 0xff, 0x90, 0x1b, 0x14, 0x2a, 0xbe, 0x9d, 0x66, 0xfc,
- 0x6a, 0x49, 0x68, 0x5f, 0xb2, 0xd8, 0xe8, 0x31, 0xd9, 0xf4, 0x08, 0x12, 0xb2, 0x01, 0xe3, 0xcd,
- 0x60, 0x56, 0x8b, 0xa2, 0x5e, 0x19, 0x6a, 0x44, 0x0e, 0x1a, 0xed, 0xc1, 0x5a, 0x20, 0x60, 0x38,
- 0xa4, 0x47, 0x35, 0x18, 0x23, 0xfc, 0xa7, 0xfa, 0xb0, 0x95, 0x9d, 0xf5, 0xc3, 0x5e, 0x05, 0x96,
- 0x85, 0x01, 0x0a, 0x0b, 0x6e, 0xf9, 0x5b, 0x09, 0x96, 0xb3, 0x5a, 0x02, 0xda, 0xef, 0xb3, 0xe2,
- 0x3d, 0xc7, 0xfa, 0x3e, 0xfc, 0xca, 0xf7, 0xa3, 0x04, 0x87, 0xfb, 0x6d, 0x52, 0xac, 0xc8, 0xd8,
- 0xfa, 0x14, 0xed, 0x3e, 0x51, 0x91, 0x5d, 0xe7, 0x52, 0x2c, 0x4e, 0xd1, 0x09, 0x98, 0x68, 0x68,
- 0x56, 0x6d, 0xcb, 0xf8, 0x22, 0xdc, 0xea, 0xa3, 0x34, 0xff, 0x50, 0xc8, 0x71, 0x84, 0x40, 0x17,
- 0x61, 0x8e, 0xeb, 0xad, 0x11, 0xab, 0xee, 0x35, 0xf8, 0x8b, 0x88, 0xd5, 0x24, 0x9a, 0x3a, 0xd7,
- 0xbb, 0xce, 0x71, 0x8f, 0x86, 0xfc, 0xb7, 0x04, 0xe8, 0x59, 0xb6, 0x89, 0xe3, 0x50, 0xd4, 0x1c,
- 0x83, 0xaf, 0xb8, 0x41, 0xa1, 0x15, 0xd5, 0xe9, 0x76, 0xab, 0x52, 0x5c, 0xd9, 0xbc, 0x12, 0x08,
- 0x71, 0x7c, 0xce, 0xc0, 0xe1, 0xa0, 0x0d, 0x06, 0xaa, 0x00, 0x87, 0x86, 0x29, 0x8e, 0xcf, 0xd1,
- 0x05, 0x98, 0xd2, 0x9b, 0x3e, 0xf5, 0x88, 0xbb, 0xa5, 0xdb, 0x0e, 0xe1, 0x8d, 0x69, 0x42, 0x3d,
- 0x2c, 0x7c, 0x9a, 0x5a, 0xed, 0x38, 0xc3, 0x09, 0x24, 0x52, 0x00, 0x58, 0x59, 0x51, 0x47, 0x63,
- 0x76, 0x0a, 0xdc, 0xce, 0x0c, 0x7b, 0xb0, 0x8d, 0x48, 0x8a, 0x3b, 0x10, 0xf2, 0x3d, 0x58, 0xd8,
- 0x22, 0xee, 0x9e, 0xa1, 0x93, 0x15, 0x5d, 0xb7, 0x7d, 0xcb, 0x0b, 0x97, 0xf5, 0x2a, 0x14, 0x23,
- 0x98, 0xa8, 0xbc, 0x43, 0xc2, 0x7e, 0x31, 0xe2, 0xc2, 0x31, 0x26, 0x2a, 0xf5, 0x5c, 0x6a, 0xa9,
- 0xff, 0x92, 0x83, 0xf1, 0x98, 0x3e, 0xbf, 0x6b, 0x58, 0x35, 0xc1, 0x7c, 0x24, 0x44, 0x5f, 0x35,
- 0xac, 0xda, 0xd3, 0x56, 0x65, 0x52, 0xc0, 0xd8, 0x27, 0xe6, 0x40, 0x74, 0x05, 0xf2, 0x3e, 0x25,
- 0xae, 0x28, 0xe2, 0xe3, 0x59, 0xc9, 0x7c, 0x93, 0x12, 0x37, 0xdc, 0xaf, 0x26, 0x18, 0x33, 0x13,
- 0x60, 0x4e, 0x81, 0xd6, 0xa1, 0x50, 0x67, 0x8f, 0x22, 0xea, 0xf4, 0x44, 0x16, 0x57, 0xe7, 0x8f,
- 0x98, 0x20, 0x0d, 0xb8, 0x04, 0x07, 0x2c, 0xe8, 0x3e, 0xcc, 0xd0, 0x44, 0x08, 0xf9, 0x73, 0x0d,
- 0xb1, 0x2f, 0xf5, 0x0d, 0xbc, 0x8a, 0xda, 0xad, 0xca, 0x4c, 0xf2, 0x08, 0x77, 0x19, 0x90, 0xab,
- 0x30, 0xd9, 0xe1, 0x60, 0x76, 0x97, 0x55, 0x2f, 0x3e, 0x7c, 0x52, 0x1e, 0x79, 0xf4, 0xa4, 0x3c,
- 0xf2, 0xf8, 0x49, 0x79, 0xe4, 0xcb, 0x76, 0x59, 0x7a, 0xd8, 0x2e, 0x4b, 0x8f, 0xda, 0x65, 0xe9,
- 0x71, 0xbb, 0x2c, 0xfd, 0xde, 0x2e, 0x4b, 0x5f, 0xff, 0x51, 0x1e, 0xb9, 0x5d, 0x1e, 0xfc, 0xbf,
- 0xd8, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, 0x42, 0x4c, 0x0f, 0xac, 0x15, 0x00, 0x00,
-}
+func (m *UserSubject) Reset() { *m = UserSubject{} }
func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..9d205959
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,68 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*FlowDistinguisherMethod) ProtoMessage() {}
+
+func (*FlowSchema) ProtoMessage() {}
+
+func (*FlowSchemaCondition) ProtoMessage() {}
+
+func (*FlowSchemaList) ProtoMessage() {}
+
+func (*FlowSchemaSpec) ProtoMessage() {}
+
+func (*FlowSchemaStatus) ProtoMessage() {}
+
+func (*GroupSubject) ProtoMessage() {}
+
+func (*LimitResponse) ProtoMessage() {}
+
+func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*NonResourcePolicyRule) ProtoMessage() {}
+
+func (*PolicyRulesWithSubjects) ProtoMessage() {}
+
+func (*PriorityLevelConfiguration) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationList) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationReference) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
+
+func (*QueuingConfiguration) ProtoMessage() {}
+
+func (*ResourcePolicyRule) ProtoMessage() {}
+
+func (*ServiceAccountSubject) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
+
+func (*UserSubject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..fe34dbc9
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExemptPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.ExemptPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowDistinguisherMethod) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowDistinguisherMethod"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchema) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowSchema"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowSchemaCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowSchemaList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowSchemaSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.FlowSchemaStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.GroupSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitResponse) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.LimitResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitedPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.NonResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRulesWithSubjects) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PolicyRulesWithSubjects"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationReference) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfigurationStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QueuingConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.QueuingConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.ResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.ServiceAccountSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.Subject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta1.UserSubject"
+}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
index 2dcad11a..fb00b585 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.flowcontrol.v1beta2
// +groupName=flowcontrol.apiserver.k8s.io
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
index f646446d..1d5a5d26 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.pb.go
@@ -24,803 +24,56 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
-func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{0}
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
-}
+func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (*FlowDistinguisherMethod) ProtoMessage() {}
-func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{1}
-}
-func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src)
-}
-func (m *FlowDistinguisherMethod) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m)
-}
+func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
+func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-func (*FlowSchema) ProtoMessage() {}
-func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{2}
-}
-func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchema.Merge(m, src)
-}
-func (m *FlowSchema) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchema.DiscardUnknown(m)
-}
+func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
+func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (*FlowSchemaCondition) ProtoMessage() {}
-func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{3}
-}
-func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaCondition.Merge(m, src)
-}
-func (m *FlowSchemaCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m)
-}
+func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
+func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-func (*FlowSchemaList) ProtoMessage() {}
-func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{4}
-}
-func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaList.Merge(m, src)
-}
-func (m *FlowSchemaList) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaList) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaList.DiscardUnknown(m)
-}
+func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
+func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (*FlowSchemaSpec) ProtoMessage() {}
-func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{5}
-}
-func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaSpec.Merge(m, src)
-}
-func (m *FlowSchemaSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-func (*FlowSchemaStatus) ProtoMessage() {}
-func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{6}
-}
-func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaStatus.Merge(m, src)
-}
-func (m *FlowSchemaStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (*GroupSubject) ProtoMessage() {}
-func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{7}
-}
-func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupSubject.Merge(m, src)
-}
-func (m *GroupSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupSubject.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-func (*LimitResponse) ProtoMessage() {}
-func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{8}
-}
-func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitResponse.Merge(m, src)
-}
-func (m *LimitResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitResponse.DiscardUnknown(m)
-}
+func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
+func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
-func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{9}
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-func (*NonResourcePolicyRule) ProtoMessage() {}
-func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{10}
-}
-func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourcePolicyRule.Merge(m, src)
-}
-func (m *NonResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
-
-func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (*PolicyRulesWithSubjects) ProtoMessage() {}
-func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{11}
-}
-func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src)
-}
-func (m *PolicyRulesWithSubjects) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m)
-}
+func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
+func (m *Subject) Reset() { *m = Subject{} }
-func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-func (*PriorityLevelConfiguration) ProtoMessage() {}
-func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{12}
-}
-func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src)
-}
-func (m *PriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
-func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{13}
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-func (*PriorityLevelConfigurationList) ProtoMessage() {}
-func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{14}
-}
-func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (*PriorityLevelConfigurationReference) ProtoMessage() {}
-func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{15}
-}
-func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
-func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{16}
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
-func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{17}
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
-
-func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-func (*QueuingConfiguration) ProtoMessage() {}
-func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{18}
-}
-func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *QueuingConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueuingConfiguration.Merge(m, src)
-}
-func (m *QueuingConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *QueuingConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
-
-func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (*ResourcePolicyRule) ProtoMessage() {}
-func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{19}
-}
-func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePolicyRule.Merge(m, src)
-}
-func (m *ResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
-
-func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-func (*ServiceAccountSubject) ProtoMessage() {}
-func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{20}
-}
-func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountSubject.Merge(m, src)
-}
-func (m *ServiceAccountSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{21}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func (m *UserSubject) Reset() { *m = UserSubject{} }
-func (*UserSubject) ProtoMessage() {}
-func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e620af2eea53237, []int{22}
-}
-func (m *UserSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserSubject.Merge(m, src)
-}
-func (m *UserSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *UserSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_UserSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserSubject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration")
- proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowDistinguisherMethod")
- proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchema")
- proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaCondition")
- proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaList")
- proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaSpec")
- proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta2.FlowSchemaStatus")
- proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.GroupSubject")
- proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta2.LimitResponse")
- proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration")
- proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta2.NonResourcePolicyRule")
- proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta2.PolicyRulesWithSubjects")
- proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfiguration")
- proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition")
- proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationList")
- proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference")
- proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec")
- proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus")
- proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta2.QueuingConfiguration")
- proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta2.ResourcePolicyRule")
- proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.ServiceAccountSubject")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta2.Subject")
- proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta2.UserSubject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/flowcontrol/v1beta2/generated.proto", fileDescriptor_2e620af2eea53237)
-}
-
-var fileDescriptor_2e620af2eea53237 = []byte{
- // 1602 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcd, 0x73, 0xdb, 0xd4,
- 0x16, 0x8f, 0x1c, 0x3b, 0x89, 0x4f, 0x3e, 0x7b, 0xd3, 0x4c, 0xfc, 0xd2, 0x37, 0x76, 0xaa, 0x37,
- 0xf3, 0xfa, 0x1e, 0x6d, 0xe5, 0x36, 0xb4, 0xb4, 0xc0, 0xf0, 0x11, 0xa5, 0xa5, 0x94, 0x26, 0x69,
- 0x7a, 0xd3, 0x42, 0xa7, 0x74, 0x86, 0x2a, 0xf2, 0x8d, 0xad, 0xc6, 0xfa, 0xa8, 0xae, 0x94, 0x10,
- 0xba, 0x61, 0xf8, 0x0b, 0x58, 0xc3, 0x92, 0x05, 0x2b, 0x36, 0x6c, 0x59, 0xb0, 0xa4, 0xc3, 0xaa,
- 0xcb, 0xae, 0x0c, 0x35, 0x2b, 0xfe, 0x03, 0xe8, 0x0c, 0x33, 0xcc, 0xbd, 0xba, 0x92, 0x2c, 0xdb,
- 0xb2, 0x3c, 0xed, 0x4c, 0x57, 0xec, 0xa2, 0x73, 0x7f, 0xe7, 0x77, 0xee, 0x39, 0xf7, 0x7c, 0x39,
- 0xa0, 0xec, 0x5d, 0xa4, 0x8a, 0x61, 0x57, 0x35, 0xc7, 0xa8, 0xee, 0x36, 0xed, 0x03, 0xdd, 0xb6,
- 0x3c, 0xd7, 0x6e, 0x56, 0xf7, 0xcf, 0xee, 0x10, 0x4f, 0x5b, 0xa9, 0xd6, 0x89, 0x45, 0x5c, 0xcd,
- 0x23, 0x35, 0xc5, 0x71, 0x6d, 0xcf, 0x46, 0xe5, 0x00, 0xaf, 0x68, 0x8e, 0xa1, 0x74, 0xe0, 0x15,
- 0x81, 0x5f, 0x3a, 0x5d, 0x37, 0xbc, 0x86, 0xbf, 0xa3, 0xe8, 0xb6, 0x59, 0xad, 0xdb, 0x75, 0xbb,
- 0xca, 0xd5, 0x76, 0xfc, 0x5d, 0xfe, 0xc5, 0x3f, 0xf8, 0x5f, 0x01, 0xdd, 0xd2, 0xb9, 0xd8, 0xbc,
- 0xa9, 0xe9, 0x0d, 0xc3, 0x22, 0xee, 0x61, 0xd5, 0xd9, 0xab, 0x33, 0x01, 0xad, 0x9a, 0xc4, 0xd3,
- 0xaa, 0xfb, 0x67, 0xbb, 0x2f, 0xb1, 0x54, 0x4d, 0xd3, 0x72, 0x7d, 0xcb, 0x33, 0x4c, 0xd2, 0xa3,
- 0xf0, 0x5a, 0x96, 0x02, 0xd5, 0x1b, 0xc4, 0xd4, 0xba, 0xf5, 0xe4, 0x1f, 0x24, 0x58, 0xbe, 0xfc,
- 0x29, 0x31, 0x1d, 0x6f, 0xcb, 0x35, 0x6c, 0xd7, 0xf0, 0x0e, 0xd7, 0xc9, 0x3e, 0x69, 0xae, 0xd9,
- 0xd6, 0xae, 0x51, 0xf7, 0x5d, 0xcd, 0x33, 0x6c, 0x0b, 0xdd, 0x86, 0x92, 0x65, 0x9b, 0x86, 0xa5,
- 0x31, 0xb9, 0xee, 0xbb, 0x2e, 0xb1, 0xf4, 0xc3, 0xed, 0x86, 0xe6, 0x12, 0x5a, 0x92, 0x96, 0xa5,
- 0xff, 0x15, 0xd4, 0x7f, 0xb7, 0x5b, 0x95, 0xd2, 0x66, 0x0a, 0x06, 0xa7, 0x6a, 0xa3, 0xb7, 0x60,
- 0xb6, 0x49, 0xac, 0x9a, 0xb6, 0xd3, 0x24, 0x5b, 0xc4, 0xd5, 0x89, 0xe5, 0x95, 0x72, 0x9c, 0x70,
- 0xbe, 0xdd, 0xaa, 0xcc, 0xae, 0x27, 0x8f, 0x70, 0x37, 0x56, 0xbe, 0x03, 0x8b, 0xef, 0x35, 0xed,
- 0x83, 0x4b, 0x06, 0xf5, 0x0c, 0xab, 0xee, 0x1b, 0xb4, 0x41, 0xdc, 0x0d, 0xe2, 0x35, 0xec, 0x1a,
- 0x7a, 0x07, 0xf2, 0xde, 0xa1, 0x43, 0xf8, 0xfd, 0x8a, 0xea, 0xc9, 0x47, 0xad, 0xca, 0x48, 0xbb,
- 0x55, 0xc9, 0xdf, 0x3c, 0x74, 0xc8, 0xb3, 0x56, 0xe5, 0x58, 0x8a, 0x1a, 0x3b, 0xc6, 0x5c, 0x51,
- 0xfe, 0x2a, 0x07, 0xc0, 0x50, 0xdb, 0x3c, 0x70, 0xe8, 0x1e, 0x4c, 0xb0, 0xc7, 0xaa, 0x69, 0x9e,
- 0xc6, 0x39, 0x27, 0x57, 0xce, 0x28, 0x71, 0xa6, 0x44, 0x31, 0x57, 0x9c, 0xbd, 0x3a, 0x13, 0x50,
- 0x85, 0xa1, 0x95, 0xfd, 0xb3, 0xca, 0xf5, 0x9d, 0xfb, 0x44, 0xf7, 0x36, 0x88, 0xa7, 0xa9, 0x48,
- 0xdc, 0x02, 0x62, 0x19, 0x8e, 0x58, 0xd1, 0x16, 0xe4, 0xa9, 0x43, 0x74, 0x1e, 0x80, 0xc9, 0x15,
- 0x45, 0x19, 0x9c, 0x87, 0x4a, 0x7c, 0xb7, 0x6d, 0x87, 0xe8, 0xea, 0x54, 0xe8, 0x21, 0xfb, 0xc2,
- 0x9c, 0x09, 0xdd, 0x86, 0x31, 0xea, 0x69, 0x9e, 0x4f, 0x4b, 0xa3, 0x3d, 0x37, 0xce, 0xe2, 0xe4,
- 0x7a, 0xea, 0x8c, 0x60, 0x1d, 0x0b, 0xbe, 0xb1, 0xe0, 0x93, 0x9f, 0xe4, 0x60, 0x3e, 0x06, 0xaf,
- 0xd9, 0x56, 0xcd, 0xe0, 0x99, 0xf2, 0x66, 0x22, 0xea, 0x27, 0xba, 0xa2, 0xbe, 0xd8, 0x47, 0x25,
- 0x8e, 0x38, 0x7a, 0x3d, 0xba, 0x6e, 0x8e, 0xab, 0x1f, 0x4f, 0x1a, 0x7f, 0xd6, 0xaa, 0xcc, 0x46,
- 0x6a, 0xc9, 0xfb, 0xa0, 0x7d, 0x40, 0x4d, 0x8d, 0x7a, 0x37, 0x5d, 0xcd, 0xa2, 0x01, 0xad, 0x61,
- 0x12, 0xe1, 0xf5, 0x2b, 0xc3, 0xbd, 0x13, 0xd3, 0x50, 0x97, 0x84, 0x49, 0xb4, 0xde, 0xc3, 0x86,
- 0xfb, 0x58, 0x40, 0xff, 0x85, 0x31, 0x97, 0x68, 0xd4, 0xb6, 0x4a, 0x79, 0x7e, 0xe5, 0x28, 0x5e,
- 0x98, 0x4b, 0xb1, 0x38, 0x45, 0xff, 0x87, 0x71, 0x93, 0x50, 0xaa, 0xd5, 0x49, 0xa9, 0xc0, 0x81,
- 0xb3, 0x02, 0x38, 0xbe, 0x11, 0x88, 0x71, 0x78, 0x2e, 0xff, 0x28, 0xc1, 0x4c, 0x1c, 0xa7, 0x75,
- 0x83, 0x7a, 0xe8, 0x6e, 0x4f, 0xee, 0x29, 0xc3, 0xf9, 0xc4, 0xb4, 0x79, 0xe6, 0xcd, 0x09, 0x73,
- 0x13, 0xa1, 0xa4, 0x23, 0xef, 0xae, 0x43, 0xc1, 0xf0, 0x88, 0xc9, 0xa2, 0x3e, 0xda, 0x15, 0xae,
- 0x8c, 0x24, 0x51, 0xa7, 0x05, 0x6d, 0xe1, 0x2a, 0x23, 0xc0, 0x01, 0x8f, 0xfc, 0xfb, 0x68, 0xa7,
- 0x07, 0x2c, 0x1f, 0xd1, 0xb7, 0x12, 0x2c, 0x39, 0xa9, 0x0d, 0x46, 0x38, 0xb5, 0x96, 0x65, 0x39,
- 0xbd, 0x45, 0x61, 0xb2, 0x4b, 0x58, 0x5f, 0x21, 0xaa, 0x2c, 0xae, 0xb4, 0x34, 0x00, 0x3c, 0xe0,
- 0x2a, 0xe8, 0x03, 0x40, 0xa6, 0xe6, 0xb1, 0x88, 0xd6, 0xb7, 0x5c, 0xa2, 0x93, 0x1a, 0x63, 0x15,
- 0x4d, 0x29, 0xca, 0x8e, 0x8d, 0x1e, 0x04, 0xee, 0xa3, 0x85, 0xbe, 0x90, 0x60, 0xbe, 0xd6, 0xdb,
- 0x64, 0x44, 0x5e, 0x5e, 0x18, 0x26, 0xd0, 0x7d, 0x7a, 0x94, 0xba, 0xd8, 0x6e, 0x55, 0xe6, 0xfb,
- 0x1c, 0xe0, 0x7e, 0xc6, 0xd0, 0x5d, 0x28, 0xb8, 0x7e, 0x93, 0xd0, 0x52, 0x9e, 0x3f, 0x6f, 0xa6,
- 0xd5, 0x2d, 0xbb, 0x69, 0xe8, 0x87, 0x98, 0xa9, 0x7c, 0x64, 0x78, 0x8d, 0x6d, 0x9f, 0xf7, 0x2a,
- 0x1a, 0xbf, 0x35, 0x3f, 0xc2, 0x01, 0xa9, 0xfc, 0x10, 0xe6, 0xba, 0x9b, 0x06, 0xaa, 0x03, 0xe8,
- 0x61, 0x9d, 0xb2, 0x01, 0xc1, 0xcc, 0xbe, 0x3a, 0x7c, 0x56, 0x45, 0x35, 0x1e, 0xf7, 0xcb, 0x48,
- 0x44, 0x71, 0x07, 0xb5, 0x7c, 0x06, 0xa6, 0xae, 0xb8, 0xb6, 0xef, 0x88, 0x3b, 0xa2, 0x65, 0xc8,
- 0x5b, 0x9a, 0x19, 0x76, 0x9f, 0xa8, 0x23, 0x6e, 0x6a, 0x26, 0xc1, 0xfc, 0x44, 0xfe, 0x46, 0x82,
- 0xe9, 0x75, 0xc3, 0x34, 0x3c, 0x4c, 0xa8, 0x63, 0x5b, 0x94, 0xa0, 0xf3, 0x89, 0x8e, 0x75, 0xbc,
- 0xab, 0x63, 0x1d, 0x49, 0x80, 0x3b, 0x7a, 0xd5, 0xc7, 0x30, 0xfe, 0xc0, 0x27, 0xbe, 0x61, 0xd5,
- 0x45, 0xbf, 0x3e, 0x97, 0xe5, 0xe0, 0x8d, 0x00, 0x9e, 0xc8, 0x36, 0x75, 0x92, 0xb5, 0x00, 0x71,
- 0x82, 0x43, 0x46, 0xf9, 0xaf, 0x1c, 0x1c, 0xe7, 0x86, 0x49, 0x6d, 0xc0, 0x54, 0xbe, 0x0b, 0x25,
- 0x8d, 0x52, 0xdf, 0x25, 0xb5, 0xb4, 0xa9, 0xbc, 0x2c, 0xbc, 0x29, 0xad, 0xa6, 0xe0, 0x70, 0x2a,
- 0x03, 0xba, 0x0f, 0xd3, 0xcd, 0x4e, 0xdf, 0x85, 0x9b, 0xa7, 0xb3, 0xdc, 0x4c, 0x04, 0x4c, 0x5d,
- 0x10, 0x37, 0x48, 0x06, 0x1d, 0x27, 0xa9, 0xfb, 0x6d, 0x01, 0xa3, 0xc3, 0x6f, 0x01, 0xe8, 0x3a,
- 0x2c, 0xec, 0xd8, 0xae, 0x6b, 0x1f, 0x18, 0x56, 0x9d, 0xdb, 0x09, 0x49, 0xf2, 0x9c, 0xe4, 0x5f,
- 0xed, 0x56, 0x65, 0x41, 0xed, 0x07, 0xc0, 0xfd, 0xf5, 0xe4, 0x03, 0x58, 0xd8, 0x64, 0x3d, 0x85,
- 0xda, 0xbe, 0xab, 0x93, 0xb8, 0x20, 0x50, 0x05, 0x0a, 0xfb, 0xc4, 0xdd, 0x09, 0x92, 0xba, 0xa8,
- 0x16, 0x59, 0x39, 0x7c, 0xc8, 0x04, 0x38, 0x90, 0x33, 0x4f, 0xac, 0x58, 0xf3, 0x16, 0x5e, 0xa7,
- 0xa5, 0x31, 0x0e, 0xe5, 0x9e, 0x6c, 0x26, 0x8f, 0x70, 0x37, 0x56, 0x6e, 0xe5, 0x60, 0x31, 0xa5,
- 0xfe, 0xd0, 0x2d, 0x98, 0xa0, 0xe2, 0x6f, 0x51, 0x53, 0x27, 0xb2, 0xde, 0x42, 0xe8, 0xc6, 0xdd,
- 0x3f, 0x24, 0xc3, 0x11, 0x15, 0xb2, 0x61, 0xda, 0x15, 0x57, 0xe0, 0x36, 0xc5, 0x14, 0x58, 0xc9,
- 0xe2, 0xee, 0x8d, 0x4e, 0xfc, 0xd8, 0xb8, 0x93, 0x10, 0x27, 0xf9, 0xd1, 0x43, 0x98, 0xeb, 0x70,
- 0x3b, 0xb0, 0x39, 0xca, 0x6d, 0x9e, 0xcf, 0xb2, 0xd9, 0xf7, 0x51, 0xd4, 0x92, 0x30, 0x3b, 0xb7,
- 0xd9, 0x45, 0x8b, 0x7b, 0x0c, 0xc9, 0x3f, 0xe7, 0x60, 0xc0, 0x60, 0x78, 0x09, 0x4b, 0xde, 0xbd,
- 0xc4, 0x92, 0xf7, 0xf6, 0xf3, 0x4f, 0xbc, 0xd4, 0xa5, 0xaf, 0xd1, 0xb5, 0xf4, 0xbd, 0xfb, 0x02,
- 0x36, 0x06, 0x2f, 0x81, 0x7f, 0xe4, 0xe0, 0x3f, 0xe9, 0xca, 0xf1, 0x52, 0x78, 0x2d, 0xd1, 0x62,
- 0x2f, 0x74, 0xb5, 0xd8, 0x13, 0x43, 0x50, 0xfc, 0xb3, 0x24, 0x76, 0x2d, 0x89, 0xbf, 0x48, 0x50,
- 0x4e, 0x8f, 0xdb, 0x4b, 0x58, 0x1a, 0x3f, 0x49, 0x2e, 0x8d, 0x6f, 0x3c, 0x7f, 0x92, 0xa5, 0x2c,
- 0x91, 0x57, 0x06, 0xe5, 0x56, 0xb4, 0xee, 0x0d, 0x31, 0xf2, 0xbf, 0xcb, 0x0d, 0x0a, 0x15, 0xdf,
- 0x4e, 0x33, 0x7e, 0xb5, 0x24, 0xb4, 0x2f, 0x5b, 0x6c, 0xf4, 0x98, 0x6c, 0x7a, 0x04, 0x09, 0xd9,
- 0x80, 0xf1, 0x66, 0x30, 0xab, 0x45, 0x51, 0xaf, 0x0e, 0x35, 0x22, 0x07, 0x8d, 0xf6, 0x60, 0x2d,
- 0x10, 0x30, 0x1c, 0xd2, 0xa3, 0x1a, 0x8c, 0x11, 0xfe, 0x53, 0x7d, 0xd8, 0xca, 0xce, 0xfa, 0x61,
- 0xaf, 0x02, 0xcb, 0xc2, 0x00, 0x85, 0x05, 0xb7, 0xfc, 0xb5, 0x04, 0xcb, 0x59, 0x2d, 0x01, 0x1d,
- 0xf4, 0x59, 0xf1, 0x5e, 0x60, 0x7d, 0x1f, 0x7e, 0xe5, 0xfb, 0x5e, 0x82, 0xa3, 0xfd, 0x36, 0x29,
- 0x56, 0x64, 0x6c, 0x7d, 0x8a, 0x76, 0x9f, 0xa8, 0xc8, 0x6e, 0x70, 0x29, 0x16, 0xa7, 0xe8, 0x14,
- 0x4c, 0x34, 0x34, 0xab, 0xb6, 0x6d, 0x7c, 0x16, 0x6e, 0xf5, 0x51, 0x9a, 0xbf, 0x2f, 0xe4, 0x38,
- 0x42, 0xa0, 0x4b, 0x30, 0xc7, 0xf5, 0xd6, 0x89, 0x55, 0xf7, 0x1a, 0xfc, 0x45, 0xc4, 0x6a, 0x12,
- 0x4d, 0x9d, 0x1b, 0x5d, 0xe7, 0xb8, 0x47, 0x43, 0xfe, 0x53, 0x02, 0xf4, 0x3c, 0xdb, 0xc4, 0x49,
- 0x28, 0x6a, 0x8e, 0xc1, 0x57, 0xdc, 0xa0, 0xd0, 0x8a, 0xea, 0x74, 0xbb, 0x55, 0x29, 0xae, 0x6e,
- 0x5d, 0x0d, 0x84, 0x38, 0x3e, 0x67, 0xe0, 0x70, 0xd0, 0x06, 0x03, 0x55, 0x80, 0x43, 0xc3, 0x14,
- 0xc7, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x4d, 0x9f, 0x7a, 0xc4, 0xdd, 0xd6, 0x6d, 0x87, 0xf0, 0xc6,
- 0x34, 0xa1, 0x1e, 0x15, 0x3e, 0x4d, 0xad, 0x75, 0x9c, 0xe1, 0x04, 0x12, 0x29, 0x00, 0xac, 0xac,
- 0xa8, 0xa3, 0x31, 0x3b, 0x05, 0x6e, 0x67, 0x86, 0x3d, 0xd8, 0x66, 0x24, 0xc5, 0x1d, 0x08, 0xf9,
- 0x3e, 0x2c, 0x6c, 0x13, 0x77, 0xdf, 0xd0, 0xc9, 0xaa, 0xae, 0xdb, 0xbe, 0xe5, 0x85, 0xcb, 0x7a,
- 0x15, 0x8a, 0x11, 0x4c, 0x54, 0xde, 0x11, 0x61, 0xbf, 0x18, 0x71, 0xe1, 0x18, 0x13, 0x95, 0x7a,
- 0x2e, 0xb5, 0xd4, 0x7f, 0xca, 0xc1, 0x78, 0x4c, 0x9f, 0xdf, 0x33, 0xac, 0x9a, 0x60, 0x3e, 0x16,
- 0xa2, 0xaf, 0x19, 0x56, 0xed, 0x59, 0xab, 0x32, 0x29, 0x60, 0xec, 0x13, 0x73, 0x20, 0xba, 0x0a,
- 0x79, 0x9f, 0x12, 0x57, 0x14, 0xf1, 0xc9, 0xac, 0x64, 0xbe, 0x45, 0x89, 0x1b, 0xee, 0x57, 0x13,
- 0x8c, 0x99, 0x09, 0x30, 0xa7, 0x40, 0x1b, 0x50, 0xa8, 0xb3, 0x47, 0x11, 0x75, 0x7a, 0x2a, 0x8b,
- 0xab, 0xf3, 0x47, 0x4c, 0x90, 0x06, 0x5c, 0x82, 0x03, 0x16, 0xf4, 0x00, 0x66, 0x68, 0x22, 0x84,
- 0xfc, 0xb9, 0x86, 0xd8, 0x97, 0xfa, 0x06, 0x5e, 0x45, 0xed, 0x56, 0x65, 0x26, 0x79, 0x84, 0xbb,
- 0x0c, 0xc8, 0x55, 0x98, 0xec, 0x70, 0x30, 0xbb, 0xcb, 0xaa, 0x97, 0x1e, 0x3d, 0x2d, 0x8f, 0x3c,
- 0x7e, 0x5a, 0x1e, 0x79, 0xf2, 0xb4, 0x3c, 0xf2, 0x79, 0xbb, 0x2c, 0x3d, 0x6a, 0x97, 0xa5, 0xc7,
- 0xed, 0xb2, 0xf4, 0xa4, 0x5d, 0x96, 0x7e, 0x6d, 0x97, 0xa5, 0x2f, 0x7f, 0x2b, 0x8f, 0xdc, 0x29,
- 0x0f, 0xfe, 0x5f, 0xec, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xd5, 0xd0, 0x62, 0xac, 0x15,
- 0x00, 0x00,
-}
+func (m *UserSubject) Reset() { *m = UserSubject{} }
func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.protomessage.pb.go
new file mode 100644
index 00000000..672834e9
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/generated.protomessage.pb.go
@@ -0,0 +1,68 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta2
+
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*FlowDistinguisherMethod) ProtoMessage() {}
+
+func (*FlowSchema) ProtoMessage() {}
+
+func (*FlowSchemaCondition) ProtoMessage() {}
+
+func (*FlowSchemaList) ProtoMessage() {}
+
+func (*FlowSchemaSpec) ProtoMessage() {}
+
+func (*FlowSchemaStatus) ProtoMessage() {}
+
+func (*GroupSubject) ProtoMessage() {}
+
+func (*LimitResponse) ProtoMessage() {}
+
+func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*NonResourcePolicyRule) ProtoMessage() {}
+
+func (*PolicyRulesWithSubjects) ProtoMessage() {}
+
+func (*PriorityLevelConfiguration) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationList) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationReference) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
+
+func (*QueuingConfiguration) ProtoMessage() {}
+
+func (*ResourcePolicyRule) ProtoMessage() {}
+
+func (*ServiceAccountSubject) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
+
+func (*UserSubject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.model_name.go
new file mode 100644
index 00000000..eb25256d
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta2/zz_generated.model_name.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExemptPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.ExemptPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowDistinguisherMethod) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowDistinguisherMethod"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchema) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowSchema"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowSchemaCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowSchemaList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowSchemaSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.FlowSchemaStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.GroupSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitResponse) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.LimitResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitedPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.NonResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRulesWithSubjects) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PolicyRulesWithSubjects"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationReference) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfigurationStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QueuingConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.QueuingConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.ResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.ServiceAccountSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.Subject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta2.UserSubject"
+}
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
index 95f4430d..ad57ab2b 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.flowcontrol.v1beta3
// +groupName=flowcontrol.apiserver.k8s.io
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
index e0a3fc1e..2d6d94af 100644
--- a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.pb.go
@@ -24,802 +24,56 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (m *ExemptPriorityLevelConfiguration) Reset() { *m = ExemptPriorityLevelConfiguration{} }
-func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
-func (*ExemptPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{0}
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *ExemptPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_ExemptPriorityLevelConfiguration.DiscardUnknown(m)
-}
+func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-var xxx_messageInfo_ExemptPriorityLevelConfiguration proto.InternalMessageInfo
+func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (m *FlowDistinguisherMethod) Reset() { *m = FlowDistinguisherMethod{} }
-func (*FlowDistinguisherMethod) ProtoMessage() {}
-func (*FlowDistinguisherMethod) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{1}
-}
-func (m *FlowDistinguisherMethod) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowDistinguisherMethod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowDistinguisherMethod) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowDistinguisherMethod.Merge(m, src)
-}
-func (m *FlowDistinguisherMethod) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowDistinguisherMethod) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowDistinguisherMethod.DiscardUnknown(m)
-}
+func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-var xxx_messageInfo_FlowDistinguisherMethod proto.InternalMessageInfo
+func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (m *FlowSchema) Reset() { *m = FlowSchema{} }
-func (*FlowSchema) ProtoMessage() {}
-func (*FlowSchema) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{2}
-}
-func (m *FlowSchema) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchema) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchema.Merge(m, src)
-}
-func (m *FlowSchema) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchema) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchema.DiscardUnknown(m)
-}
+func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-var xxx_messageInfo_FlowSchema proto.InternalMessageInfo
+func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (m *FlowSchemaCondition) Reset() { *m = FlowSchemaCondition{} }
-func (*FlowSchemaCondition) ProtoMessage() {}
-func (*FlowSchemaCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{3}
-}
-func (m *FlowSchemaCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaCondition.Merge(m, src)
-}
-func (m *FlowSchemaCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaCondition.DiscardUnknown(m)
-}
+func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-var xxx_messageInfo_FlowSchemaCondition proto.InternalMessageInfo
+func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (m *FlowSchemaList) Reset() { *m = FlowSchemaList{} }
-func (*FlowSchemaList) ProtoMessage() {}
-func (*FlowSchemaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{4}
-}
-func (m *FlowSchemaList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaList.Merge(m, src)
-}
-func (m *FlowSchemaList) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaList) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaList.DiscardUnknown(m)
-}
+func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-var xxx_messageInfo_FlowSchemaList proto.InternalMessageInfo
+func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (m *FlowSchemaSpec) Reset() { *m = FlowSchemaSpec{} }
-func (*FlowSchemaSpec) ProtoMessage() {}
-func (*FlowSchemaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{5}
-}
-func (m *FlowSchemaSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaSpec.Merge(m, src)
-}
-func (m *FlowSchemaSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaSpec.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-var xxx_messageInfo_FlowSchemaSpec proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (m *FlowSchemaStatus) Reset() { *m = FlowSchemaStatus{} }
-func (*FlowSchemaStatus) ProtoMessage() {}
-func (*FlowSchemaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{6}
-}
-func (m *FlowSchemaStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FlowSchemaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FlowSchemaStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlowSchemaStatus.Merge(m, src)
-}
-func (m *FlowSchemaStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *FlowSchemaStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_FlowSchemaStatus.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-var xxx_messageInfo_FlowSchemaStatus proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (m *GroupSubject) Reset() { *m = GroupSubject{} }
-func (*GroupSubject) ProtoMessage() {}
-func (*GroupSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{7}
-}
-func (m *GroupSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupSubject.Merge(m, src)
-}
-func (m *GroupSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupSubject.DiscardUnknown(m)
-}
+func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-var xxx_messageInfo_GroupSubject proto.InternalMessageInfo
+func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (m *LimitResponse) Reset() { *m = LimitResponse{} }
-func (*LimitResponse) ProtoMessage() {}
-func (*LimitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{8}
-}
-func (m *LimitResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitResponse.Merge(m, src)
-}
-func (m *LimitResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitResponse.DiscardUnknown(m)
-}
+func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-var xxx_messageInfo_LimitResponse proto.InternalMessageInfo
+func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (m *LimitedPriorityLevelConfiguration) Reset() { *m = LimitedPriorityLevelConfiguration{} }
-func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
-func (*LimitedPriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{9}
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.Merge(m, src)
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *LimitedPriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_LimitedPriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LimitedPriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *NonResourcePolicyRule) Reset() { *m = NonResourcePolicyRule{} }
-func (*NonResourcePolicyRule) ProtoMessage() {}
-func (*NonResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{10}
-}
-func (m *NonResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NonResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NonResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NonResourcePolicyRule.Merge(m, src)
-}
-func (m *NonResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NonResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NonResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NonResourcePolicyRule proto.InternalMessageInfo
-
-func (m *PolicyRulesWithSubjects) Reset() { *m = PolicyRulesWithSubjects{} }
-func (*PolicyRulesWithSubjects) ProtoMessage() {}
-func (*PolicyRulesWithSubjects) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{11}
-}
-func (m *PolicyRulesWithSubjects) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRulesWithSubjects) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRulesWithSubjects) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRulesWithSubjects.Merge(m, src)
-}
-func (m *PolicyRulesWithSubjects) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRulesWithSubjects) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRulesWithSubjects.DiscardUnknown(m)
-}
+func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-var xxx_messageInfo_PolicyRulesWithSubjects proto.InternalMessageInfo
+func (m *Subject) Reset() { *m = Subject{} }
-func (m *PriorityLevelConfiguration) Reset() { *m = PriorityLevelConfiguration{} }
-func (*PriorityLevelConfiguration) ProtoMessage() {}
-func (*PriorityLevelConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{12}
-}
-func (m *PriorityLevelConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfiguration.Merge(m, src)
-}
-func (m *PriorityLevelConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfiguration proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationCondition) Reset() { *m = PriorityLevelConfigurationCondition{} }
-func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
-func (*PriorityLevelConfigurationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{13}
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationCondition.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationCondition proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationList) Reset() { *m = PriorityLevelConfigurationList{} }
-func (*PriorityLevelConfigurationList) ProtoMessage() {}
-func (*PriorityLevelConfigurationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{14}
-}
-func (m *PriorityLevelConfigurationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationList.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationList proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationReference) Reset() { *m = PriorityLevelConfigurationReference{} }
-func (*PriorityLevelConfigurationReference) ProtoMessage() {}
-func (*PriorityLevelConfigurationReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{15}
-}
-func (m *PriorityLevelConfigurationReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationReference.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationReference) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationReference) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationReference proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationSpec) Reset() { *m = PriorityLevelConfigurationSpec{} }
-func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
-func (*PriorityLevelConfigurationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{16}
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationSpec.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationSpec proto.InternalMessageInfo
-
-func (m *PriorityLevelConfigurationStatus) Reset() { *m = PriorityLevelConfigurationStatus{} }
-func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
-func (*PriorityLevelConfigurationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{17}
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityLevelConfigurationStatus.Merge(m, src)
-}
-func (m *PriorityLevelConfigurationStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityLevelConfigurationStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityLevelConfigurationStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityLevelConfigurationStatus proto.InternalMessageInfo
-
-func (m *QueuingConfiguration) Reset() { *m = QueuingConfiguration{} }
-func (*QueuingConfiguration) ProtoMessage() {}
-func (*QueuingConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{18}
-}
-func (m *QueuingConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *QueuingConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *QueuingConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueuingConfiguration.Merge(m, src)
-}
-func (m *QueuingConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *QueuingConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_QueuingConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueuingConfiguration proto.InternalMessageInfo
-
-func (m *ResourcePolicyRule) Reset() { *m = ResourcePolicyRule{} }
-func (*ResourcePolicyRule) ProtoMessage() {}
-func (*ResourcePolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{19}
-}
-func (m *ResourcePolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePolicyRule.Merge(m, src)
-}
-func (m *ResourcePolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePolicyRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourcePolicyRule proto.InternalMessageInfo
-
-func (m *ServiceAccountSubject) Reset() { *m = ServiceAccountSubject{} }
-func (*ServiceAccountSubject) ProtoMessage() {}
-func (*ServiceAccountSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{20}
-}
-func (m *ServiceAccountSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceAccountSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceAccountSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceAccountSubject.Merge(m, src)
-}
-func (m *ServiceAccountSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceAccountSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceAccountSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceAccountSubject proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{21}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func (m *UserSubject) Reset() { *m = UserSubject{} }
-func (*UserSubject) ProtoMessage() {}
-func (*UserSubject) Descriptor() ([]byte, []int) {
- return fileDescriptor_52ab6629c083d251, []int{22}
-}
-func (m *UserSubject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UserSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UserSubject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserSubject.Merge(m, src)
-}
-func (m *UserSubject) XXX_Size() int {
- return m.Size()
-}
-func (m *UserSubject) XXX_DiscardUnknown() {
- xxx_messageInfo_UserSubject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserSubject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ExemptPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration")
- proto.RegisterType((*FlowDistinguisherMethod)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowDistinguisherMethod")
- proto.RegisterType((*FlowSchema)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchema")
- proto.RegisterType((*FlowSchemaCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaCondition")
- proto.RegisterType((*FlowSchemaList)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaList")
- proto.RegisterType((*FlowSchemaSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaSpec")
- proto.RegisterType((*FlowSchemaStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.FlowSchemaStatus")
- proto.RegisterType((*GroupSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.GroupSubject")
- proto.RegisterType((*LimitResponse)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitResponse")
- proto.RegisterType((*LimitedPriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration")
- proto.RegisterType((*NonResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.NonResourcePolicyRule")
- proto.RegisterType((*PolicyRulesWithSubjects)(nil), "k8s.io.api.flowcontrol.v1beta3.PolicyRulesWithSubjects")
- proto.RegisterType((*PriorityLevelConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfiguration")
- proto.RegisterType((*PriorityLevelConfigurationCondition)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition")
- proto.RegisterType((*PriorityLevelConfigurationList)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationList")
- proto.RegisterType((*PriorityLevelConfigurationReference)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference")
- proto.RegisterType((*PriorityLevelConfigurationSpec)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec")
- proto.RegisterType((*PriorityLevelConfigurationStatus)(nil), "k8s.io.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus")
- proto.RegisterType((*QueuingConfiguration)(nil), "k8s.io.api.flowcontrol.v1beta3.QueuingConfiguration")
- proto.RegisterType((*ResourcePolicyRule)(nil), "k8s.io.api.flowcontrol.v1beta3.ResourcePolicyRule")
- proto.RegisterType((*ServiceAccountSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.ServiceAccountSubject")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.flowcontrol.v1beta3.Subject")
- proto.RegisterType((*UserSubject)(nil), "k8s.io.api.flowcontrol.v1beta3.UserSubject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/flowcontrol/v1beta3/generated.proto", fileDescriptor_52ab6629c083d251)
-}
-
-var fileDescriptor_52ab6629c083d251 = []byte{
- // 1589 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcb, 0x6f, 0xdc, 0x54,
- 0x17, 0x8f, 0x27, 0x33, 0x49, 0xe6, 0xe4, 0xd9, 0x9b, 0x46, 0x99, 0x2f, 0xfd, 0x34, 0x93, 0xfa,
- 0x93, 0xbe, 0x02, 0x6d, 0x3d, 0x7d, 0xd2, 0x02, 0xe2, 0x51, 0xa7, 0xa5, 0x94, 0x26, 0x69, 0x7a,
- 0xd3, 0x42, 0x55, 0x2a, 0x51, 0xc7, 0x73, 0xe3, 0x71, 0x33, 0x7e, 0xd4, 0xd7, 0x4e, 0x08, 0xdd,
- 0x20, 0xfe, 0x02, 0xd6, 0xb0, 0x64, 0xc1, 0x8a, 0x0d, 0x5b, 0x16, 0x2c, 0xa9, 0x58, 0x75, 0xd9,
- 0xd5, 0x40, 0x87, 0x15, 0xff, 0x01, 0x54, 0x42, 0x42, 0xf7, 0xfa, 0xda, 0x1e, 0xcf, 0xcb, 0xa3,
- 0x54, 0xea, 0x8a, 0x5d, 0x7c, 0xee, 0x39, 0xbf, 0x73, 0xcf, 0xb9, 0xe7, 0xf1, 0x9b, 0x80, 0xb2,
- 0x73, 0x91, 0x2a, 0xa6, 0x53, 0xd5, 0x5c, 0xb3, 0xba, 0xdd, 0x70, 0xf6, 0x74, 0xc7, 0xf6, 0x3d,
- 0xa7, 0x51, 0xdd, 0x3d, 0xbd, 0x45, 0x7c, 0xed, 0x6c, 0xd5, 0x20, 0x36, 0xf1, 0x34, 0x9f, 0xd4,
- 0x14, 0xd7, 0x73, 0x7c, 0x07, 0x95, 0x43, 0x7d, 0x45, 0x73, 0x4d, 0xa5, 0x4d, 0x5f, 0x11, 0xfa,
- 0x4b, 0x27, 0x0d, 0xd3, 0xaf, 0x07, 0x5b, 0x8a, 0xee, 0x58, 0x55, 0xc3, 0x31, 0x9c, 0x2a, 0x37,
- 0xdb, 0x0a, 0xb6, 0xf9, 0x17, 0xff, 0xe0, 0x7f, 0x85, 0x70, 0x4b, 0xe7, 0x12, 0xf7, 0x96, 0xa6,
- 0xd7, 0x4d, 0x9b, 0x78, 0xfb, 0x55, 0x77, 0xc7, 0x60, 0x02, 0x5a, 0xb5, 0x88, 0xaf, 0x55, 0x77,
- 0x4f, 0x77, 0x5e, 0x62, 0xa9, 0xda, 0xcf, 0xca, 0x0b, 0x6c, 0xdf, 0xb4, 0x48, 0x97, 0xc1, 0xeb,
- 0x59, 0x06, 0x54, 0xaf, 0x13, 0x4b, 0xeb, 0xb4, 0x93, 0x7f, 0x94, 0x60, 0xf9, 0xca, 0x67, 0xc4,
- 0x72, 0xfd, 0x0d, 0xcf, 0x74, 0x3c, 0xd3, 0xdf, 0x5f, 0x25, 0xbb, 0xa4, 0xb1, 0xe2, 0xd8, 0xdb,
- 0xa6, 0x11, 0x78, 0x9a, 0x6f, 0x3a, 0x36, 0xba, 0x03, 0x25, 0xdb, 0xb1, 0x4c, 0x5b, 0x63, 0x72,
- 0x3d, 0xf0, 0x3c, 0x62, 0xeb, 0xfb, 0x9b, 0x75, 0xcd, 0x23, 0xb4, 0x24, 0x2d, 0x4b, 0xaf, 0x14,
- 0xd4, 0xff, 0xb6, 0x9a, 0x95, 0xd2, 0x7a, 0x1f, 0x1d, 0xdc, 0xd7, 0x1a, 0xbd, 0x0d, 0xb3, 0x0d,
- 0x62, 0xd7, 0xb4, 0xad, 0x06, 0xd9, 0x20, 0x9e, 0x4e, 0x6c, 0xbf, 0x94, 0xe3, 0x80, 0xf3, 0xad,
- 0x66, 0x65, 0x76, 0x35, 0x7d, 0x84, 0x3b, 0x75, 0xe5, 0xbb, 0xb0, 0xf8, 0x7e, 0xc3, 0xd9, 0xbb,
- 0x6c, 0x52, 0xdf, 0xb4, 0x8d, 0xc0, 0xa4, 0x75, 0xe2, 0xad, 0x11, 0xbf, 0xee, 0xd4, 0xd0, 0xbb,
- 0x90, 0xf7, 0xf7, 0x5d, 0xc2, 0xef, 0x57, 0x54, 0x8f, 0x3f, 0x6e, 0x56, 0x46, 0x5a, 0xcd, 0x4a,
- 0xfe, 0xd6, 0xbe, 0x4b, 0x9e, 0x37, 0x2b, 0x47, 0xfa, 0x98, 0xb1, 0x63, 0xcc, 0x0d, 0xe5, 0xaf,
- 0x73, 0x00, 0x4c, 0x6b, 0x93, 0x27, 0x0e, 0xdd, 0x87, 0x09, 0xf6, 0x58, 0x35, 0xcd, 0xd7, 0x38,
- 0xe6, 0xe4, 0x99, 0x53, 0x4a, 0x52, 0x29, 0x71, 0xce, 0x15, 0x77, 0xc7, 0x60, 0x02, 0xaa, 0x30,
- 0x6d, 0x65, 0xf7, 0xb4, 0x72, 0x63, 0xeb, 0x01, 0xd1, 0xfd, 0x35, 0xe2, 0x6b, 0x2a, 0x12, 0xb7,
- 0x80, 0x44, 0x86, 0x63, 0x54, 0xb4, 0x01, 0x79, 0xea, 0x12, 0x9d, 0x27, 0x60, 0xf2, 0x8c, 0xa2,
- 0x0c, 0xae, 0x43, 0x25, 0xb9, 0xdb, 0xa6, 0x4b, 0x74, 0x75, 0x2a, 0x8a, 0x90, 0x7d, 0x61, 0x8e,
- 0x84, 0xee, 0xc0, 0x18, 0xf5, 0x35, 0x3f, 0xa0, 0xa5, 0xd1, 0xae, 0x1b, 0x67, 0x61, 0x72, 0x3b,
- 0x75, 0x46, 0xa0, 0x8e, 0x85, 0xdf, 0x58, 0xe0, 0xc9, 0x4f, 0x73, 0x30, 0x9f, 0x28, 0xaf, 0x38,
- 0x76, 0xcd, 0xe4, 0x95, 0xf2, 0x56, 0x2a, 0xeb, 0xc7, 0x3a, 0xb2, 0xbe, 0xd8, 0xc3, 0x24, 0xc9,
- 0x38, 0x7a, 0x23, 0xbe, 0x6e, 0x8e, 0x9b, 0x1f, 0x4d, 0x3b, 0x7f, 0xde, 0xac, 0xcc, 0xc6, 0x66,
- 0xe9, 0xfb, 0xa0, 0x5d, 0x40, 0x0d, 0x8d, 0xfa, 0xb7, 0x3c, 0xcd, 0xa6, 0x21, 0xac, 0x69, 0x11,
- 0x11, 0xf5, 0x6b, 0xc3, 0xbd, 0x13, 0xb3, 0x50, 0x97, 0x84, 0x4b, 0xb4, 0xda, 0x85, 0x86, 0x7b,
- 0x78, 0x40, 0xff, 0x87, 0x31, 0x8f, 0x68, 0xd4, 0xb1, 0x4b, 0x79, 0x7e, 0xe5, 0x38, 0x5f, 0x98,
- 0x4b, 0xb1, 0x38, 0x45, 0xaf, 0xc2, 0xb8, 0x45, 0x28, 0xd5, 0x0c, 0x52, 0x2a, 0x70, 0xc5, 0x59,
- 0xa1, 0x38, 0xbe, 0x16, 0x8a, 0x71, 0x74, 0x2e, 0xff, 0x24, 0xc1, 0x4c, 0x92, 0xa7, 0x55, 0x93,
- 0xfa, 0xe8, 0x5e, 0x57, 0xed, 0x29, 0xc3, 0xc5, 0xc4, 0xac, 0x79, 0xe5, 0xcd, 0x09, 0x77, 0x13,
- 0x91, 0xa4, 0xad, 0xee, 0x6e, 0x40, 0xc1, 0xf4, 0x89, 0xc5, 0xb2, 0x3e, 0xda, 0x91, 0xae, 0x8c,
- 0x22, 0x51, 0xa7, 0x05, 0x6c, 0xe1, 0x1a, 0x03, 0xc0, 0x21, 0x8e, 0xfc, 0xc7, 0x68, 0x7b, 0x04,
- 0xac, 0x1e, 0xd1, 0x77, 0x12, 0x2c, 0xb9, 0x7d, 0x07, 0x8c, 0x08, 0x6a, 0x25, 0xcb, 0x73, 0xff,
- 0x11, 0x85, 0xc9, 0x36, 0x61, 0x73, 0x85, 0xa8, 0xb2, 0xb8, 0xd2, 0xd2, 0x00, 0xe5, 0x01, 0x57,
- 0x41, 0x1f, 0x02, 0xb2, 0x34, 0x9f, 0x65, 0xd4, 0xd8, 0xf0, 0x88, 0x4e, 0x6a, 0x0c, 0x55, 0x0c,
- 0xa5, 0xb8, 0x3a, 0xd6, 0xba, 0x34, 0x70, 0x0f, 0x2b, 0xf4, 0xa5, 0x04, 0xf3, 0xb5, 0xee, 0x21,
- 0x23, 0xea, 0xf2, 0xc2, 0x30, 0x89, 0xee, 0x31, 0xa3, 0xd4, 0xc5, 0x56, 0xb3, 0x32, 0xdf, 0xe3,
- 0x00, 0xf7, 0x72, 0x86, 0xee, 0x41, 0xc1, 0x0b, 0x1a, 0x84, 0x96, 0xf2, 0xfc, 0x79, 0x33, 0xbd,
- 0x6e, 0x38, 0x0d, 0x53, 0xdf, 0xc7, 0xcc, 0xe4, 0x63, 0xd3, 0xaf, 0x6f, 0x06, 0x7c, 0x56, 0xd1,
- 0xe4, 0xad, 0xf9, 0x11, 0x0e, 0x41, 0xe5, 0x47, 0x30, 0xd7, 0x39, 0x34, 0x90, 0x01, 0xa0, 0x47,
- 0x7d, 0xca, 0x16, 0x04, 0x73, 0x7b, 0x76, 0xf8, 0xaa, 0x8a, 0x7b, 0x3c, 0x99, 0x97, 0xb1, 0x88,
- 0xe2, 0x36, 0x68, 0xf9, 0x14, 0x4c, 0x5d, 0xf5, 0x9c, 0xc0, 0x15, 0x77, 0x44, 0xcb, 0x90, 0xb7,
- 0x35, 0x2b, 0x9a, 0x3e, 0xf1, 0x44, 0x5c, 0xd7, 0x2c, 0x82, 0xf9, 0x89, 0xfc, 0xad, 0x04, 0xd3,
- 0xab, 0xa6, 0x65, 0xfa, 0x98, 0x50, 0xd7, 0xb1, 0x29, 0x41, 0xe7, 0x53, 0x13, 0xeb, 0x68, 0xc7,
- 0xc4, 0x3a, 0x94, 0x52, 0x6e, 0x9b, 0x55, 0x9f, 0xc0, 0xf8, 0xc3, 0x80, 0x04, 0xa6, 0x6d, 0x88,
- 0x79, 0x7d, 0x2e, 0x2b, 0xc0, 0x9b, 0xa1, 0x7a, 0xaa, 0xda, 0xd4, 0x49, 0x36, 0x02, 0xc4, 0x09,
- 0x8e, 0x10, 0xe5, 0xbf, 0x73, 0x70, 0x94, 0x3b, 0x26, 0xb5, 0x01, 0x5b, 0xf9, 0x5e, 0xe6, 0x56,
- 0x5e, 0x16, 0xd1, 0x1c, 0x64, 0x33, 0x3f, 0x80, 0xe9, 0x46, 0x7b, 0xec, 0x22, 0xcc, 0x93, 0x59,
- 0x61, 0xa6, 0x12, 0xa6, 0x2e, 0x88, 0x1b, 0xa4, 0x93, 0x8e, 0xd3, 0xd0, 0xbd, 0x58, 0xc0, 0xe8,
- 0xf0, 0x2c, 0x00, 0xdd, 0x80, 0x85, 0x2d, 0xc7, 0xf3, 0x9c, 0x3d, 0xd3, 0x36, 0xb8, 0x9f, 0x08,
- 0x24, 0xcf, 0x41, 0xfe, 0xd3, 0x6a, 0x56, 0x16, 0xd4, 0x5e, 0x0a, 0xb8, 0xb7, 0x9d, 0xbc, 0x07,
- 0x0b, 0xeb, 0x6c, 0xa6, 0x50, 0x27, 0xf0, 0x74, 0x92, 0x34, 0x04, 0xaa, 0x40, 0x61, 0x97, 0x78,
- 0x5b, 0x61, 0x51, 0x17, 0xd5, 0x22, 0x6b, 0x87, 0x8f, 0x98, 0x00, 0x87, 0x72, 0x16, 0x89, 0x9d,
- 0x58, 0xde, 0xc6, 0xab, 0xb4, 0x34, 0xc6, 0x55, 0x79, 0x24, 0xeb, 0xe9, 0x23, 0xdc, 0xa9, 0x2b,
- 0x37, 0x73, 0xb0, 0xd8, 0xa7, 0xff, 0xd0, 0x6d, 0x98, 0xa0, 0xe2, 0x6f, 0xd1, 0x53, 0xc7, 0xb2,
- 0xde, 0x42, 0xd8, 0x26, 0xd3, 0x3f, 0x02, 0xc3, 0x31, 0x14, 0x72, 0x60, 0xda, 0x13, 0x57, 0xe0,
- 0x3e, 0xc5, 0x16, 0x38, 0x93, 0x85, 0xdd, 0x9d, 0x9d, 0xe4, 0xb1, 0x71, 0x3b, 0x20, 0x4e, 0xe3,
- 0xa3, 0x47, 0x30, 0xd7, 0x16, 0x76, 0xe8, 0x73, 0x94, 0xfb, 0x3c, 0x9f, 0xe5, 0xb3, 0xe7, 0xa3,
- 0xa8, 0x25, 0xe1, 0x76, 0x6e, 0xbd, 0x03, 0x16, 0x77, 0x39, 0x92, 0x7f, 0xc9, 0xc1, 0x80, 0xc5,
- 0xf0, 0x12, 0x48, 0xde, 0xfd, 0x14, 0xc9, 0x7b, 0xe7, 0xe0, 0x1b, 0xaf, 0x2f, 0xe9, 0xab, 0x77,
- 0x90, 0xbe, 0xf7, 0x5e, 0xc0, 0xc7, 0x60, 0x12, 0xf8, 0x67, 0x0e, 0xfe, 0xd7, 0xdf, 0x38, 0x21,
- 0x85, 0xd7, 0x53, 0x23, 0xf6, 0x42, 0xc7, 0x88, 0x3d, 0x36, 0x04, 0xc4, 0xbf, 0x24, 0xb1, 0x83,
- 0x24, 0xfe, 0x2a, 0x41, 0xb9, 0x7f, 0xde, 0x5e, 0x02, 0x69, 0xfc, 0x34, 0x4d, 0x1a, 0xdf, 0x3c,
- 0x78, 0x91, 0xf5, 0x21, 0x91, 0x57, 0x07, 0xd5, 0x56, 0x4c, 0xf7, 0x86, 0x58, 0xf9, 0xdf, 0xe7,
- 0x06, 0xa5, 0x8a, 0xb3, 0xd3, 0x8c, 0x5f, 0x2d, 0x29, 0xeb, 0x2b, 0x36, 0x5b, 0x3d, 0x16, 0xdb,
- 0x1e, 0x61, 0x41, 0xd6, 0x61, 0xbc, 0x11, 0xee, 0x6a, 0xd1, 0xd4, 0x97, 0x86, 0x5a, 0x91, 0x83,
- 0x56, 0x7b, 0x48, 0x0b, 0x84, 0x1a, 0x8e, 0xe0, 0x51, 0x0d, 0xc6, 0x08, 0xff, 0xa9, 0x3e, 0x6c,
- 0x67, 0x67, 0xfd, 0xb0, 0x57, 0x81, 0x55, 0x61, 0xa8, 0x85, 0x05, 0xb6, 0xfc, 0x8d, 0x04, 0xcb,
- 0x59, 0x23, 0x01, 0xed, 0xf5, 0xa0, 0x78, 0x2f, 0x40, 0xdf, 0x87, 0xa7, 0x7c, 0x3f, 0x48, 0x70,
- 0xb8, 0x17, 0x93, 0x62, 0x4d, 0xc6, 0xe8, 0x53, 0xcc, 0x7d, 0xe2, 0x26, 0xbb, 0xc9, 0xa5, 0x58,
- 0x9c, 0xa2, 0x13, 0x30, 0x51, 0xd7, 0xec, 0xda, 0xa6, 0xf9, 0x79, 0xc4, 0xea, 0xe3, 0x32, 0xff,
- 0x40, 0xc8, 0x71, 0xac, 0x81, 0x2e, 0xc3, 0x1c, 0xb7, 0x5b, 0x25, 0xb6, 0xe1, 0xd7, 0xf9, 0x8b,
- 0x08, 0x6a, 0x12, 0x6f, 0x9d, 0x9b, 0x1d, 0xe7, 0xb8, 0xcb, 0x42, 0xfe, 0x4b, 0x02, 0x74, 0x10,
- 0x36, 0x71, 0x1c, 0x8a, 0x9a, 0x6b, 0x72, 0x8a, 0x1b, 0x36, 0x5a, 0x51, 0x9d, 0x6e, 0x35, 0x2b,
- 0xc5, 0x4b, 0x1b, 0xd7, 0x42, 0x21, 0x4e, 0xce, 0x99, 0x72, 0xb4, 0x68, 0xc3, 0x85, 0x2a, 0x94,
- 0x23, 0xc7, 0x14, 0x27, 0xe7, 0xe8, 0x22, 0x4c, 0xe9, 0x8d, 0x80, 0xfa, 0xc4, 0xdb, 0xd4, 0x1d,
- 0x97, 0xf0, 0xc1, 0x34, 0xa1, 0x1e, 0x16, 0x31, 0x4d, 0xad, 0xb4, 0x9d, 0xe1, 0x94, 0x26, 0x52,
- 0x00, 0x58, 0x5b, 0x51, 0x57, 0x63, 0x7e, 0x0a, 0xdc, 0xcf, 0x0c, 0x7b, 0xb0, 0xf5, 0x58, 0x8a,
- 0xdb, 0x34, 0xe4, 0x07, 0xb0, 0xb0, 0x49, 0xbc, 0x5d, 0x53, 0x27, 0x97, 0x74, 0xdd, 0x09, 0x6c,
- 0x3f, 0x22, 0xeb, 0x55, 0x28, 0xc6, 0x6a, 0xa2, 0xf3, 0x0e, 0x09, 0xff, 0xc5, 0x18, 0x0b, 0x27,
- 0x3a, 0x71, 0xab, 0xe7, 0xfa, 0xb6, 0xfa, 0xcf, 0x39, 0x18, 0x4f, 0xe0, 0xf3, 0x3b, 0xa6, 0x5d,
- 0x13, 0xc8, 0x47, 0x22, 0xed, 0xeb, 0xa6, 0x5d, 0x7b, 0xde, 0xac, 0x4c, 0x0a, 0x35, 0xf6, 0x89,
- 0xb9, 0x22, 0xba, 0x06, 0xf9, 0x80, 0x12, 0x4f, 0x34, 0xf1, 0xf1, 0xac, 0x62, 0xbe, 0x4d, 0x89,
- 0x17, 0xf1, 0xab, 0x09, 0x86, 0xcc, 0x04, 0x98, 0x43, 0xa0, 0x35, 0x28, 0x18, 0xec, 0x51, 0x44,
- 0x9f, 0x9e, 0xc8, 0xc2, 0x6a, 0xff, 0x11, 0x13, 0x96, 0x01, 0x97, 0xe0, 0x10, 0x05, 0x3d, 0x84,
- 0x19, 0x9a, 0x4a, 0x21, 0x7f, 0xae, 0x21, 0xf8, 0x52, 0xcf, 0xc4, 0xab, 0xa8, 0xd5, 0xac, 0xcc,
- 0xa4, 0x8f, 0x70, 0x87, 0x03, 0xb9, 0x0a, 0x93, 0x6d, 0x01, 0x66, 0x4f, 0x59, 0xf5, 0xf2, 0xe3,
- 0x67, 0xe5, 0x91, 0x27, 0xcf, 0xca, 0x23, 0x4f, 0x9f, 0x95, 0x47, 0xbe, 0x68, 0x95, 0xa5, 0xc7,
- 0xad, 0xb2, 0xf4, 0xa4, 0x55, 0x96, 0x9e, 0xb6, 0xca, 0xd2, 0x6f, 0xad, 0xb2, 0xf4, 0xd5, 0xef,
- 0xe5, 0x91, 0xbb, 0xe5, 0xc1, 0xff, 0x8b, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x03, 0x5d, 0xec,
- 0x01, 0xac, 0x15, 0x00, 0x00,
-}
+func (m *UserSubject) Reset() { *m = UserSubject{} }
func (m *ExemptPriorityLevelConfiguration) Marshal() (dAtA []byte, err error) {
size := m.Size()
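
The hunk above removes the embedded gzipped FileDescriptorProto and the gogo Descriptor/XXX_*/registration boilerplate for the flowcontrol v1beta3 types; what remains are plain Reset methods that simply re-zero the receiver. A minimal sketch of what that looks like from a consumer's side (illustrative only, not part of the diff; the package alias and the "alice" value are made up):

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)

func main() {
	// Reset is now just *m = UserSubject{}: no descriptor lookup and
	// no proto registration side effects.
	subject := &flowcontrolv1beta3.UserSubject{Name: "alice"}
	subject.Reset()
	fmt.Printf("%+v\n", *subject) // prints the zero value: {Name:}
}
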
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.protomessage.pb.go
new file mode 100644
index 00000000..20407c04
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/generated.protomessage.pb.go
@@ -0,0 +1,68 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta3
+
+func (*ExemptPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*FlowDistinguisherMethod) ProtoMessage() {}
+
+func (*FlowSchema) ProtoMessage() {}
+
+func (*FlowSchemaCondition) ProtoMessage() {}
+
+func (*FlowSchemaList) ProtoMessage() {}
+
+func (*FlowSchemaSpec) ProtoMessage() {}
+
+func (*FlowSchemaStatus) ProtoMessage() {}
+
+func (*GroupSubject) ProtoMessage() {}
+
+func (*LimitResponse) ProtoMessage() {}
+
+func (*LimitedPriorityLevelConfiguration) ProtoMessage() {}
+
+func (*NonResourcePolicyRule) ProtoMessage() {}
+
+func (*PolicyRulesWithSubjects) ProtoMessage() {}
+
+func (*PriorityLevelConfiguration) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationCondition) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationList) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationReference) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationSpec) ProtoMessage() {}
+
+func (*PriorityLevelConfigurationStatus) ProtoMessage() {}
+
+func (*QueuingConfiguration) ProtoMessage() {}
+
+func (*ResourcePolicyRule) ProtoMessage() {}
+
+func (*ServiceAccountSubject) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
+
+func (*UserSubject) ProtoMessage() {}
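
The new generated.protomessage.pb.go file keeps only empty ProtoMessage() marker methods, gated behind the kubernetes_protomessage_one_more_release build tag, so code that still relies on the legacy gogo proto.Message method set gets one more release to migrate. A rough sketch of that method set from a caller's perspective, assuming the tag is enabled and that the generated String methods remain in generated.pb.go (the interface name and main package here are illustrative):

//go:build kubernetes_protomessage_one_more_release

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)

// legacyProtoMessage mirrors the method set the old gogo-generated code
// provided: Reset and String live in generated.pb.go, while ProtoMessage
// is only compiled in when the build tag above is set.
type legacyProtoMessage interface {
	Reset()
	String() string
	ProtoMessage()
}

// Compile-time check; build with:
//   go build -tags kubernetes_protomessage_one_more_release
var _ legacyProtoMessage = &flowcontrolv1beta3.UserSubject{}

func main() {
	fmt.Println("*UserSubject still satisfies the legacy proto message shape")
}
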
diff --git a/operator/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.model_name.go b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.model_name.go
new file mode 100644
index 00000000..0aae0627
--- /dev/null
+++ b/operator/vendor/k8s.io/api/flowcontrol/v1beta3/zz_generated.model_name.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta3
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExemptPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowDistinguisherMethod) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchema) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowSchema"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowSchemaList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FlowSchemaStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.GroupSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitResponse) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.LimitResponse"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LimitedPriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NonResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRulesWithSubjects) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationCondition) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationList) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationReference) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationSpec) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityLevelConfigurationStatus) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QueuingConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceAccountSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.Subject"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UserSubject) OpenAPIModelName() string {
+ return "io.k8s.api.flowcontrol.v1beta3.UserSubject"
+}
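
The companion zz_generated.model_name.go file gives every type in the package an OpenAPIModelName() accessor that returns its fully qualified OpenAPI model identifier as a plain string. A small illustrative use (the package alias is arbitrary):

package main

import (
	"fmt"

	flowcontrolv1beta3 "k8s.io/api/flowcontrol/v1beta3"
)

func main() {
	// Value receiver, so a zero value is enough to read the model name.
	var fs flowcontrolv1beta3.FlowSchema
	fmt.Println(fs.OpenAPIModelName()) // io.k8s.api.flowcontrol.v1beta3.FlowSchema
}
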
diff --git a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
index f5fbbdbf..7e7a980c 100644
--- a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.api.imagepolicy.v1alpha1
// +groupName=imagepolicy.k8s.io
diff --git a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
index 57732a51..c8e53c7f 100644
--- a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
@@ -23,193 +23,20 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *ImageReview) Reset() { *m = ImageReview{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} }
-func (m *ImageReview) Reset() { *m = ImageReview{} }
-func (*ImageReview) ProtoMessage() {}
-func (*ImageReview) Descriptor() ([]byte, []int) {
- return fileDescriptor_7620d1538838ac6f, []int{0}
-}
-func (m *ImageReview) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ImageReview) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ImageReview.Merge(m, src)
-}
-func (m *ImageReview) XXX_Size() int {
- return m.Size()
-}
-func (m *ImageReview) XXX_DiscardUnknown() {
- xxx_messageInfo_ImageReview.DiscardUnknown(m)
-}
+func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} }
-var xxx_messageInfo_ImageReview proto.InternalMessageInfo
-
-func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} }
-func (*ImageReviewContainerSpec) ProtoMessage() {}
-func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7620d1538838ac6f, []int{1}
-}
-func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src)
-}
-func (m *ImageReviewContainerSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo
-
-func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} }
-func (*ImageReviewSpec) ProtoMessage() {}
-func (*ImageReviewSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_7620d1538838ac6f, []int{2}
-}
-func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ImageReviewSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ImageReviewSpec.Merge(m, src)
-}
-func (m *ImageReviewSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ImageReviewSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo
-
-func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} }
-func (*ImageReviewStatus) ProtoMessage() {}
-func (*ImageReviewStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_7620d1538838ac6f, []int{3}
-}
-func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ImageReviewStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ImageReviewStatus.Merge(m, src)
-}
-func (m *ImageReviewStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ImageReviewStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ImageReviewStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview")
- proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec")
- proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry")
- proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_7620d1538838ac6f)
-}
-
-var fileDescriptor_7620d1538838ac6f = []byte{
- // 593 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
- 0x18, 0xc6, 0x9b, 0x74, 0xff, 0xea, 0x02, 0xeb, 0x0c, 0x48, 0x51, 0x0f, 0xe9, 0x54, 0x24, 0x34,
- 0x0e, 0xd8, 0xb4, 0x42, 0x68, 0x70, 0x00, 0x35, 0xd3, 0x24, 0x38, 0x00, 0x92, 0xb9, 0xed, 0x84,
- 0x9b, 0x9a, 0xd4, 0xb4, 0x89, 0xa3, 0xd8, 0xe9, 0xe8, 0x8d, 0x4f, 0x80, 0xf8, 0x06, 0x7c, 0x11,
- 0x3e, 0x40, 0x8f, 0x3b, 0xee, 0x34, 0xd1, 0x70, 0xe4, 0x4b, 0xa0, 0x38, 0x69, 0x13, 0xda, 0xa1,
- 0xa9, 0xb7, 0xbc, 0xef, 0xeb, 0xe7, 0xf7, 0x3e, 0x79, 0x62, 0x05, 0xe0, 0xd1, 0xb1, 0x44, 0x5c,
- 0x60, 0x1a, 0x72, 0xcc, 0x7d, 0xea, 0xb1, 0x50, 0x8c, 0xb9, 0x3b, 0xc5, 0x93, 0x0e, 0x1d, 0x87,
- 0x43, 0xda, 0xc1, 0x1e, 0x0b, 0x58, 0x44, 0x15, 0x1b, 0xa0, 0x30, 0x12, 0x4a, 0xc0, 0x56, 0x26,
- 0x40, 0x34, 0xe4, 0xa8, 0x24, 0x40, 0x0b, 0x41, 0xf3, 0xb1, 0xc7, 0xd5, 0x30, 0xee, 0x23, 0x57,
- 0xf8, 0xd8, 0x13, 0x9e, 0xc0, 0x5a, 0xd7, 0x8f, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x29, 0xe3, 0x35,
- 0x9f, 0x16, 0x06, 0x7c, 0xea, 0x0e, 0x79, 0xc0, 0xa2, 0x29, 0x0e, 0x47, 0x5e, 0xda, 0x90, 0xd8,
- 0x67, 0x8a, 0xe2, 0xc9, 0x9a, 0x8b, 0x26, 0xfe, 0x9f, 0x2a, 0x8a, 0x03, 0xc5, 0x7d, 0xb6, 0x26,
- 0x78, 0x76, 0x93, 0x40, 0xba, 0x43, 0xe6, 0xd3, 0x55, 0x5d, 0xfb, 0x87, 0x09, 0xea, 0x6f, 0xd2,
- 0xd7, 0x24, 0x6c, 0xc2, 0xd9, 0x39, 0xfc, 0x08, 0xf6, 0x52, 0x4f, 0x03, 0xaa, 0xa8, 0x65, 0x1c,
- 0x1a, 0x47, 0xf5, 0xee, 0x13, 0x54, 0x24, 0xb2, 0x44, 0xa3, 0x70, 0xe4, 0xa5, 0x0d, 0x89, 0xd2,
- 0xd3, 0x68, 0xd2, 0x41, 0xef, 0xfb, 0x9f, 0x99, 0xab, 0xde, 0x32, 0x45, 0x1d, 0x38, 0xbb, 0x6a,
- 0x55, 0x92, 0xab, 0x16, 0x28, 0x7a, 0x64, 0x49, 0x85, 0x04, 0x6c, 0xc9, 0x90, 0xb9, 0x96, 0xb9,
- 0x46, 0xbf, 0x36, 0x6f, 0x54, 0x72, 0xf7, 0x21, 0x64, 0xae, 0x73, 0x2b, 0xa7, 0x6f, 0xa5, 0x15,
- 0xd1, 0x2c, 0x78, 0x06, 0x76, 0xa4, 0xa2, 0x2a, 0x96, 0x56, 0x55, 0x53, 0xbb, 0x1b, 0x51, 0xb5,
- 0xd2, 0xb9, 0x93, 0x73, 0x77, 0xb2, 0x9a, 0xe4, 0xc4, 0xf6, 0x2b, 0x60, 0x95, 0x0e, 0x9f, 0x88,
- 0x40, 0xd1, 0x34, 0x82, 0x74, 0x3b, 0x7c, 0x00, 0xb6, 0x35, 0x5d, 0x47, 0x55, 0x73, 0x6e, 0xe7,
- 0x88, 0xed, 0x4c, 0x90, 0xcd, 0xda, 0x7f, 0x4c, 0xb0, 0xbf, 0xf2, 0x12, 0xd0, 0x07, 0xc0, 0x5d,
- 0x90, 0xa4, 0x65, 0x1c, 0x56, 0x8f, 0xea, 0xdd, 0xe7, 0x9b, 0x98, 0xfe, 0xc7, 0x47, 0x91, 0xf8,
- 0xb2, 0x2d, 0x49, 0x69, 0x01, 0xfc, 0x02, 0xea, 0x34, 0x08, 0x84, 0xa2, 0x8a, 0x8b, 0x40, 0x5a,
- 0xa6, 0xde, 0xd7, 0xdb, 0x34, 0x7a, 0xd4, 0x2b, 0x18, 0xa7, 0x81, 0x8a, 0xa6, 0xce, 0xdd, 0x7c,
- 0x6f, 0xbd, 0x34, 0x21, 0xe5, 0x55, 0x10, 0x83, 0x5a, 0x40, 0x7d, 0x26, 0x43, 0xea, 0x32, 0xfd,
- 0x71, 0x6a, 0xce, 0x41, 0x2e, 0xaa, 0xbd, 0x5b, 0x0c, 0x48, 0x71, 0xa6, 0xf9, 0x12, 0x34, 0x56,
- 0xd7, 0xc0, 0x06, 0xa8, 0x8e, 0xd8, 0x34, 0x0b, 0x99, 0xa4, 0x8f, 0xf0, 0x1e, 0xd8, 0x9e, 0xd0,
- 0x71, 0xcc, 0xf4, 0x2d, 0xaa, 0x91, 0xac, 0x78, 0x61, 0x1e, 0x1b, 0xed, 0x9f, 0x26, 0x38, 0x58,
- 0xfb, 0xb8, 0xf0, 0x11, 0xd8, 0xa5, 0xe3, 0xb1, 0x38, 0x67, 0x03, 0x4d, 0xd9, 0x73, 0xf6, 0x73,
- 0x13, 0xbb, 0xbd, 0xac, 0x4d, 0x16, 0x73, 0xf8, 0x10, 0xec, 0x44, 0x8c, 0x4a, 0x11, 0x64, 0xec,
- 0xe2, 0x5e, 0x10, 0xdd, 0x25, 0xf9, 0x14, 0x7e, 0x33, 0x40, 0x83, 0xc6, 0x03, 0xae, 0x4a, 0x76,
- 0xad, 0xaa, 0x4e, 0xf6, 0xf5, 0xe6, 0xd7, 0x0f, 0xf5, 0x56, 0x50, 0x59, 0xc0, 0x56, 0xbe, 0xbc,
- 0xb1, 0x3a, 0x26, 0x6b, 0xbb, 0x9b, 0x27, 0xe0, 0xfe, 0xb5, 0x90, 0x4d, 0xe2, 0x73, 0x4e, 0x67,
- 0x73, 0xbb, 0x72, 0x31, 0xb7, 0x2b, 0x97, 0x73, 0xbb, 0xf2, 0x35, 0xb1, 0x8d, 0x59, 0x62, 0x1b,
- 0x17, 0x89, 0x6d, 0x5c, 0x26, 0xb6, 0xf1, 0x2b, 0xb1, 0x8d, 0xef, 0xbf, 0xed, 0xca, 0x59, 0xeb,
- 0x86, 0xbf, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0x86, 0x92, 0x15, 0x77, 0x05, 0x00,
- 0x00,
-}
+func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} }
func (m *ImageReview) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -322,7 +149,7 @@ func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Annotations {
keysForAnnotations = append(keysForAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.Annotations[string(keysForAnnotations[iNdEx])]
baseI := i
@@ -383,7 +210,7 @@ func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
baseI := i
@@ -541,7 +368,7 @@ func (this *ImageReviewSpec) String() string {
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
@@ -563,7 +390,7 @@ func (this *ImageReviewStatus) String() string {
for k := range this.AuditAnnotations {
keysForAuditAnnotations = append(keysForAuditAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+ sort.Strings(keysForAuditAnnotations)
mapStringForAuditAnnotations := "map[string]string{"
for _, k := range keysForAuditAnnotations {
mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
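
Besides dropping the gogo boilerplate, the hunks above replace github.com/gogo/protobuf/sortkeys with the standard library's sort.Strings wherever the generated Marshal and String code needs deterministic ordering of annotation map keys. Stripped of the generated scaffolding, the pattern reduces to this sketch (function and variable names here are made up):

package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the map's keys in stable, lexicographic order, the
// same way the generated MarshalToSizedBuffer and String methods now do
// it with sort.Strings instead of the gogo sortkeys helper.
func sortedKeys(annotations map[string]string) []string {
	keys := make([]string, 0, len(annotations))
	for k := range annotations {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	annotations := map[string]string{"b": "2", "a": "1"}
	for _, k := range sortedKeys(annotations) {
		fmt.Printf("%s=%s\n", k, annotations[k])
	}
}
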
diff --git a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..215f52a3
--- /dev/null
+++ b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,30 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*ImageReview) ProtoMessage() {}
+
+func (*ImageReviewContainerSpec) ProtoMessage() {}
+
+func (*ImageReviewSpec) ProtoMessage() {}
+
+func (*ImageReviewStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..e21c127a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ImageReview) OpenAPIModelName() string {
+ return "io.k8s.api.imagepolicy.v1alpha1.ImageReview"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ImageReviewContainerSpec) OpenAPIModelName() string {
+ return "io.k8s.api.imagepolicy.v1alpha1.ImageReviewContainerSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ImageReviewSpec) OpenAPIModelName() string {
+ return "io.k8s.api.imagepolicy.v1alpha1.ImageReviewSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ImageReviewStatus) OpenAPIModelName() string {
+ return "io.k8s.api.imagepolicy.v1alpha1.ImageReviewStatus"
+}
diff --git a/operator/vendor/k8s.io/api/networking/v1/doc.go b/operator/vendor/k8s.io/api/networking/v1/doc.go
index e2093b7d..42da00e2 100644
--- a/operator/vendor/k8s.io/api/networking/v1/doc.go
+++ b/operator/vendor/k8s.io/api/networking/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.networking.v1
+
// +groupName=networking.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/networking/v1/generated.pb.go b/operator/vendor/k8s.io/api/networking/v1/generated.pb.go
index 062382b6..ee5429f1 100644
--- a/operator/vendor/k8s.io/api/networking/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/networking/v1/generated.pb.go
@@ -24,12 +24,10 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -37,1160 +35,75 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-func (*HTTPIngressPath) ProtoMessage() {}
-func (*HTTPIngressPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{0}
-}
-func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressPath) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressPath.Merge(m, src)
-}
-func (m *HTTPIngressPath) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressPath) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo
-
-func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-func (*HTTPIngressRuleValue) ProtoMessage() {}
-func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{1}
-}
-func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src)
-}
-func (m *HTTPIngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
-
-func (m *IPAddress) Reset() { *m = IPAddress{} }
-func (*IPAddress) ProtoMessage() {}
-func (*IPAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{2}
-}
-func (m *IPAddress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddress.Merge(m, src)
-}
-func (m *IPAddress) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddress) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddress proto.InternalMessageInfo
-
-func (m *IPAddressList) Reset() { *m = IPAddressList{} }
-func (*IPAddressList) ProtoMessage() {}
-func (*IPAddressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{3}
-}
-func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressList.Merge(m, src)
-}
-func (m *IPAddressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
-
-func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
-func (*IPAddressSpec) ProtoMessage() {}
-func (*IPAddressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{4}
-}
-func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressSpec.Merge(m, src)
-}
-func (m *IPAddressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
-
-func (m *IPBlock) Reset() { *m = IPBlock{} }
-func (*IPBlock) ProtoMessage() {}
-func (*IPBlock) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{5}
-}
-func (m *IPBlock) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPBlock) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPBlock.Merge(m, src)
-}
-func (m *IPBlock) XXX_Size() int {
- return m.Size()
-}
-func (m *IPBlock) XXX_DiscardUnknown() {
- xxx_messageInfo_IPBlock.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPBlock proto.InternalMessageInfo
-
-func (m *Ingress) Reset() { *m = Ingress{} }
-func (*Ingress) ProtoMessage() {}
-func (*Ingress) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{6}
-}
-func (m *Ingress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Ingress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Ingress.Merge(m, src)
-}
-func (m *Ingress) XXX_Size() int {
- return m.Size()
-}
-func (m *Ingress) XXX_DiscardUnknown() {
- xxx_messageInfo_Ingress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Ingress proto.InternalMessageInfo
-
-func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-func (*IngressBackend) ProtoMessage() {}
-func (*IngressBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{7}
-}
-func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressBackend) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressBackend.Merge(m, src)
-}
-func (m *IngressBackend) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressBackend) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressBackend.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
-
-func (m *IngressClass) Reset() { *m = IngressClass{} }
-func (*IngressClass) ProtoMessage() {}
-func (*IngressClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{8}
-}
-func (m *IngressClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClass.Merge(m, src)
-}
-func (m *IngressClass) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClass) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClass proto.InternalMessageInfo
-
-func (m *IngressClassList) Reset() { *m = IngressClassList{} }
-func (*IngressClassList) ProtoMessage() {}
-func (*IngressClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{9}
-}
-func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassList.Merge(m, src)
-}
-func (m *IngressClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
-
-func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
-func (*IngressClassParametersReference) ProtoMessage() {}
-func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{10}
-}
-func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassParametersReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassParametersReference.Merge(m, src)
-}
-func (m *IngressClassParametersReference) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassParametersReference) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassParametersReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
-
-func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
-func (*IngressClassSpec) ProtoMessage() {}
-func (*IngressClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{11}
-}
-func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassSpec.Merge(m, src)
-}
-func (m *IngressClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
-
-func (m *IngressList) Reset() { *m = IngressList{} }
-func (*IngressList) ProtoMessage() {}
-func (*IngressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{12}
-}
-func (m *IngressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressList.Merge(m, src)
-}
-func (m *IngressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressList proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-func (*IngressLoadBalancerIngress) ProtoMessage() {}
-func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{13}
-}
-func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src)
-}
-func (m *IngressLoadBalancerIngress) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-func (*IngressLoadBalancerStatus) ProtoMessage() {}
-func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{14}
-}
-func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src)
-}
-func (m *IngressLoadBalancerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
-
-func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-func (*IngressPortStatus) ProtoMessage() {}
-func (*IngressPortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{15}
-}
-func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressPortStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressPortStatus.Merge(m, src)
-}
-func (m *IngressPortStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressPortStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressPortStatus.DiscardUnknown(m)
-}
+func (m *IPAddress) Reset() { *m = IPAddress{} }
-var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
+func (m *IPAddressList) Reset() { *m = IPAddressList{} }
-func (m *IngressRule) Reset() { *m = IngressRule{} }
-func (*IngressRule) ProtoMessage() {}
-func (*IngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{16}
-}
-func (m *IngressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRule.Merge(m, src)
-}
-func (m *IngressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRule.DiscardUnknown(m)
-}
+func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
-var xxx_messageInfo_IngressRule proto.InternalMessageInfo
+func (m *IPBlock) Reset() { *m = IPBlock{} }
-func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-func (*IngressRuleValue) ProtoMessage() {}
-func (*IngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{17}
-}
-func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRuleValue.Merge(m, src)
-}
-func (m *IngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRuleValue.DiscardUnknown(m)
-}
+func (m *Ingress) Reset() { *m = Ingress{} }
-var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
+func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} }
-func (*IngressServiceBackend) ProtoMessage() {}
-func (*IngressServiceBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{18}
-}
-func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressServiceBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressServiceBackend) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressServiceBackend.Merge(m, src)
-}
-func (m *IngressServiceBackend) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressServiceBackend) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressServiceBackend.DiscardUnknown(m)
-}
+func (m *IngressClass) Reset() { *m = IngressClass{} }
-var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo
+func (m *IngressClassList) Reset() { *m = IngressClassList{} }
-func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-func (*IngressSpec) ProtoMessage() {}
-func (*IngressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{19}
-}
-func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressSpec.Merge(m, src)
-}
-func (m *IngressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressSpec.DiscardUnknown(m)
-}
+func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
-var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
+func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
-func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-func (*IngressStatus) ProtoMessage() {}
-func (*IngressStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{20}
-}
-func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressStatus.Merge(m, src)
-}
-func (m *IngressStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
-
-func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-func (*IngressTLS) ProtoMessage() {}
-func (*IngressTLS) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{21}
-}
-func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressTLS) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressTLS.Merge(m, src)
-}
-func (m *IngressTLS) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressTLS) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressTLS.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
+func (m *IngressList) Reset() { *m = IngressList{} }
-func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
-func (*NetworkPolicy) ProtoMessage() {}
-func (*NetworkPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{22}
-}
-func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicy.Merge(m, src)
-}
-func (m *NetworkPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicy.DiscardUnknown(m)
-}
+func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
-
-func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
-func (*NetworkPolicyEgressRule) ProtoMessage() {}
-func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{23}
-}
-func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src)
-}
-func (m *NetworkPolicyEgressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
-
-func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
-func (*NetworkPolicyIngressRule) ProtoMessage() {}
-func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{24}
-}
-func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src)
-}
-func (m *NetworkPolicyIngressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m)
-}
+func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
+func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
-func (*NetworkPolicyList) ProtoMessage() {}
-func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{25}
-}
-func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyList.Merge(m, src)
-}
-func (m *NetworkPolicyList) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyList) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m)
-}
+func (m *IngressRule) Reset() { *m = IngressRule{} }
-var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
+func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
-func (*NetworkPolicyPeer) ProtoMessage() {}
-func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{26}
-}
-func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyPeer.Merge(m, src)
-}
-func (m *NetworkPolicyPeer) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyPeer) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m)
-}
+func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} }
-var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
+func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
-func (*NetworkPolicyPort) ProtoMessage() {}
-func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{27}
-}
-func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicyPort.Merge(m, src)
-}
-func (m *NetworkPolicyPort) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicyPort) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m)
-}
+func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
+func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
-func (*NetworkPolicySpec) ProtoMessage() {}
-func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{28}
-}
-func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkPolicySpec.Merge(m, src)
-}
-func (m *NetworkPolicySpec) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m)
-}
+func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
-var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
+func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
-func (m *ParentReference) Reset() { *m = ParentReference{} }
-func (*ParentReference) ProtoMessage() {}
-func (*ParentReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{29}
-}
-func (m *ParentReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParentReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParentReference.Merge(m, src)
-}
-func (m *ParentReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ParentReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ParentReference.DiscardUnknown(m)
-}
+func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
-var xxx_messageInfo_ParentReference proto.InternalMessageInfo
+func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
-func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} }
-func (*ServiceBackendPort) ProtoMessage() {}
-func (*ServiceBackendPort) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{30}
-}
-func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceBackendPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceBackendPort) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceBackendPort.Merge(m, src)
-}
-func (m *ServiceBackendPort) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceBackendPort) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceBackendPort.DiscardUnknown(m)
-}
+func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
-var xxx_messageInfo_ServiceBackendPort proto.InternalMessageInfo
+func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
-func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
-func (*ServiceCIDR) ProtoMessage() {}
-func (*ServiceCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{31}
-}
-func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDR.Merge(m, src)
-}
-func (m *ServiceCIDR) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDR) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
-}
+func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
-var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
+func (m *ParentReference) Reset() { *m = ParentReference{} }
-func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
-func (*ServiceCIDRList) ProtoMessage() {}
-func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{32}
-}
-func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRList.Merge(m, src)
-}
-func (m *ServiceCIDRList) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRList) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
-}
+func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} }
-var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
+func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
-func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
-func (*ServiceCIDRSpec) ProtoMessage() {}
-func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{33}
-}
-func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
-}
-func (m *ServiceCIDRSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
-}
+func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
-var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
+func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
-func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
-func (*ServiceCIDRStatus) ProtoMessage() {}
-func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2c41434372fec1d7, []int{34}
-}
-func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
-}
-func (m *ServiceCIDRStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1.HTTPIngressPath")
- proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1.HTTPIngressRuleValue")
- proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1.IPAddress")
- proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1.IPAddressList")
- proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1.IPAddressSpec")
- proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock")
- proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1.Ingress")
- proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1.IngressBackend")
- proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1.IngressClass")
- proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1.IngressClassList")
- proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1.IngressClassParametersReference")
- proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1.IngressClassSpec")
- proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1.IngressList")
- proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.networking.v1.IngressLoadBalancerIngress")
- proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1.IngressLoadBalancerStatus")
- proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1.IngressPortStatus")
- proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1.IngressRule")
- proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1.IngressRuleValue")
- proto.RegisterType((*IngressServiceBackend)(nil), "k8s.io.api.networking.v1.IngressServiceBackend")
- proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1.IngressSpec")
- proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1.IngressStatus")
- proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1.IngressTLS")
- proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.networking.v1.NetworkPolicy")
- proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyEgressRule")
- proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyIngressRule")
- proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.networking.v1.NetworkPolicyList")
- proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer")
- proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort")
- proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec")
- proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1.ParentReference")
- proto.RegisterType((*ServiceBackendPort)(nil), "k8s.io.api.networking.v1.ServiceBackendPort")
- proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1.ServiceCIDR")
- proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1.ServiceCIDRList")
- proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1.ServiceCIDRSpec")
- proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1.ServiceCIDRStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/networking/v1/generated.proto", fileDescriptor_2c41434372fec1d7)
-}
-
-var fileDescriptor_2c41434372fec1d7 = []byte{
- // 1884 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x8f, 0x1b, 0x49,
- 0x15, 0x9f, 0xf6, 0x8c, 0x67, 0xec, 0xe7, 0xf9, 0xc8, 0x14, 0x59, 0x61, 0x06, 0x61, 0x87, 0x5e,
- 0xb2, 0x3b, 0x4b, 0x76, 0x6d, 0x32, 0x1b, 0x21, 0xb8, 0x00, 0xdb, 0x93, 0x6c, 0xe2, 0xcd, 0xc4,
- 0xb1, 0xca, 0x56, 0x10, 0x88, 0x8f, 0xed, 0x69, 0xd7, 0x78, 0x7a, 0xa7, 0xdd, 0xd5, 0xaa, 0x2e,
- 0x87, 0x44, 0x42, 0x88, 0x0b, 0x07, 0x6e, 0xf0, 0x27, 0x20, 0xfe, 0x02, 0x04, 0xd2, 0xae, 0xb4,
- 0x82, 0x85, 0x0b, 0xca, 0x71, 0x25, 0x2e, 0x7b, 0xc1, 0x22, 0xe6, 0xbf, 0xc8, 0x09, 0xd5, 0x47,
- 0x7f, 0xd9, 0xee, 0xb1, 0x89, 0x22, 0x9f, 0xc6, 0xfd, 0xde, 0xab, 0xdf, 0x7b, 0xf5, 0xea, 0x7d,
- 0x55, 0x0d, 0x1c, 0x5e, 0x7c, 0x27, 0x6c, 0xb8, 0xb4, 0x69, 0x07, 0x6e, 0xd3, 0x27, 0xfc, 0x17,
- 0x94, 0x5d, 0xb8, 0xfe, 0xa0, 0xf9, 0xf8, 0x66, 0x73, 0x40, 0x7c, 0xc2, 0x6c, 0x4e, 0xfa, 0x8d,
- 0x80, 0x51, 0x4e, 0x51, 0x55, 0x49, 0x36, 0xec, 0xc0, 0x6d, 0x24, 0x92, 0x8d, 0xc7, 0x37, 0x0f,
- 0xde, 0x19, 0xb8, 0xfc, 0x7c, 0x74, 0xda, 0x70, 0xe8, 0xb0, 0x39, 0xa0, 0x03, 0xda, 0x94, 0x0b,
- 0x4e, 0x47, 0x67, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x0a, 0xe8, 0xc0, 0x4c, 0xa9, 0x74, 0x28, 0x23,
- 0x73, 0x94, 0x1d, 0xdc, 0x4a, 0x64, 0x86, 0xb6, 0x73, 0xee, 0xfa, 0x84, 0x3d, 0x6d, 0x06, 0x17,
- 0x03, 0x41, 0x08, 0x9b, 0x43, 0xc2, 0xed, 0x79, 0xab, 0x9a, 0x79, 0xab, 0xd8, 0xc8, 0xe7, 0xee,
- 0x90, 0xcc, 0x2c, 0xf8, 0xf6, 0xa2, 0x05, 0xa1, 0x73, 0x4e, 0x86, 0xf6, 0xcc, 0xba, 0x77, 0xf3,
- 0xd6, 0x8d, 0xb8, 0xeb, 0x35, 0x5d, 0x9f, 0x87, 0x9c, 0x4d, 0x2f, 0x32, 0xff, 0x66, 0xc0, 0xde,
- 0xbd, 0x5e, 0xaf, 0xd3, 0xf2, 0x07, 0x8c, 0x84, 0x61, 0xc7, 0xe6, 0xe7, 0xe8, 0x1a, 0x6c, 0x04,
- 0x36, 0x3f, 0xaf, 0x1a, 0xd7, 0x8c, 0xc3, 0xb2, 0xb5, 0xfd, 0x6c, 0x5c, 0x5f, 0x9b, 0x8c, 0xeb,
- 0x1b, 0x82, 0x87, 0x25, 0x07, 0xdd, 0x82, 0x92, 0xf8, 0xdb, 0x7b, 0x1a, 0x90, 0xea, 0xba, 0x94,
- 0xaa, 0x4e, 0xc6, 0xf5, 0x52, 0x47, 0xd3, 0x5e, 0xa4, 0x7e, 0xe3, 0x58, 0x12, 0x75, 0x61, 0xeb,
- 0xd4, 0x76, 0x2e, 0x88, 0xdf, 0xaf, 0x16, 0xae, 0x19, 0x87, 0x95, 0xa3, 0xc3, 0x46, 0xde, 0xf1,
- 0x35, 0xb4, 0x3d, 0x96, 0x92, 0xb7, 0xf6, 0xb4, 0x11, 0x5b, 0x9a, 0x80, 0x23, 0x24, 0xf3, 0x0c,
- 0xae, 0xa6, 0xec, 0xc7, 0x23, 0x8f, 0x3c, 0xb2, 0xbd, 0x11, 0x41, 0x6d, 0x28, 0x0a, 0xc5, 0x61,
- 0xd5, 0xb8, 0xb6, 0x7e, 0x58, 0x39, 0x7a, 0x2b, 0x5f, 0xd5, 0xd4, 0xf6, 0xad, 0x1d, 0xad, 0xab,
- 0x28, 0xbe, 0x42, 0xac, 0x60, 0xcc, 0x4f, 0x0c, 0x28, 0xb7, 0x3a, 0xef, 0xf5, 0xfb, 0x42, 0x0e,
- 0x7d, 0x08, 0x25, 0x71, 0xde, 0x7d, 0x9b, 0xdb, 0xd2, 0x4d, 0x95, 0xa3, 0x6f, 0xa5, 0x14, 0xc4,
- 0xee, 0x6f, 0x04, 0x17, 0x03, 0x41, 0x08, 0x1b, 0x42, 0x5a, 0x28, 0x7b, 0x78, 0xfa, 0x11, 0x71,
- 0xf8, 0x03, 0xc2, 0x6d, 0x0b, 0x69, 0x3d, 0x90, 0xd0, 0x70, 0x8c, 0x8a, 0x5a, 0xb0, 0x11, 0x06,
- 0xc4, 0xd1, 0x9e, 0x7a, 0xf3, 0x12, 0x4f, 0x45, 0x46, 0x75, 0x03, 0xe2, 0x24, 0xa7, 0x25, 0xbe,
- 0xb0, 0x84, 0x30, 0x3f, 0x36, 0x60, 0x27, 0x96, 0x3a, 0x71, 0x43, 0x8e, 0x7e, 0x32, 0x63, 0x7e,
- 0x63, 0x39, 0xf3, 0xc5, 0x6a, 0x69, 0xfc, 0x15, 0xad, 0xa7, 0x14, 0x51, 0x52, 0xa6, 0xdf, 0x83,
- 0xa2, 0xcb, 0xc9, 0x30, 0xac, 0x16, 0xa4, 0xeb, 0x5f, 0x5f, 0xc2, 0xf6, 0xc4, 0xe9, 0x2d, 0xb1,
- 0x12, 0x2b, 0x00, 0x73, 0x90, 0x32, 0x5c, 0x6c, 0x08, 0x3d, 0x82, 0x72, 0x60, 0x33, 0xe2, 0x73,
- 0x4c, 0xce, 0xb4, 0xe5, 0x97, 0x9c, 0x6c, 0x27, 0x12, 0x25, 0x8c, 0xf8, 0x0e, 0xb1, 0x76, 0x26,
- 0xe3, 0x7a, 0x39, 0x26, 0xe2, 0x04, 0xca, 0x7c, 0x08, 0x5b, 0xad, 0x8e, 0xe5, 0x51, 0xe7, 0x42,
- 0x44, 0xbf, 0xe3, 0xf6, 0xd9, 0x74, 0xf4, 0x1f, 0xb7, 0x6e, 0x63, 0x2c, 0x39, 0xc8, 0x84, 0x4d,
- 0xf2, 0xc4, 0x21, 0x01, 0x97, 0x1b, 0x2c, 0x5b, 0x30, 0x19, 0xd7, 0x37, 0xef, 0x48, 0x0a, 0xd6,
- 0x1c, 0xf3, 0x37, 0x05, 0xd8, 0xd2, 0x41, 0xb5, 0x82, 0x60, 0xb9, 0x9b, 0x09, 0x96, 0xeb, 0x0b,
- 0xd3, 0x2a, 0x2f, 0x54, 0xd0, 0x43, 0xd8, 0x0c, 0xb9, 0xcd, 0x47, 0xa1, 0x4c, 0xeb, 0xcb, 0xe3,
- 0x4e, 0x43, 0x49, 0x71, 0x6b, 0x57, 0x83, 0x6d, 0xaa, 0x6f, 0xac, 0x61, 0xcc, 0x7f, 0x18, 0xb0,
- 0x9b, 0xcd, 0x65, 0xf4, 0x08, 0xb6, 0x42, 0xc2, 0x1e, 0xbb, 0x0e, 0xa9, 0x6e, 0x48, 0x25, 0xcd,
- 0xc5, 0x4a, 0x94, 0x7c, 0x54, 0x0d, 0x2a, 0xa2, 0x12, 0x68, 0x1a, 0x8e, 0xc0, 0xd0, 0x0f, 0xa1,
- 0xc4, 0x48, 0x48, 0x47, 0xcc, 0x21, 0xda, 0xfa, 0x77, 0xd2, 0xc0, 0xa2, 0xaa, 0x0b, 0x48, 0x51,
- 0x8a, 0xfa, 0x27, 0xd4, 0xb1, 0x3d, 0xe5, 0xca, 0x24, 0x3c, 0xb6, 0x45, 0x3c, 0x63, 0x0d, 0x81,
- 0x63, 0x30, 0x51, 0x23, 0xb7, 0xb5, 0x21, 0xc7, 0x9e, 0xbd, 0x92, 0x03, 0x3d, 0xc9, 0x1c, 0xe8,
- 0x37, 0x17, 0x3a, 0x48, 0xda, 0x95, 0x5b, 0x00, 0xfe, 0x6a, 0xc0, 0x95, 0xb4, 0xe0, 0x0a, 0x6a,
- 0xc0, 0xfd, 0x6c, 0x0d, 0x78, 0x63, 0xb9, 0x1d, 0xe4, 0x94, 0x81, 0x7f, 0x1b, 0x50, 0x4f, 0x8b,
- 0x75, 0x6c, 0x66, 0x0f, 0x09, 0x27, 0x2c, 0x8c, 0x0f, 0x0f, 0x1d, 0x42, 0xc9, 0xee, 0xb4, 0xee,
- 0x32, 0x3a, 0x0a, 0xa2, 0xd4, 0x15, 0xa6, 0xbd, 0xa7, 0x69, 0x38, 0xe6, 0x8a, 0x04, 0xbf, 0x70,
- 0x75, 0x0f, 0x4a, 0x25, 0xf8, 0x7d, 0xd7, 0xef, 0x63, 0xc9, 0x11, 0x12, 0xbe, 0x3d, 0x8c, 0x5a,
- 0x5b, 0x2c, 0xd1, 0xb6, 0x87, 0x04, 0x4b, 0x0e, 0xaa, 0x43, 0x31, 0x74, 0x68, 0xa0, 0x22, 0xb8,
- 0x6c, 0x95, 0x85, 0xc9, 0x5d, 0x41, 0xc0, 0x8a, 0x8e, 0x6e, 0x40, 0x59, 0x08, 0x86, 0x81, 0xed,
- 0x90, 0x6a, 0x51, 0x0a, 0xc9, 0xea, 0xd3, 0x8e, 0x88, 0x38, 0xe1, 0x9b, 0x7f, 0x9a, 0x3a, 0x1f,
- 0x59, 0xea, 0x8e, 0x00, 0x1c, 0xea, 0x73, 0x46, 0x3d, 0x8f, 0x44, 0xd5, 0x28, 0x0e, 0x9a, 0xe3,
- 0x98, 0x83, 0x53, 0x52, 0xc8, 0x05, 0x08, 0x62, 0xdf, 0xe8, 0xe0, 0xf9, 0xee, 0x72, 0xae, 0x9f,
- 0xe3, 0x53, 0x6b, 0x57, 0xa8, 0x4a, 0x31, 0x52, 0xe0, 0xe6, 0x9f, 0x0d, 0xa8, 0xe8, 0xf5, 0x2b,
- 0x08, 0xa7, 0xf7, 0xb3, 0xe1, 0xf4, 0xf5, 0xc5, 0x83, 0xc3, 0xfc, 0x48, 0xfa, 0xc4, 0x80, 0x83,
- 0xc8, 0x6a, 0x6a, 0xf7, 0x2d, 0xdb, 0xb3, 0x7d, 0x87, 0xb0, 0xa8, 0x52, 0x1f, 0x40, 0xc1, 0x8d,
- 0xc2, 0x07, 0x34, 0x40, 0xa1, 0xd5, 0xc1, 0x05, 0x37, 0x40, 0x6f, 0x43, 0xe9, 0x9c, 0x86, 0x5c,
- 0x06, 0x86, 0x0a, 0x9d, 0xd8, 0xe0, 0x7b, 0x9a, 0x8e, 0x63, 0x09, 0xd4, 0x81, 0x62, 0x40, 0x19,
- 0x0f, 0xab, 0x1b, 0xd2, 0xe0, 0x1b, 0x0b, 0x0d, 0xee, 0x50, 0xc6, 0x75, 0x2d, 0x4d, 0x06, 0x10,
- 0x81, 0x80, 0x15, 0x90, 0xf9, 0x4b, 0xf8, 0xca, 0x1c, 0xcb, 0xd5, 0x12, 0xf4, 0x73, 0xd8, 0x72,
- 0x15, 0x53, 0xcf, 0x3b, 0xb7, 0x16, 0x2a, 0x9c, 0xb3, 0xff, 0x64, 0xcc, 0x8a, 0xc6, 0xa9, 0x08,
- 0xd5, 0xfc, 0xa3, 0x01, 0xfb, 0x33, 0x96, 0xca, 0x49, 0x91, 0x32, 0x2e, 0x3d, 0x56, 0x4c, 0x4d,
- 0x8a, 0x94, 0x71, 0x2c, 0x39, 0xe8, 0x3e, 0x94, 0xe4, 0xa0, 0xe9, 0x50, 0x4f, 0x7b, 0xad, 0x19,
- 0x79, 0xad, 0xa3, 0xe9, 0x2f, 0xc6, 0xf5, 0xaf, 0xce, 0x4e, 0xdf, 0x8d, 0x88, 0x8d, 0x63, 0x00,
- 0x91, 0x75, 0x84, 0x31, 0xca, 0x74, 0x62, 0xca, 0xac, 0xbb, 0x23, 0x08, 0x58, 0xd1, 0xcd, 0x3f,
- 0x24, 0x41, 0x29, 0x26, 0x41, 0x61, 0x9f, 0x38, 0x91, 0xe9, 0x5e, 0x2e, 0xce, 0x0b, 0x4b, 0x0e,
- 0x0a, 0xe0, 0x8a, 0x3b, 0x35, 0x3a, 0x2e, 0x5d, 0x74, 0xe3, 0x15, 0x56, 0x55, 0x23, 0x5f, 0x99,
- 0xe6, 0xe0, 0x19, 0x74, 0xf3, 0x43, 0x98, 0x91, 0x12, 0xe5, 0xfe, 0x9c, 0xf3, 0x60, 0x4e, 0xe2,
- 0xe4, 0xcf, 0xaa, 0x89, 0xf6, 0x92, 0xdc, 0x53, 0xaf, 0xd7, 0xc1, 0x12, 0xc5, 0xfc, 0xad, 0x01,
- 0xaf, 0xcd, 0x6d, 0x9c, 0x71, 0x61, 0x33, 0x72, 0x0b, 0x5b, 0x5b, 0x9f, 0xa8, 0xf2, 0xc1, 0xdb,
- 0xf9, 0x96, 0x64, 0x91, 0xc5, 0x89, 0xcf, 0x3b, 0x7f, 0xf3, 0x9f, 0x85, 0xf8, 0x44, 0x64, 0x55,
- 0xfb, 0x41, 0xec, 0x6f, 0x59, 0x75, 0x84, 0x66, 0x5d, 0x43, 0xaf, 0xa6, 0xfc, 0x17, 0xf3, 0xf0,
- 0x8c, 0x34, 0xea, 0xc3, 0x6e, 0x9f, 0x9c, 0xd9, 0x23, 0x8f, 0x6b, 0xdd, 0xda, 0x6b, 0xcb, 0x5f,
- 0x26, 0xd0, 0x64, 0x5c, 0xdf, 0xbd, 0x9d, 0xc1, 0xc0, 0x53, 0x98, 0xe8, 0x18, 0xd6, 0xb9, 0x17,
- 0x95, 0x9b, 0x6f, 0x2c, 0x84, 0xee, 0x9d, 0x74, 0xad, 0x8a, 0xde, 0xfe, 0x7a, 0xef, 0xa4, 0x8b,
- 0xc5, 0x6a, 0xf4, 0x01, 0x14, 0xd9, 0xc8, 0x23, 0x62, 0x98, 0x5a, 0x5f, 0x6a, 0x2e, 0x13, 0x67,
- 0x9a, 0xa4, 0xbf, 0xf8, 0x0a, 0xb1, 0x82, 0x30, 0x7f, 0x05, 0x3b, 0x99, 0x89, 0x0b, 0x0d, 0x61,
- 0xdb, 0x4b, 0xa5, 0xb0, 0xf6, 0xc2, 0xbb, 0xff, 0x57, 0xde, 0xeb, 0x82, 0x73, 0x55, 0x6b, 0xdc,
- 0x4e, 0xf3, 0x70, 0x06, 0xde, 0xb4, 0x01, 0x92, 0xbd, 0x8a, 0x4c, 0x14, 0xe9, 0xa3, 0xaa, 0x8d,
- 0xce, 0x44, 0x91, 0x55, 0x21, 0x56, 0x74, 0xd1, 0xbd, 0x42, 0xe2, 0x30, 0xc2, 0xdb, 0x49, 0xbd,
- 0x8c, 0xbb, 0x57, 0x37, 0xe6, 0xe0, 0x94, 0x94, 0xf9, 0x77, 0x03, 0x76, 0xda, 0xca, 0xe4, 0x0e,
- 0xf5, 0x5c, 0xe7, 0xe9, 0x0a, 0x06, 0xad, 0x07, 0x99, 0x41, 0xeb, 0x92, 0x32, 0x9d, 0x31, 0x2c,
- 0x77, 0xd2, 0xfa, 0x8b, 0x01, 0x5f, 0xce, 0x48, 0xde, 0x49, 0x8a, 0x51, 0xdc, 0x12, 0x8c, 0x45,
- 0x2d, 0x21, 0x83, 0x20, 0x53, 0x6b, 0x6e, 0x4b, 0x40, 0x77, 0xa1, 0xc0, 0xa9, 0x8e, 0xd1, 0xa5,
- 0xe1, 0x08, 0x61, 0x49, 0x6f, 0xeb, 0x51, 0x5c, 0xe0, 0xd4, 0xfc, 0xd4, 0x80, 0x6a, 0x46, 0x2a,
- 0x5d, 0x44, 0x5f, 0xbd, 0xdd, 0x0f, 0x60, 0xe3, 0x8c, 0xd1, 0xe1, 0xcb, 0x58, 0x1e, 0x3b, 0xfd,
- 0x7d, 0x46, 0x87, 0x58, 0xc2, 0x98, 0x9f, 0x19, 0xb0, 0x9f, 0x91, 0x5c, 0xc1, 0x40, 0x72, 0x92,
- 0x1d, 0x48, 0xde, 0x5c, 0x72, 0x0f, 0x39, 0x63, 0xc9, 0x67, 0x85, 0xa9, 0x1d, 0x88, 0xbd, 0xa2,
- 0x33, 0xa8, 0x04, 0xb4, 0xdf, 0x25, 0x1e, 0x71, 0x38, 0x9d, 0x97, 0xe0, 0x97, 0x6d, 0xc2, 0x3e,
- 0x25, 0x5e, 0xb4, 0xd4, 0xda, 0x9b, 0x8c, 0xeb, 0x95, 0x4e, 0x82, 0x85, 0xd3, 0xc0, 0xe8, 0x09,
- 0xec, 0xc7, 0xb3, 0x68, 0xac, 0xad, 0xf0, 0xf2, 0xda, 0x5e, 0x9b, 0x8c, 0xeb, 0xfb, 0xed, 0x69,
- 0x44, 0x3c, 0xab, 0x04, 0xdd, 0x83, 0x2d, 0x37, 0x90, 0xd7, 0x6e, 0x7d, 0x63, 0xbb, 0x6c, 0xb0,
- 0x53, 0xf7, 0x73, 0x75, 0xf9, 0xd3, 0x1f, 0x38, 0x5a, 0x6e, 0xfe, 0x6b, 0x3a, 0x06, 0x44, 0xc0,
- 0xa1, 0xbb, 0xa9, 0xe9, 0x43, 0xf5, 0xbc, 0x1b, 0x2f, 0x37, 0x79, 0x64, 0xdb, 0x62, 0x7e, 0x11,
- 0x1a, 0x71, 0xd7, 0x6b, 0xa8, 0xa7, 0xb6, 0x46, 0xcb, 0xe7, 0x0f, 0x59, 0x97, 0x33, 0xd7, 0x1f,
- 0xa8, 0x16, 0x9d, 0x1a, 0x8b, 0xae, 0xc3, 0x96, 0xee, 0x9a, 0x72, 0xe3, 0x45, 0xb5, 0xab, 0x3b,
- 0x8a, 0x84, 0x23, 0x9e, 0xf9, 0x62, 0x3a, 0x2e, 0x64, 0x0f, 0xfd, 0xe8, 0x95, 0xc5, 0xc5, 0x97,
- 0x74, 0x34, 0xe6, 0xc7, 0xc6, 0x4f, 0x93, 0xc1, 0x52, 0x45, 0xfa, 0xd1, 0x92, 0x91, 0x9e, 0xee,
- 0x68, 0xb9, 0x63, 0x25, 0xfa, 0x11, 0x6c, 0x12, 0x85, 0xae, 0x5a, 0xe4, 0xcd, 0x25, 0xd1, 0x93,
- 0xb2, 0x9a, 0xbc, 0x3c, 0x68, 0x9a, 0x06, 0x44, 0xdf, 0x17, 0x5e, 0x12, 0xb2, 0xe2, 0xc2, 0xaf,
- 0xe6, 0xf0, 0xb2, 0xf5, 0x35, 0xb5, 0xd9, 0x98, 0xfc, 0x42, 0x5c, 0x70, 0xe2, 0x4f, 0x9c, 0x5e,
- 0x61, 0x7e, 0x6c, 0xc0, 0xde, 0xd4, 0x0b, 0x12, 0x7a, 0x1d, 0x8a, 0x83, 0xd4, 0x15, 0x33, 0xce,
- 0x66, 0x75, 0xc7, 0x54, 0x3c, 0x71, 0x53, 0x88, 0x1f, 0x22, 0xa6, 0x6e, 0x0a, 0xb3, 0xaf, 0x0b,
- 0xa8, 0x99, 0xbe, 0x29, 0xaa, 0xc1, 0x76, 0x5f, 0x8b, 0xcf, 0xbd, 0x2d, 0xc6, 0x43, 0xdc, 0x46,
- 0xde, 0x10, 0x67, 0xfe, 0x0c, 0xd0, 0xec, 0x78, 0xb6, 0xc4, 0xf0, 0xf7, 0x06, 0x6c, 0xfa, 0xa3,
- 0xe1, 0x29, 0x51, 0xd9, 0x5f, 0x4c, 0x5c, 0xdb, 0x96, 0x54, 0xac, 0xb9, 0xe6, 0xef, 0x0b, 0x50,
- 0xd1, 0x0a, 0x8e, 0x5b, 0xb7, 0xf1, 0x0a, 0xda, 0xf4, 0xfd, 0x4c, 0x9b, 0x7e, 0x6b, 0xe1, 0x58,
- 0x2a, 0xcc, 0xca, 0x7d, 0xe4, 0xea, 0x4e, 0x3d, 0x72, 0xdd, 0x58, 0x0e, 0xee, 0xf2, 0x87, 0xae,
- 0x4f, 0x0d, 0xd8, 0x4b, 0x49, 0xaf, 0xa0, 0x05, 0x7d, 0x90, 0x6d, 0x41, 0xd7, 0x97, 0xda, 0x45,
- 0x4e, 0x03, 0x3a, 0xca, 0x18, 0x2f, 0xab, 0x4c, 0x1d, 0x8a, 0x8e, 0xdb, 0x67, 0x99, 0x11, 0x4f,
- 0x30, 0x43, 0xac, 0xe8, 0xe6, 0x13, 0xd8, 0x9f, 0x71, 0x0f, 0x72, 0xe4, 0xab, 0x45, 0xdf, 0xe5,
- 0x2e, 0xf5, 0xa3, 0x89, 0xa1, 0xb9, 0xdc, 0xa6, 0x8f, 0xa3, 0x75, 0x99, 0x67, 0x0e, 0x0d, 0x85,
- 0x53, 0xb0, 0xd6, 0xf7, 0x9e, 0x3d, 0xaf, 0xad, 0x7d, 0xfe, 0xbc, 0xb6, 0xf6, 0xc5, 0xf3, 0xda,
- 0xda, 0xaf, 0x27, 0x35, 0xe3, 0xd9, 0xa4, 0x66, 0x7c, 0x3e, 0xa9, 0x19, 0x5f, 0x4c, 0x6a, 0xc6,
- 0x7f, 0x26, 0x35, 0xe3, 0x77, 0xff, 0xad, 0xad, 0xfd, 0xb8, 0x9a, 0xf7, 0x5f, 0xa4, 0xff, 0x05,
- 0x00, 0x00, 0xff, 0xff, 0xb5, 0x6b, 0x8c, 0x52, 0x60, 0x1a, 0x00, 0x00,
-}
+func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/networking/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/networking/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..54918d47
--- /dev/null
+++ b/operator/vendor/k8s.io/api/networking/v1/generated.protomessage.pb.go
@@ -0,0 +1,92 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*HTTPIngressPath) ProtoMessage() {}
+
+func (*HTTPIngressRuleValue) ProtoMessage() {}
+
+func (*IPAddress) ProtoMessage() {}
+
+func (*IPAddressList) ProtoMessage() {}
+
+func (*IPAddressSpec) ProtoMessage() {}
+
+func (*IPBlock) ProtoMessage() {}
+
+func (*Ingress) ProtoMessage() {}
+
+func (*IngressBackend) ProtoMessage() {}
+
+func (*IngressClass) ProtoMessage() {}
+
+func (*IngressClassList) ProtoMessage() {}
+
+func (*IngressClassParametersReference) ProtoMessage() {}
+
+func (*IngressClassSpec) ProtoMessage() {}
+
+func (*IngressList) ProtoMessage() {}
+
+func (*IngressLoadBalancerIngress) ProtoMessage() {}
+
+func (*IngressLoadBalancerStatus) ProtoMessage() {}
+
+func (*IngressPortStatus) ProtoMessage() {}
+
+func (*IngressRule) ProtoMessage() {}
+
+func (*IngressRuleValue) ProtoMessage() {}
+
+func (*IngressServiceBackend) ProtoMessage() {}
+
+func (*IngressSpec) ProtoMessage() {}
+
+func (*IngressStatus) ProtoMessage() {}
+
+func (*IngressTLS) ProtoMessage() {}
+
+func (*NetworkPolicy) ProtoMessage() {}
+
+func (*NetworkPolicyEgressRule) ProtoMessage() {}
+
+func (*NetworkPolicyIngressRule) ProtoMessage() {}
+
+func (*NetworkPolicyList) ProtoMessage() {}
+
+func (*NetworkPolicyPeer) ProtoMessage() {}
+
+func (*NetworkPolicyPort) ProtoMessage() {}
+
+func (*NetworkPolicySpec) ProtoMessage() {}
+
+func (*ParentReference) ProtoMessage() {}
+
+func (*ServiceBackendPort) ProtoMessage() {}
+
+func (*ServiceCIDR) ProtoMessage() {}
+
+func (*ServiceCIDRList) ProtoMessage() {}
+
+func (*ServiceCIDRSpec) ProtoMessage() {}
+
+func (*ServiceCIDRStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/networking/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/networking/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..c5778959
--- /dev/null
+++ b/operator/vendor/k8s.io/api/networking/v1/zz_generated.model_name.go
@@ -0,0 +1,197 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressPath) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.HTTPIngressPath"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.HTTPIngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IPAddress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddressList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IPAddressList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddressSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IPAddressSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPBlock) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IPBlock"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Ingress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.Ingress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressBackend) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressBackend"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClass) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassParametersReference) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressClassParametersReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerIngress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressLoadBalancerIngress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressLoadBalancerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressPortStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressPortStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRule) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressServiceBackend) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressServiceBackend"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressTLS) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.IngressTLS"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyEgressRule) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicyEgressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyIngressRule) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicyIngressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicyList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyPeer) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicyPeer"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicyPort) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicyPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkPolicySpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.NetworkPolicySpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParentReference) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ParentReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceBackendPort) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ServiceBackendPort"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDR) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ServiceCIDR"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ServiceCIDRList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ServiceCIDRSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1.ServiceCIDRStatus"
+}
diff --git a/operator/vendor/k8s.io/api/networking/v1beta1/doc.go b/operator/vendor/k8s.io/api/networking/v1beta1/doc.go
index c5a03e04..6ec15316 100644
--- a/operator/vendor/k8s.io/api/networking/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/networking/v1beta1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.networking.v1beta1
+
// +groupName=networking.k8s.io
package v1beta1
diff --git a/operator/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
index a924725f..fb3b9f6a 100644
--- a/operator/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
@@ -24,855 +24,64 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
-func (*HTTPIngressPath) ProtoMessage() {}
-func (*HTTPIngressPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{0}
-}
-func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressPath) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressPath.Merge(m, src)
-}
-func (m *HTTPIngressPath) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressPath) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo
-
-func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
-func (*HTTPIngressRuleValue) ProtoMessage() {}
-func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{1}
-}
-func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src)
-}
-func (m *HTTPIngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
-
-func (m *IPAddress) Reset() { *m = IPAddress{} }
-func (*IPAddress) ProtoMessage() {}
-func (*IPAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{2}
-}
-func (m *IPAddress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddress.Merge(m, src)
-}
-func (m *IPAddress) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddress) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddress proto.InternalMessageInfo
-
-func (m *IPAddressList) Reset() { *m = IPAddressList{} }
-func (*IPAddressList) ProtoMessage() {}
-func (*IPAddressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{3}
-}
-func (m *IPAddressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressList.Merge(m, src)
-}
-func (m *IPAddressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressList proto.InternalMessageInfo
-
-func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
-func (*IPAddressSpec) ProtoMessage() {}
-func (*IPAddressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{4}
-}
-func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IPAddressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IPAddressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IPAddressSpec.Merge(m, src)
-}
-func (m *IPAddressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IPAddressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IPAddressSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo
-
-func (m *Ingress) Reset() { *m = Ingress{} }
-func (*Ingress) ProtoMessage() {}
-func (*Ingress) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{5}
-}
-func (m *Ingress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Ingress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Ingress.Merge(m, src)
-}
-func (m *Ingress) XXX_Size() int {
- return m.Size()
-}
-func (m *Ingress) XXX_DiscardUnknown() {
- xxx_messageInfo_Ingress.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Ingress proto.InternalMessageInfo
-
-func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-func (*IngressBackend) ProtoMessage() {}
-func (*IngressBackend) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{6}
-}
-func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressBackend) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressBackend.Merge(m, src)
-}
-func (m *IngressBackend) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressBackend) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressBackend.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
-
-func (m *IngressClass) Reset() { *m = IngressClass{} }
-func (*IngressClass) ProtoMessage() {}
-func (*IngressClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{7}
-}
-func (m *IngressClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClass.Merge(m, src)
-}
-func (m *IngressClass) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClass) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClass proto.InternalMessageInfo
-
-func (m *IngressClassList) Reset() { *m = IngressClassList{} }
-func (*IngressClassList) ProtoMessage() {}
-func (*IngressClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{8}
-}
-func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassList.Merge(m, src)
-}
-func (m *IngressClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
-
-func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
-func (*IngressClassParametersReference) ProtoMessage() {}
-func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{9}
-}
-func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassParametersReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassParametersReference.Merge(m, src)
-}
-func (m *IngressClassParametersReference) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassParametersReference) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassParametersReference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
-
-func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
-func (*IngressClassSpec) ProtoMessage() {}
-func (*IngressClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{10}
-}
-func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressClassSpec.Merge(m, src)
-}
-func (m *IngressClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressClassSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
-
-func (m *IngressList) Reset() { *m = IngressList{} }
-func (*IngressList) ProtoMessage() {}
-func (*IngressList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{11}
-}
-func (m *IngressList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressList.Merge(m, src)
-}
-func (m *IngressList) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressList) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressList.DiscardUnknown(m)
-}
+func (m *IPAddress) Reset() { *m = IPAddress{} }
-var xxx_messageInfo_IngressList proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-func (*IngressLoadBalancerIngress) ProtoMessage() {}
-func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{12}
-}
-func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src)
-}
-func (m *IngressLoadBalancerIngress) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m)
-}
+func (m *IPAddressList) Reset() { *m = IPAddressList{} }
-var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
-
-func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-func (*IngressLoadBalancerStatus) ProtoMessage() {}
-func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{13}
-}
-func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src)
-}
-func (m *IngressLoadBalancerStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
-
-func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-func (*IngressPortStatus) ProtoMessage() {}
-func (*IngressPortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{14}
-}
-func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressPortStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressPortStatus.Merge(m, src)
-}
-func (m *IngressPortStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressPortStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressPortStatus.DiscardUnknown(m)
-}
+func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} }
-var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
+func (m *Ingress) Reset() { *m = Ingress{} }
-func (m *IngressRule) Reset() { *m = IngressRule{} }
-func (*IngressRule) ProtoMessage() {}
-func (*IngressRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{15}
-}
-func (m *IngressRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRule.Merge(m, src)
-}
-func (m *IngressRule) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRule) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRule.DiscardUnknown(m)
-}
+func (m *IngressBackend) Reset() { *m = IngressBackend{} }
-var xxx_messageInfo_IngressRule proto.InternalMessageInfo
+func (m *IngressClass) Reset() { *m = IngressClass{} }
-func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-func (*IngressRuleValue) ProtoMessage() {}
-func (*IngressRuleValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{16}
-}
-func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressRuleValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressRuleValue.Merge(m, src)
-}
-func (m *IngressRuleValue) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressRuleValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressRuleValue.DiscardUnknown(m)
-}
+func (m *IngressClassList) Reset() { *m = IngressClassList{} }
-var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
+func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
-func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-func (*IngressSpec) ProtoMessage() {}
-func (*IngressSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{17}
-}
-func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressSpec.Merge(m, src)
-}
-func (m *IngressSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressSpec.DiscardUnknown(m)
-}
+func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
-var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
+func (m *IngressList) Reset() { *m = IngressList{} }
-func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-func (*IngressStatus) ProtoMessage() {}
-func (*IngressStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{18}
-}
-func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressStatus.Merge(m, src)
-}
-func (m *IngressStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressStatus.DiscardUnknown(m)
-}
+func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
-var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
+func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
-func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-func (*IngressTLS) ProtoMessage() {}
-func (*IngressTLS) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{19}
-}
-func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IngressTLS) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IngressTLS.Merge(m, src)
-}
-func (m *IngressTLS) XXX_Size() int {
- return m.Size()
-}
-func (m *IngressTLS) XXX_DiscardUnknown() {
- xxx_messageInfo_IngressTLS.DiscardUnknown(m)
-}
+func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
-var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
+func (m *IngressRule) Reset() { *m = IngressRule{} }
-func (m *ParentReference) Reset() { *m = ParentReference{} }
-func (*ParentReference) ProtoMessage() {}
-func (*ParentReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{20}
-}
-func (m *ParentReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ParentReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ParentReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ParentReference.Merge(m, src)
-}
-func (m *ParentReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ParentReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ParentReference.DiscardUnknown(m)
-}
+func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
-var xxx_messageInfo_ParentReference proto.InternalMessageInfo
+func (m *IngressSpec) Reset() { *m = IngressSpec{} }
-func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
-func (*ServiceCIDR) ProtoMessage() {}
-func (*ServiceCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{21}
-}
-func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDR.Merge(m, src)
-}
-func (m *ServiceCIDR) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDR) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
-}
+func (m *IngressStatus) Reset() { *m = IngressStatus{} }
-var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
+func (m *IngressTLS) Reset() { *m = IngressTLS{} }
-func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
-func (*ServiceCIDRList) ProtoMessage() {}
-func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{22}
-}
-func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRList.Merge(m, src)
-}
-func (m *ServiceCIDRList) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRList) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
-}
+func (m *ParentReference) Reset() { *m = ParentReference{} }
-var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
+func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
-func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
-func (*ServiceCIDRSpec) ProtoMessage() {}
-func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{23}
-}
-func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
-}
-func (m *ServiceCIDRSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
-}
+func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
-var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
+func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
-func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
-func (*ServiceCIDRStatus) ProtoMessage() {}
-func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_9497719c79c89d2d, []int{24}
-}
-func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
-}
-func (m *ServiceCIDRStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath")
- proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue")
- proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1beta1.IPAddress")
- proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1beta1.IPAddressList")
- proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1beta1.IPAddressSpec")
- proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress")
- proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend")
- proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass")
- proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1beta1.IngressClassList")
- proto.RegisterType((*IngressClassParametersReference)(nil), "k8s.io.api.networking.v1beta1.IngressClassParametersReference")
- proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec")
- proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList")
- proto.RegisterType((*IngressLoadBalancerIngress)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerIngress")
- proto.RegisterType((*IngressLoadBalancerStatus)(nil), "k8s.io.api.networking.v1beta1.IngressLoadBalancerStatus")
- proto.RegisterType((*IngressPortStatus)(nil), "k8s.io.api.networking.v1beta1.IngressPortStatus")
- proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule")
- proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue")
- proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec")
- proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus")
- proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS")
- proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1beta1.ParentReference")
- proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDR")
- proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRList")
- proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRSpec")
- proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1beta1.ServiceCIDRStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_9497719c79c89d2d)
-}
-
-var fileDescriptor_9497719c79c89d2d = []byte{
- // 1457 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcb, 0x6f, 0x1b, 0xc5,
- 0x1f, 0xcf, 0x3a, 0x71, 0xe3, 0x8c, 0xd3, 0x26, 0x9d, 0x5f, 0x0f, 0xfe, 0x05, 0xd5, 0x8e, 0x16,
- 0x09, 0x85, 0x3e, 0x76, 0xdb, 0xb4, 0xa0, 0x72, 0x41, 0xd4, 0x01, 0x51, 0xab, 0x69, 0xb2, 0x8c,
- 0x0d, 0x54, 0xc0, 0x81, 0xc9, 0x7a, 0x6a, 0x2f, 0x5e, 0xef, 0xae, 0x66, 0xc7, 0x81, 0xde, 0x40,
- 0x9c, 0x38, 0xc1, 0x9d, 0x23, 0x12, 0x7f, 0x02, 0x70, 0xa0, 0x52, 0x05, 0x97, 0x1e, 0x7b, 0xec,
- 0x85, 0x88, 0x9a, 0xff, 0xa2, 0x27, 0xf4, 0x9d, 0x9d, 0x7d, 0xf9, 0xd1, 0x6c, 0x38, 0xe4, 0x54,
- 0xef, 0xf7, 0x3d, 0xdf, 0xe7, 0xa7, 0x41, 0x57, 0x07, 0xb7, 0x42, 0xc3, 0xf1, 0x4d, 0x1a, 0x38,
- 0xa6, 0xc7, 0xc4, 0x97, 0x3e, 0x1f, 0x38, 0x5e, 0xcf, 0x3c, 0xbc, 0x7e, 0xc0, 0x04, 0xbd, 0x6e,
- 0xf6, 0x98, 0xc7, 0x38, 0x15, 0xac, 0x6b, 0x04, 0xdc, 0x17, 0x3e, 0xbe, 0x18, 0x89, 0x1b, 0x34,
- 0x70, 0x8c, 0x54, 0xdc, 0x50, 0xe2, 0x1b, 0x57, 0x7b, 0x8e, 0xe8, 0x8f, 0x0e, 0x0c, 0xdb, 0x1f,
- 0x9a, 0x3d, 0xbf, 0xe7, 0x9b, 0x52, 0xeb, 0x60, 0xf4, 0x40, 0x7e, 0xc9, 0x0f, 0xf9, 0x2b, 0xb2,
- 0xb6, 0xa1, 0x67, 0x9c, 0xdb, 0x3e, 0x67, 0xe6, 0xe1, 0x94, 0xc7, 0x8d, 0x9b, 0xa9, 0xcc, 0x90,
- 0xda, 0x7d, 0xc7, 0x63, 0xfc, 0xa1, 0x19, 0x0c, 0x7a, 0x40, 0x08, 0xcd, 0x21, 0x13, 0x74, 0x96,
- 0x96, 0x39, 0x4f, 0x8b, 0x8f, 0x3c, 0xe1, 0x0c, 0xd9, 0x94, 0xc2, 0x9b, 0xc7, 0x29, 0x84, 0x76,
- 0x9f, 0x0d, 0xe9, 0x94, 0xde, 0x8d, 0x79, 0x7a, 0x23, 0xe1, 0xb8, 0xa6, 0xe3, 0x89, 0x50, 0xf0,
- 0x49, 0x25, 0xfd, 0x4f, 0x0d, 0xad, 0xdd, 0xe9, 0x74, 0xac, 0x96, 0xd7, 0xe3, 0x2c, 0x0c, 0x2d,
- 0x2a, 0xfa, 0x78, 0x13, 0x2d, 0x05, 0x54, 0xf4, 0x6b, 0xda, 0xa6, 0xb6, 0xb5, 0xd2, 0x5c, 0x7d,
- 0x72, 0xd4, 0x58, 0x18, 0x1f, 0x35, 0x96, 0x80, 0x47, 0x24, 0x07, 0xdf, 0x44, 0x15, 0xf8, 0xb7,
- 0xf3, 0x30, 0x60, 0xb5, 0x45, 0x29, 0x55, 0x1b, 0x1f, 0x35, 0x2a, 0x96, 0xa2, 0xbd, 0xc8, 0xfc,
- 0x26, 0x89, 0x24, 0xbe, 0x8f, 0x96, 0x0f, 0xa8, 0x3d, 0x60, 0x5e, 0xb7, 0x56, 0xda, 0xd4, 0xb6,
- 0xaa, 0xdb, 0x57, 0x8d, 0x97, 0xd6, 0xd0, 0x50, 0x41, 0x35, 0x23, 0xa5, 0xe6, 0x9a, 0x8a, 0x64,
- 0x59, 0x11, 0x48, 0x6c, 0x4e, 0x1f, 0xa0, 0x0b, 0x99, 0x47, 0x90, 0x91, 0xcb, 0x3e, 0xa2, 0xee,
- 0x88, 0xe1, 0x36, 0x2a, 0x83, 0xf7, 0xb0, 0xa6, 0x6d, 0x2e, 0x6e, 0x55, 0xb7, 0x8d, 0x63, 0xfc,
- 0x4d, 0x24, 0xa2, 0x79, 0x56, 0x39, 0x2c, 0xc3, 0x57, 0x48, 0x22, 0x5b, 0xfa, 0x23, 0x0d, 0xad,
- 0xb4, 0xac, 0xdb, 0xdd, 0x2e, 0xc8, 0xe1, 0xcf, 0x51, 0x05, 0x2a, 0xdf, 0xa5, 0x82, 0xca, 0x84,
- 0x55, 0xb7, 0xaf, 0x65, 0xbc, 0x24, 0x85, 0x30, 0x82, 0x41, 0x0f, 0x08, 0xa1, 0x01, 0xd2, 0xc6,
- 0xe1, 0x75, 0x63, 0xff, 0xe0, 0x0b, 0x66, 0x8b, 0x7b, 0x4c, 0xd0, 0x26, 0x56, 0x7e, 0x50, 0x4a,
- 0x23, 0x89, 0x55, 0xbc, 0x87, 0x96, 0xc2, 0x80, 0xd9, 0x2a, 0x67, 0x57, 0x8e, 0xcb, 0x59, 0x1c,
- 0x59, 0x3b, 0x60, 0x76, 0x5a, 0x3c, 0xf8, 0x22, 0xd2, 0x8e, 0xfe, 0xbb, 0x86, 0xce, 0x26, 0x52,
- 0xbb, 0x4e, 0x28, 0xf0, 0x67, 0x53, 0x6f, 0x30, 0x8a, 0xbd, 0x01, 0xb4, 0xe5, 0x0b, 0xd6, 0x95,
- 0x9f, 0x4a, 0x4c, 0xc9, 0xc4, 0x7f, 0x0f, 0x95, 0x1d, 0xc1, 0x86, 0x61, 0xad, 0x24, 0x8b, 0xb0,
- 0x55, 0xf4, 0x01, 0x69, 0xfa, 0x5b, 0xa0, 0x4e, 0x22, 0x2b, 0xba, 0x9b, 0x89, 0x1e, 0x5e, 0x85,
- 0x3f, 0x45, 0x2b, 0x01, 0xe5, 0xcc, 0x13, 0x84, 0x3d, 0x98, 0x11, 0xfe, 0x2c, 0x1f, 0x56, 0x2c,
- 0xcf, 0x38, 0xf3, 0x6c, 0xd6, 0x3c, 0x3b, 0x3e, 0x6a, 0xac, 0x24, 0x44, 0x92, 0xda, 0xd3, 0xbf,
- 0x2f, 0xa1, 0x65, 0xd5, 0x12, 0xa7, 0x50, 0xea, 0xdd, 0x5c, 0xa9, 0x2f, 0x15, 0x1b, 0x8f, 0x79,
- 0x85, 0xc6, 0x1d, 0x74, 0x26, 0x14, 0x54, 0x8c, 0x42, 0x39, 0xa3, 0x05, 0x5a, 0x47, 0xd9, 0x93,
- 0x3a, 0xcd, 0x73, 0xca, 0xe2, 0x99, 0xe8, 0x9b, 0x28, 0x5b, 0xfa, 0x77, 0x25, 0x74, 0x2e, 0x3f,
- 0x98, 0xf8, 0x0d, 0x54, 0x0d, 0x19, 0x3f, 0x74, 0x6c, 0xb6, 0x47, 0x87, 0x4c, 0xed, 0x8d, 0xff,
- 0x29, 0xfd, 0x6a, 0x3b, 0x65, 0x91, 0xac, 0x1c, 0xee, 0x25, 0x6a, 0x96, 0xcf, 0x85, 0x7a, 0xf4,
- 0xfc, 0x94, 0xc2, 0x1a, 0x33, 0xa2, 0x35, 0x66, 0xb4, 0x3c, 0xb1, 0xcf, 0xdb, 0x82, 0x3b, 0x5e,
- 0x6f, 0xca, 0x11, 0x18, 0x23, 0x59, 0xcb, 0xf8, 0x63, 0x54, 0xe1, 0x2c, 0xf4, 0x47, 0xdc, 0x66,
- 0x2a, 0x15, 0xb9, 0xcd, 0x03, 0xfb, 0x1e, 0xca, 0x04, 0x4b, 0xaa, 0xbb, 0xeb, 0xdb, 0xd4, 0x8d,
- 0x8a, 0x93, 0xf6, 0xc7, 0x2a, 0xb4, 0x36, 0x51, 0x26, 0x48, 0x62, 0x0c, 0xb6, 0xe7, 0xaa, 0xca,
- 0xc5, 0x8e, 0x4b, 0x4f, 0xa5, 0x45, 0x3e, 0xc8, 0xb5, 0x88, 0x59, 0xac, 0xa4, 0x32, 0xb8, 0xb9,
- 0x0b, 0xe1, 0x0f, 0x0d, 0xad, 0x67, 0x05, 0x4f, 0x61, 0x27, 0x58, 0xf9, 0x9d, 0x70, 0xf9, 0x04,
- 0xcf, 0x98, 0xb3, 0x16, 0xfe, 0xd2, 0x50, 0x23, 0x2b, 0x66, 0x51, 0x4e, 0x87, 0x4c, 0x30, 0x1e,
- 0x26, 0x65, 0xc4, 0x5b, 0xa8, 0x42, 0xad, 0xd6, 0xfb, 0xdc, 0x1f, 0x05, 0xf1, 0x71, 0x83, 0xf8,
- 0x6e, 0x2b, 0x1a, 0x49, 0xb8, 0x70, 0x02, 0x07, 0x8e, 0xba, 0x53, 0x99, 0x13, 0x78, 0xd7, 0xf1,
- 0xba, 0x44, 0x72, 0x40, 0xc2, 0x83, 0x66, 0x5f, 0xcc, 0x4b, 0xc8, 0x2e, 0x97, 0x1c, 0xdc, 0x40,
- 0xe5, 0xd0, 0xf6, 0x03, 0x56, 0x5b, 0x92, 0x22, 0x2b, 0x10, 0x72, 0x1b, 0x08, 0x24, 0xa2, 0xe3,
- 0xcb, 0x68, 0x05, 0x04, 0xc3, 0x80, 0xda, 0xac, 0x56, 0x96, 0x42, 0x72, 0x11, 0xed, 0xc5, 0x44,
- 0x92, 0xf2, 0xf5, 0x5f, 0x26, 0x8a, 0x24, 0x57, 0xdf, 0x36, 0x42, 0xb6, 0xef, 0x09, 0xee, 0xbb,
- 0x2e, 0xe3, 0xea, 0x49, 0x49, 0xfb, 0xec, 0x24, 0x1c, 0x92, 0x91, 0xc2, 0x1e, 0x42, 0x41, 0x92,
- 0x1b, 0xd5, 0x46, 0x6f, 0x9f, 0x20, 0xff, 0x33, 0x12, 0xdb, 0x3c, 0x07, 0xfe, 0x32, 0x8c, 0x8c,
- 0x07, 0xfd, 0x37, 0x0d, 0x55, 0x95, 0xfe, 0x29, 0x34, 0xd6, 0xdd, 0x7c, 0x63, 0xbd, 0x56, 0x10,
- 0x61, 0xcc, 0xee, 0xa9, 0x47, 0x1a, 0xda, 0x88, 0x43, 0xf7, 0x69, 0xb7, 0x49, 0x5d, 0xea, 0xd9,
- 0x8c, 0xc7, 0xf7, 0x60, 0x03, 0x95, 0x9c, 0xb8, 0x91, 0x90, 0x32, 0x50, 0x6a, 0x59, 0xa4, 0xe4,
- 0x04, 0xf8, 0x0a, 0xaa, 0xf4, 0xfd, 0x50, 0xc8, 0x16, 0x89, 0x9a, 0x28, 0x89, 0xfa, 0x8e, 0xa2,
- 0x93, 0x44, 0x02, 0x7f, 0x88, 0xca, 0x81, 0xcf, 0x45, 0x58, 0x5b, 0x92, 0x51, 0x5f, 0x2b, 0x16,
- 0x35, 0xec, 0x36, 0xb5, 0xac, 0x53, 0xa4, 0x02, 0x66, 0x48, 0x64, 0x4d, 0xff, 0x46, 0x43, 0xff,
- 0x9f, 0x11, 0x7f, 0xa4, 0x83, 0xbb, 0x68, 0xd9, 0x89, 0x98, 0x0a, 0x1e, 0xbd, 0x55, 0xcc, 0xed,
- 0x8c, 0x54, 0xa4, 0xd0, 0x2c, 0x86, 0x60, 0xb1, 0x69, 0xfd, 0x27, 0x0d, 0x9d, 0x9f, 0x8a, 0x57,
- 0x42, 0x4c, 0xd8, 0xf9, 0x90, 0xbc, 0x72, 0x06, 0x62, 0xc2, 0xea, 0x96, 0x1c, 0x7c, 0x17, 0x55,
- 0x24, 0x42, 0xb5, 0x7d, 0x57, 0x25, 0xd0, 0x8c, 0x13, 0x68, 0x29, 0xfa, 0x8b, 0xa3, 0xc6, 0x2b,
- 0xd3, 0xb0, 0xdd, 0x88, 0xd9, 0x24, 0x31, 0x00, 0xa3, 0xc8, 0x38, 0xf7, 0xb9, 0x9a, 0x56, 0x39,
- 0x8a, 0xef, 0x01, 0x81, 0x44, 0x74, 0xfd, 0xe7, 0xb4, 0x49, 0x01, 0x3d, 0x42, 0x7c, 0x50, 0x9c,
- 0x49, 0x08, 0x0c, 0xa5, 0x23, 0x92, 0x83, 0x47, 0x68, 0xdd, 0x99, 0x80, 0x9b, 0x27, 0xdb, 0xc9,
- 0x89, 0x5a, 0xb3, 0xa6, 0xcc, 0xaf, 0x4f, 0x72, 0xc8, 0x94, 0x0b, 0x9d, 0xa1, 0x29, 0x29, 0x38,
- 0x09, 0x7d, 0x21, 0x02, 0x35, 0x4d, 0x37, 0x8a, 0x83, 0xdc, 0x34, 0x84, 0x8a, 0x7c, 0x5d, 0xa7,
- 0x63, 0x11, 0x69, 0x4a, 0x7f, 0x5c, 0x4a, 0xf2, 0x21, 0x17, 0xcd, 0x3b, 0xc9, 0x6b, 0xe5, 0x0e,
- 0x90, 0x67, 0x3e, 0x5a, 0x6b, 0x17, 0x32, 0x81, 0x27, 0x3c, 0x32, 0x25, 0x8d, 0x3b, 0x29, 0xf8,
- 0xd7, 0xfe, 0x0b, 0xf8, 0xaf, 0xce, 0x02, 0xfe, 0xf8, 0x0e, 0x5a, 0x14, 0x6e, 0x3c, 0xec, 0xaf,
- 0x17, 0xb3, 0xd8, 0xd9, 0x6d, 0x37, 0xab, 0x2a, 0xe5, 0x8b, 0x9d, 0xdd, 0x36, 0x01, 0x13, 0x78,
- 0x1f, 0x95, 0xf9, 0xc8, 0x65, 0x80, 0x95, 0x16, 0x8b, 0x63, 0x2f, 0xc8, 0x60, 0x3a, 0x7c, 0xf0,
- 0x15, 0x92, 0xc8, 0x8e, 0xfe, 0x2d, 0xc0, 0xec, 0x2c, 0xa2, 0xc2, 0x1c, 0xad, 0xba, 0x99, 0xd9,
- 0x51, 0x79, 0xb8, 0x75, 0xf2, 0xa9, 0x53, 0x43, 0x7f, 0x41, 0xf9, 0x5d, 0xcd, 0xf2, 0x48, 0xce,
- 0x87, 0x4e, 0x11, 0x4a, 0x9f, 0x0d, 0x73, 0x00, 0xcd, 0x1b, 0x0d, 0xbc, 0x9a, 0x03, 0xe8, 0xe9,
- 0x90, 0x44, 0x74, 0x38, 0x28, 0x21, 0xb3, 0x39, 0x13, 0x7b, 0xe9, 0xe2, 0x4a, 0x0e, 0x4a, 0x3b,
- 0xe1, 0x90, 0x8c, 0x94, 0xfe, 0xab, 0x86, 0xd6, 0x26, 0x00, 0x35, 0x7e, 0x15, 0x95, 0x7b, 0x99,
- 0x33, 0x9b, 0x64, 0x28, 0xba, 0xb3, 0x11, 0x0f, 0x76, 0x64, 0x02, 0xcb, 0x26, 0x76, 0xe4, 0x34,
- 0xd6, 0xc2, 0x66, 0xf6, 0x5a, 0x46, 0x73, 0x7c, 0x5e, 0x89, 0xcf, 0xbc, 0x98, 0xc9, 0x85, 0x5e,
- 0x9a, 0x77, 0xa1, 0xf5, 0x1f, 0x4b, 0x28, 0x06, 0x8d, 0x3b, 0xad, 0x77, 0xc9, 0x29, 0xa0, 0x37,
- 0x2b, 0x87, 0xde, 0x8e, 0xfb, 0x6f, 0x4a, 0x26, 0xb6, 0xb9, 0x20, 0xff, 0xfe, 0x04, 0xc8, 0xbf,
- 0x76, 0x02, 0x9b, 0x2f, 0x07, 0xfa, 0x8f, 0x35, 0xb4, 0x96, 0x91, 0x3e, 0x85, 0xe3, 0xbd, 0x9f,
- 0x3f, 0xde, 0x97, 0x8a, 0x3f, 0x65, 0xce, 0x01, 0xdf, 0xce, 0xbd, 0x40, 0x6e, 0xb2, 0x06, 0x2a,
- 0xdb, 0x4e, 0x97, 0xe7, 0x46, 0x00, 0x98, 0x21, 0x89, 0xe8, 0xfa, 0x57, 0xe8, 0xfc, 0x54, 0x8e,
- 0xb0, 0x2d, 0x81, 0x56, 0xd7, 0x11, 0x8e, 0xef, 0xc5, 0xe7, 0xd2, 0x2c, 0xf6, 0xf2, 0x9d, 0x58,
- 0x2f, 0x87, 0xcc, 0x94, 0x29, 0x92, 0x31, 0xdb, 0xdc, 0x79, 0xf2, 0xbc, 0xbe, 0xf0, 0xf4, 0x79,
- 0x7d, 0xe1, 0xd9, 0xf3, 0xfa, 0xc2, 0xd7, 0xe3, 0xba, 0xf6, 0x64, 0x5c, 0xd7, 0x9e, 0x8e, 0xeb,
- 0xda, 0xb3, 0x71, 0x5d, 0xfb, 0x7b, 0x5c, 0xd7, 0x7e, 0xf8, 0xa7, 0xbe, 0xf0, 0xc9, 0xc5, 0x97,
- 0xfe, 0x99, 0xec, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc2, 0xa4, 0xff, 0x46, 0x13, 0x00,
- 0x00,
-}
+func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/networking/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/networking/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..abd29cba
--- /dev/null
+++ b/operator/vendor/k8s.io/api/networking/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,72 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*HTTPIngressPath) ProtoMessage() {}
+
+func (*HTTPIngressRuleValue) ProtoMessage() {}
+
+func (*IPAddress) ProtoMessage() {}
+
+func (*IPAddressList) ProtoMessage() {}
+
+func (*IPAddressSpec) ProtoMessage() {}
+
+func (*Ingress) ProtoMessage() {}
+
+func (*IngressBackend) ProtoMessage() {}
+
+func (*IngressClass) ProtoMessage() {}
+
+func (*IngressClassList) ProtoMessage() {}
+
+func (*IngressClassParametersReference) ProtoMessage() {}
+
+func (*IngressClassSpec) ProtoMessage() {}
+
+func (*IngressList) ProtoMessage() {}
+
+func (*IngressLoadBalancerIngress) ProtoMessage() {}
+
+func (*IngressLoadBalancerStatus) ProtoMessage() {}
+
+func (*IngressPortStatus) ProtoMessage() {}
+
+func (*IngressRule) ProtoMessage() {}
+
+func (*IngressRuleValue) ProtoMessage() {}
+
+func (*IngressSpec) ProtoMessage() {}
+
+func (*IngressStatus) ProtoMessage() {}
+
+func (*IngressTLS) ProtoMessage() {}
+
+func (*ParentReference) ProtoMessage() {}
+
+func (*ServiceCIDR) ProtoMessage() {}
+
+func (*ServiceCIDRList) ProtoMessage() {}
+
+func (*ServiceCIDRSpec) ProtoMessage() {}
+
+func (*ServiceCIDRStatus) ProtoMessage() {}
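
Note on the new generated.protomessage.pb.go files in this patch: they are compiled only when the kubernetes_protomessage_one_more_release build tag is set, and each provides nothing but empty ProtoMessage() marker methods. The tag name suggests a one-release transition window for callers that still need the marker method after the gogo-generated implementations are removed, though that intent is not spelled out in the patch itself. A minimal sketch of the pattern follows; the interface and type names are illustrative assumptions, not part of the vendored API:

package example

// protoMessage mirrors the marker-method contract that the generated
// stubs preserve (assumed shape for illustration only).
type protoMessage interface {
	ProtoMessage()
}

// exampleType stands in for any of the vendored API types above.
type exampleType struct{}

// ProtoMessage is a no-op marker, exactly like the generated stubs.
func (*exampleType) ProtoMessage() {}

// Compile-time assertion: the marker method keeps the type usable
// wherever a proto-style message is expected.
var _ protoMessage = (*exampleType)(nil)
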
diff --git a/operator/vendor/k8s.io/api/networking/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/networking/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..08bdb27a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/networking/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,147 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressPath) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.HTTPIngressPath"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in HTTPIngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.HTTPIngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IPAddress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddressList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IPAddressList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IPAddressSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IPAddressSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Ingress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.Ingress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressBackend) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressBackend"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClass) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassParametersReference) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressClassParametersReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerIngress) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressLoadBalancerIngress"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressLoadBalancerStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressLoadBalancerStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressPortStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressPortStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRule) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressRuleValue) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressRuleValue"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IngressTLS) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.IngressTLS"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ParentReference) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.ParentReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDR) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.ServiceCIDR"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRList) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.ServiceCIDRList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRSpec) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.ServiceCIDRSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServiceCIDRStatus) OpenAPIModelName() string {
+ return "io.k8s.api.networking.v1beta1.ServiceCIDRStatus"
+}
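
The new zz_generated.model_name.go files attach an OpenAPIModelName() string method to each type, returning its fully qualified OpenAPI model name (for example io.k8s.api.networking.v1beta1.Ingress). A hedged sketch of how such a method can be consumed is below; the interface, stand-in type, and helper are illustrative assumptions, not part of the vendored code:

package example

import "fmt"

// modelNamer is the shape shared by every type in these generated files:
// a single method returning the canonical OpenAPI model name.
type modelNamer interface {
	OpenAPIModelName() string
}

// ingressLike is a stand-in for a vendored type such as v1beta1.Ingress.
type ingressLike struct{}

func (ingressLike) OpenAPIModelName() string {
	return "io.k8s.api.networking.v1beta1.Ingress"
}

// describe prints the model name for anything that exposes one; a schema
// registry or OpenAPI generator could key its definitions the same way.
func describe(m modelNamer) {
	fmt.Println("model:", m.OpenAPIModelName())
}
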
diff --git a/operator/vendor/k8s.io/api/node/v1/doc.go b/operator/vendor/k8s.io/api/node/v1/doc.go
index 3239af70..3f8bfbc0 100644
--- a/operator/vendor/k8s.io/api/node/v1/doc.go
+++ b/operator/vendor/k8s.io/api/node/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.node.v1
+
// +groupName=node.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/node/v1/generated.pb.go b/operator/vendor/k8s.io/api/node/v1/generated.pb.go
index 4c304f55..6fcb8ad0 100644
--- a/operator/vendor/k8s.io/api/node/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/node/v1/generated.pb.go
@@ -23,200 +23,25 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource"
resource "k8s.io/apimachinery/pkg/api/resource"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Overhead) Reset() { *m = Overhead{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (m *Overhead) Reset() { *m = Overhead{} }
-func (*Overhead) ProtoMessage() {}
-func (*Overhead) Descriptor() ([]byte, []int) {
- return fileDescriptor_9007436710e7565b, []int{0}
-}
-func (m *Overhead) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Overhead) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Overhead.Merge(m, src)
-}
-func (m *Overhead) XXX_Size() int {
- return m.Size()
-}
-func (m *Overhead) XXX_DiscardUnknown() {
- xxx_messageInfo_Overhead.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Overhead proto.InternalMessageInfo
-
-func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (*RuntimeClass) ProtoMessage() {}
-func (*RuntimeClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_9007436710e7565b, []int{1}
-}
-func (m *RuntimeClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClass.Merge(m, src)
-}
-func (m *RuntimeClass) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClass) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo
-
-func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-func (*RuntimeClassList) ProtoMessage() {}
-func (*RuntimeClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9007436710e7565b, []int{2}
-}
-func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClassList.Merge(m, src)
-}
-func (m *RuntimeClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo
-
-func (m *Scheduling) Reset() { *m = Scheduling{} }
-func (*Scheduling) ProtoMessage() {}
-func (*Scheduling) Descriptor() ([]byte, []int) {
- return fileDescriptor_9007436710e7565b, []int{3}
-}
-func (m *Scheduling) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scheduling) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scheduling.Merge(m, src)
-}
-func (m *Scheduling) XXX_Size() int {
- return m.Size()
-}
-func (m *Scheduling) XXX_DiscardUnknown() {
- xxx_messageInfo_Scheduling.DiscardUnknown(m)
-}
+func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-var xxx_messageInfo_Scheduling proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1.Overhead")
- proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1.Overhead.PodFixedEntry")
- proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1.RuntimeClass")
- proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1.RuntimeClassList")
- proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1.Scheduling")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1.Scheduling.NodeSelectorEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/node/v1/generated.proto", fileDescriptor_9007436710e7565b)
-}
-
-var fileDescriptor_9007436710e7565b = []byte{
- // 643 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
- 0x10, 0xcd, 0xa6, 0xbf, 0xaa, 0xe9, 0x26, 0xfd, 0x51, 0x96, 0x1e, 0xa2, 0x08, 0x39, 0x51, 0x4e,
- 0x05, 0xa9, 0xeb, 0xb6, 0x42, 0xa8, 0xe2, 0x82, 0x64, 0x68, 0x05, 0x12, 0x14, 0x70, 0xe1, 0x82,
- 0x38, 0xb0, 0xb5, 0x17, 0x67, 0x9b, 0xd8, 0x1b, 0xd9, 0xeb, 0x88, 0xdc, 0x10, 0x17, 0x24, 0x4e,
- 0xfd, 0x2e, 0x1c, 0xf8, 0x0a, 0x15, 0xa7, 0x1e, 0x7b, 0x6a, 0xa9, 0xf9, 0x16, 0x9c, 0xd0, 0xae,
- 0xff, 0x64, 0x83, 0x43, 0x28, 0x37, 0xef, 0xec, 0x7b, 0x6f, 0x66, 0xde, 0xec, 0x18, 0x76, 0xfb,
- 0x3b, 0x11, 0x66, 0xdc, 0x24, 0x43, 0x66, 0x06, 0xdc, 0xa5, 0xe6, 0x68, 0xcb, 0xf4, 0x68, 0x40,
- 0x43, 0x22, 0xa8, 0x8b, 0x87, 0x21, 0x17, 0x1c, 0xa1, 0x14, 0x83, 0xc9, 0x90, 0x61, 0x89, 0xc1,
- 0xa3, 0xad, 0xd6, 0x86, 0xc7, 0x44, 0x2f, 0x3e, 0xc4, 0x0e, 0xf7, 0x4d, 0x8f, 0x7b, 0xdc, 0x54,
- 0xd0, 0xc3, 0xf8, 0x9d, 0x3a, 0xa9, 0x83, 0xfa, 0x4a, 0x25, 0x5a, 0x7a, 0x1a, 0x87, 0x87, 0xb3,
- 0xd2, 0xb4, 0xee, 0x4c, 0x30, 0x3e, 0x71, 0x7a, 0x2c, 0xa0, 0xe1, 0xd8, 0x1c, 0xf6, 0x3d, 0x45,
- 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc4, 0x8a, 0x4c, 0x9f, 0x0a, 0x32, 0x2b, 0x97, 0xf9,
- 0x27, 0x56, 0x18, 0x07, 0x82, 0xf9, 0xe5, 0x34, 0x77, 0xff, 0x46, 0x88, 0x9c, 0x1e, 0xf5, 0xc9,
- 0xef, 0xbc, 0xee, 0xb7, 0x2a, 0xac, 0x3d, 0x1b, 0xd1, 0xb0, 0x47, 0x89, 0x8b, 0x4e, 0x01, 0xac,
- 0x0d, 0xb9, 0xbb, 0xc7, 0xde, 0x53, 0xb7, 0x09, 0x3a, 0x0b, 0xeb, 0xf5, 0xed, 0xdb, 0xb8, 0x6c,
- 0x2e, 0xce, 0x09, 0xf8, 0x79, 0x06, 0xde, 0x0d, 0x44, 0x38, 0xb6, 0x3e, 0x81, 0x93, 0xf3, 0x76,
- 0x25, 0x39, 0x6f, 0xd7, 0xf2, 0xf8, 0xcf, 0xf3, 0x76, 0xbb, 0xec, 0x2c, 0xb6, 0x33, 0xb3, 0x9e,
- 0xb0, 0x48, 0x7c, 0xbc, 0x98, 0x0b, 0xd9, 0x27, 0x3e, 0xfd, 0x7c, 0xd1, 0xde, 0xb8, 0x8a, 0xf7,
- 0xf8, 0x45, 0x4c, 0x02, 0xc1, 0xc4, 0xd8, 0x2e, 0xba, 0x68, 0xf5, 0xe1, 0xca, 0x54, 0x91, 0x68,
- 0x15, 0x2e, 0xf4, 0xe9, 0xb8, 0x09, 0x3a, 0x60, 0x7d, 0xd9, 0x96, 0x9f, 0xe8, 0x21, 0x5c, 0x1c,
- 0x91, 0x41, 0x4c, 0x9b, 0xd5, 0x0e, 0x58, 0xaf, 0x6f, 0x63, 0xad, 0xe3, 0x22, 0x17, 0x1e, 0xf6,
- 0x3d, 0x65, 0x41, 0x39, 0x57, 0x4a, 0xbe, 0x57, 0xdd, 0x01, 0xdd, 0x2f, 0x55, 0xd8, 0xb0, 0x53,
- 0xbf, 0x1f, 0x0c, 0x48, 0x14, 0xa1, 0xb7, 0xb0, 0x26, 0x27, 0xec, 0x12, 0x41, 0x54, 0xc6, 0xfa,
- 0xf6, 0xe6, 0x3c, 0xf5, 0x08, 0x4b, 0xb4, 0x72, 0xf8, 0xf0, 0x88, 0x3a, 0xe2, 0x29, 0x15, 0xc4,
- 0x42, 0x99, 0xa9, 0x70, 0x12, 0xb3, 0x0b, 0x55, 0x74, 0x0b, 0x2e, 0xf5, 0x48, 0xe0, 0x0e, 0x68,
- 0xa8, 0xca, 0x5f, 0xb6, 0xae, 0x65, 0xf0, 0xa5, 0x47, 0x69, 0xd8, 0xce, 0xef, 0xd1, 0x1e, 0xac,
- 0xf1, 0x6c, 0x70, 0xcd, 0x05, 0x55, 0xcc, 0xcd, 0x79, 0xc3, 0xb5, 0x1a, 0x72, 0x92, 0xf9, 0xc9,
- 0x2e, 0xb8, 0x68, 0x1f, 0x42, 0xf9, 0x98, 0xdc, 0x78, 0xc0, 0x02, 0xaf, 0xf9, 0x9f, 0x52, 0x32,
- 0x66, 0x29, 0x1d, 0x14, 0x28, 0xeb, 0x7f, 0xd9, 0xc0, 0xe4, 0x6c, 0x6b, 0x0a, 0xdd, 0xaf, 0x00,
- 0xae, 0xea, 0xae, 0xc9, 0x57, 0x81, 0xde, 0x94, 0x9c, 0xc3, 0x57, 0x73, 0x4e, 0xb2, 0x95, 0x6f,
- 0xab, 0xf9, 0x63, 0xcc, 0x23, 0x9a, 0x6b, 0xbb, 0x70, 0x91, 0x09, 0xea, 0x47, 0xcd, 0xaa, 0x7a,
- 0xe4, 0x9d, 0x59, 0xd5, 0xeb, 0x25, 0x59, 0x2b, 0x99, 0xd8, 0xe2, 0x63, 0x49, 0xb3, 0x53, 0x76,
- 0xf7, 0xb8, 0x0a, 0xb5, 0xa6, 0xd0, 0x11, 0x6c, 0x48, 0xf2, 0x01, 0x1d, 0x50, 0x47, 0xf0, 0x30,
- 0xdb, 0xa0, 0xcd, 0xf9, 0xd6, 0xe0, 0x7d, 0x8d, 0x92, 0xee, 0xd1, 0x5a, 0x96, 0xac, 0xa1, 0x5f,
- 0xd9, 0x53, 0xda, 0xe8, 0x15, 0xac, 0x0b, 0x3e, 0x90, 0xab, 0xcc, 0x78, 0x90, 0xf7, 0x31, 0x35,
- 0x05, 0xb9, 0x49, 0x32, 0xd5, 0xcb, 0x02, 0x66, 0xdd, 0xc8, 0x84, 0xeb, 0x93, 0x58, 0x64, 0xeb,
- 0x3a, 0xad, 0xfb, 0xf0, 0x7a, 0xa9, 0x9e, 0x19, 0x2b, 0xb3, 0xa6, 0xaf, 0xcc, 0xb2, 0xb6, 0x02,
- 0xd6, 0xce, 0xc9, 0xa5, 0x51, 0x39, 0xbd, 0x34, 0x2a, 0x67, 0x97, 0x46, 0xe5, 0x43, 0x62, 0x80,
- 0x93, 0xc4, 0x00, 0xa7, 0x89, 0x01, 0xce, 0x12, 0x03, 0x7c, 0x4f, 0x0c, 0x70, 0xfc, 0xc3, 0xa8,
- 0xbc, 0x46, 0xe5, 0xbf, 0xfa, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd3, 0x3f, 0x9c, 0xd0, 0xea,
- 0x05, 0x00, 0x00,
-}
+func (m *Scheduling) Reset() { *m = Scheduling{} }
func (m *Overhead) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -243,7 +68,7 @@ func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- {
v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])]
baseI := i
@@ -418,7 +243,7 @@ func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.NodeSelector[string(keysForNodeSelector[iNdEx])]
baseI := i
@@ -544,7 +369,7 @@ func (this *Overhead) String() string {
for k := range this.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{"
for _, k := range keysForPodFixed {
mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)])
@@ -598,7 +423,7 @@ func (this *Scheduling) String() string {
for k := range this.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
mapStringForNodeSelector := "map[string]string{"
for _, k := range keysForNodeSelector {
mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
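
Across the regenerated marshal and String code in this patch, github_com_gogo_protobuf_sortkeys.Strings is replaced by the standard library's sort.Strings. Both sort a []string in place, so the deterministic map-key ordering used during marshaling is unchanged while the gogo dependency is dropped. A small self-contained sketch of the pattern (map contents and function names are illustrative, not from the patch):

package example

import (
	"fmt"
	"sort"
)

// stableKeys returns a map's keys in sorted order, which is what the
// generated MarshalToSizedBuffer and String methods rely on so their
// output is deterministic across runs.
func stableKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // drop-in for github_com_gogo_protobuf_sortkeys.Strings
	return keys
}

// printStable walks the map in a stable order.
func printStable(m map[string]string) {
	for _, k := range stableKeys(m) {
		fmt.Printf("%s=%s\n", k, m[k])
	}
}
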
diff --git a/operator/vendor/k8s.io/api/node/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/node/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..97072dfa
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1/generated.protomessage.pb.go
@@ -0,0 +1,30 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*Overhead) ProtoMessage() {}
+
+func (*RuntimeClass) ProtoMessage() {}
+
+func (*RuntimeClassList) ProtoMessage() {}
+
+func (*Scheduling) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/node/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/node/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..68ca99f7
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Overhead) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1.Overhead"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClass) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1.RuntimeClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClassList) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1.RuntimeClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scheduling) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1.Scheduling"
+}
diff --git a/operator/vendor/k8s.io/api/node/v1alpha1/doc.go b/operator/vendor/k8s.io/api/node/v1alpha1/doc.go
index 2f3d46ac..97745728 100644
--- a/operator/vendor/k8s.io/api/node/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/node/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.api.node.v1alpha1
// +groupName=node.k8s.io
diff --git a/operator/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
index 16ac6964..f25af72e 100644
--- a/operator/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
@@ -23,231 +23,27 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource"
resource "k8s.io/apimachinery/pkg/api/resource"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Overhead) Reset() { *m = Overhead{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (m *Overhead) Reset() { *m = Overhead{} }
-func (*Overhead) ProtoMessage() {}
-func (*Overhead) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8fee97bf5273e47, []int{0}
-}
-func (m *Overhead) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Overhead) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Overhead.Merge(m, src)
-}
-func (m *Overhead) XXX_Size() int {
- return m.Size()
-}
-func (m *Overhead) XXX_DiscardUnknown() {
- xxx_messageInfo_Overhead.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Overhead proto.InternalMessageInfo
-
-func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (*RuntimeClass) ProtoMessage() {}
-func (*RuntimeClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8fee97bf5273e47, []int{1}
-}
-func (m *RuntimeClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClass.Merge(m, src)
-}
-func (m *RuntimeClass) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClass) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo
-
-func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-func (*RuntimeClassList) ProtoMessage() {}
-func (*RuntimeClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8fee97bf5273e47, []int{2}
-}
-func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClassList.Merge(m, src)
-}
-func (m *RuntimeClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClassList.DiscardUnknown(m)
-}
+func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo
+func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} }
-func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} }
-func (*RuntimeClassSpec) ProtoMessage() {}
-func (*RuntimeClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8fee97bf5273e47, []int{3}
-}
-func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClassSpec.Merge(m, src)
-}
-func (m *RuntimeClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo
-
-func (m *Scheduling) Reset() { *m = Scheduling{} }
-func (*Scheduling) ProtoMessage() {}
-func (*Scheduling) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8fee97bf5273e47, []int{4}
-}
-func (m *Scheduling) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scheduling) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scheduling.Merge(m, src)
-}
-func (m *Scheduling) XXX_Size() int {
- return m.Size()
-}
-func (m *Scheduling) XXX_DiscardUnknown() {
- xxx_messageInfo_Scheduling.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Scheduling proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead")
- proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry")
- proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass")
- proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList")
- proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec")
- proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_a8fee97bf5273e47)
-}
-
-var fileDescriptor_a8fee97bf5273e47 = []byte{
- // 683 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x3d, 0x6f, 0xd3, 0x4c,
- 0x1c, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x8f, 0x9f, 0x0a, 0x45, 0x19, 0x9c, 0xca, 0x42,
- 0x28, 0x42, 0xea, 0x99, 0x56, 0xa8, 0xaa, 0x18, 0x8a, 0x64, 0x5e, 0x04, 0xa2, 0xb4, 0x70, 0x2d,
- 0x0b, 0x62, 0xe0, 0x6a, 0x1f, 0x8e, 0x89, 0xed, 0xb3, 0xec, 0x73, 0x44, 0x36, 0xc4, 0x82, 0xc4,
- 0xc4, 0xc4, 0xb7, 0x81, 0xb9, 0x63, 0x27, 0xd4, 0xa9, 0xa5, 0xe1, 0x3b, 0x30, 0x30, 0xa1, 0xb3,
- 0xcf, 0xc9, 0x25, 0x69, 0x68, 0xd8, 0x7c, 0x77, 0xbf, 0x97, 0xff, 0x6b, 0x02, 0x5b, 0x9d, 0xed,
- 0x04, 0x79, 0xcc, 0x24, 0x91, 0x67, 0x86, 0xcc, 0xa1, 0x66, 0x77, 0x83, 0xf8, 0x51, 0x9b, 0x6c,
- 0x98, 0x2e, 0x0d, 0x69, 0x4c, 0x38, 0x75, 0x50, 0x14, 0x33, 0xce, 0xb4, 0x7a, 0x8e, 0x44, 0x24,
- 0xf2, 0x90, 0x40, 0xa2, 0x02, 0xd9, 0x58, 0x77, 0x3d, 0xde, 0x4e, 0x8f, 0x90, 0xcd, 0x02, 0xd3,
- 0x65, 0x2e, 0x33, 0x33, 0xc2, 0x51, 0xfa, 0x26, 0x3b, 0x65, 0x87, 0xec, 0x2b, 0x17, 0x6a, 0x18,
- 0x8a, 0xa5, 0xcd, 0x62, 0x61, 0x39, 0x6e, 0xd6, 0xb8, 0x3d, 0xc4, 0x04, 0xc4, 0x6e, 0x7b, 0x21,
- 0x8d, 0x7b, 0x66, 0xd4, 0x71, 0x33, 0x52, 0x4c, 0x13, 0x96, 0xc6, 0x36, 0xfd, 0x27, 0x56, 0x62,
- 0x06, 0x94, 0x93, 0xcb, 0xbc, 0xcc, 0x69, 0xac, 0x38, 0x0d, 0xb9, 0x17, 0x4c, 0xda, 0x6c, 0x5d,
- 0x45, 0x48, 0xec, 0x36, 0x0d, 0xc8, 0x38, 0xcf, 0x38, 0x29, 0xc3, 0xca, 0x7e, 0x97, 0xc6, 0x6d,
- 0x4a, 0x1c, 0xed, 0x3b, 0x80, 0x95, 0x88, 0x39, 0x0f, 0xbd, 0x77, 0xd4, 0xa9, 0x83, 0xb5, 0xb9,
- 0x56, 0x75, 0xf3, 0x16, 0x9a, 0x56, 0x62, 0x54, 0xd0, 0xd0, 0x33, 0x49, 0x79, 0x10, 0xf2, 0xb8,
- 0x67, 0x7d, 0x04, 0xc7, 0x67, 0xcd, 0x52, 0xff, 0xac, 0x59, 0x29, 0xee, 0x7f, 0x9f, 0x35, 0x9b,
- 0x93, 0xf5, 0x45, 0x58, 0x96, 0x6c, 0xd7, 0x4b, 0xf8, 0x87, 0xf3, 0xbf, 0x42, 0xf6, 0x48, 0x40,
- 0x3f, 0x9d, 0x37, 0xd7, 0x67, 0xe9, 0x00, 0x7a, 0x9e, 0x92, 0x90, 0x7b, 0xbc, 0x87, 0x07, 0xb9,
- 0x34, 0x3a, 0x70, 0x69, 0x24, 0x48, 0x6d, 0x05, 0xce, 0x75, 0x68, 0xaf, 0x0e, 0xd6, 0x40, 0x6b,
- 0x11, 0x8b, 0x4f, 0xed, 0x3e, 0x5c, 0xe8, 0x12, 0x3f, 0xa5, 0xf5, 0xf2, 0x1a, 0x68, 0x55, 0x37,
- 0x91, 0x92, 0xf7, 0xc0, 0x0b, 0x45, 0x1d, 0x37, 0x2b, 0xc4, 0xa4, 0x57, 0x4e, 0xbe, 0x53, 0xde,
- 0x06, 0xc6, 0x37, 0x00, 0x6b, 0x38, 0xaf, 0xfa, 0x3d, 0x9f, 0x24, 0x89, 0xf6, 0x1a, 0x56, 0x44,
- 0x9f, 0x1d, 0xc2, 0x49, 0xe6, 0x38, 0x5a, 0xd5, 0x09, 0xf5, 0x04, 0x09, 0x34, 0xea, 0x6e, 0xa0,
- 0xfd, 0xa3, 0xb7, 0xd4, 0xe6, 0x4f, 0x29, 0x27, 0x96, 0x26, 0x8b, 0x0a, 0x87, 0x77, 0x78, 0xa0,
- 0xaa, 0xed, 0xc2, 0xf9, 0x24, 0xa2, 0xb6, 0x8c, 0xfd, 0xe6, 0xf4, 0x9e, 0xa9, 0x71, 0x1d, 0x44,
- 0xd4, 0xb6, 0x6a, 0x52, 0x77, 0x5e, 0x9c, 0x70, 0xa6, 0x62, 0x7c, 0x05, 0x70, 0x45, 0x05, 0x8a,
- 0x06, 0x69, 0xaf, 0x26, 0x92, 0x40, 0xb3, 0x25, 0x21, 0xd8, 0x59, 0x0a, 0x2b, 0xc5, 0x5c, 0x14,
- 0x37, 0x4a, 0x02, 0x4f, 0xe0, 0x82, 0xc7, 0x69, 0x90, 0xd4, 0xcb, 0xd9, 0xd4, 0xdd, 0x98, 0x2d,
- 0x03, 0x6b, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x9c, 0x6b, 0x18, 0xbf, 0xc6, 0xe2, 0x17, 0xa9,
- 0x69, 0x3b, 0x70, 0x59, 0xae, 0xc2, 0x23, 0x12, 0x3a, 0x3e, 0x8d, 0xf3, 0xe6, 0x5b, 0xd7, 0xa4,
- 0xc4, 0x32, 0x1e, 0x79, 0xc5, 0x63, 0x68, 0x6d, 0x17, 0x56, 0x98, 0x1c, 0x78, 0x59, 0x66, 0xe3,
- 0xea, 0xd5, 0xb0, 0x6a, 0x22, 0xdf, 0xe2, 0x84, 0x07, 0x0a, 0xda, 0x21, 0x84, 0x62, 0x21, 0x9d,
- 0xd4, 0xf7, 0x42, 0xb7, 0x3e, 0x97, 0xe9, 0x5d, 0x9f, 0xae, 0x77, 0x30, 0xc0, 0x5a, 0xcb, 0x62,
- 0x08, 0x86, 0x67, 0xac, 0xe8, 0x18, 0x5f, 0xca, 0x50, 0x79, 0xd2, 0x22, 0x58, 0x13, 0x32, 0x07,
- 0xd4, 0xa7, 0x36, 0x67, 0xb1, 0xdc, 0xe8, 0xad, 0x59, 0x6c, 0xd0, 0x9e, 0x42, 0xcc, 0xf7, 0x7a,
- 0x55, 0x16, 0xaa, 0xa6, 0x3e, 0xe1, 0x11, 0x07, 0xed, 0x05, 0xac, 0x72, 0xe6, 0x8b, 0x1f, 0x18,
- 0x8f, 0x85, 0x45, 0x33, 0x75, 0xd5, 0x50, 0x6c, 0xb6, 0x98, 0x8a, 0xc3, 0x01, 0xcc, 0xfa, 0x5f,
- 0x0a, 0x57, 0x87, 0x77, 0x09, 0x56, 0x75, 0x1a, 0x77, 0xe1, 0x7f, 0x13, 0xf1, 0x5c, 0xb2, 0xc2,
- 0xab, 0xea, 0x0a, 0x2f, 0x2a, 0x2b, 0x69, 0xed, 0x1c, 0x5f, 0xe8, 0xa5, 0x93, 0x0b, 0xbd, 0x74,
- 0x7a, 0xa1, 0x97, 0xde, 0xf7, 0x75, 0x70, 0xdc, 0xd7, 0xc1, 0x49, 0x5f, 0x07, 0xa7, 0x7d, 0x1d,
- 0xfc, 0xe8, 0xeb, 0xe0, 0xf3, 0x4f, 0xbd, 0xf4, 0xb2, 0x3e, 0xed, 0x7f, 0xe7, 0x4f, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xa7, 0x9b, 0x7f, 0x45, 0x92, 0x06, 0x00, 0x00,
-}
+func (m *Scheduling) Reset() { *m = Scheduling{} }
func (m *Overhead) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -274,7 +70,7 @@ func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- {
v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])]
baseI := i
@@ -482,7 +278,7 @@ func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.NodeSelector[string(keysForNodeSelector[iNdEx])]
baseI := i
@@ -619,7 +415,7 @@ func (this *Overhead) String() string {
for k := range this.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{"
for _, k := range keysForPodFixed {
mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)])
@@ -683,7 +479,7 @@ func (this *Scheduling) String() string {
for k := range this.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
mapStringForNodeSelector := "map[string]string{"
for _, k := range keysForNodeSelector {
mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
diff --git a/operator/vendor/k8s.io/api/node/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/node/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..acd3b604
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*Overhead) ProtoMessage() {}
+
+func (*RuntimeClass) ProtoMessage() {}
+
+func (*RuntimeClassList) ProtoMessage() {}
+
+func (*RuntimeClassSpec) ProtoMessage() {}
+
+func (*Scheduling) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/node/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/node/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..444825de
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Overhead) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1alpha1.Overhead"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClass) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1alpha1.RuntimeClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClassList) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1alpha1.RuntimeClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1alpha1.RuntimeClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scheduling) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1alpha1.Scheduling"
+}
diff --git a/operator/vendor/k8s.io/api/node/v1beta1/doc.go b/operator/vendor/k8s.io/api/node/v1beta1/doc.go
index 7b47c8df..2cf0f4a1 100644
--- a/operator/vendor/k8s.io/api/node/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/node/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.node.v1beta1
// +groupName=node.k8s.io
diff --git a/operator/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/node/v1beta1/generated.pb.go
index 537961c2..c97b8fc0 100644
--- a/operator/vendor/k8s.io/api/node/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/node/v1beta1/generated.pb.go
@@ -23,200 +23,25 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource"
resource "k8s.io/apimachinery/pkg/api/resource"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Overhead) Reset() { *m = Overhead{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (m *Overhead) Reset() { *m = Overhead{} }
-func (*Overhead) ProtoMessage() {}
-func (*Overhead) Descriptor() ([]byte, []int) {
- return fileDescriptor_73bb62abe8438af4, []int{0}
-}
-func (m *Overhead) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Overhead) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Overhead.Merge(m, src)
-}
-func (m *Overhead) XXX_Size() int {
- return m.Size()
-}
-func (m *Overhead) XXX_DiscardUnknown() {
- xxx_messageInfo_Overhead.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Overhead proto.InternalMessageInfo
-
-func (m *RuntimeClass) Reset() { *m = RuntimeClass{} }
-func (*RuntimeClass) ProtoMessage() {}
-func (*RuntimeClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_73bb62abe8438af4, []int{1}
-}
-func (m *RuntimeClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClass.Merge(m, src)
-}
-func (m *RuntimeClass) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClass) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo
-
-func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-func (*RuntimeClassList) ProtoMessage() {}
-func (*RuntimeClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73bb62abe8438af4, []int{2}
-}
-func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RuntimeClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RuntimeClassList.Merge(m, src)
-}
-func (m *RuntimeClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *RuntimeClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_RuntimeClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo
-
-func (m *Scheduling) Reset() { *m = Scheduling{} }
-func (*Scheduling) ProtoMessage() {}
-func (*Scheduling) Descriptor() ([]byte, []int) {
- return fileDescriptor_73bb62abe8438af4, []int{3}
-}
-func (m *Scheduling) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Scheduling) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Scheduling.Merge(m, src)
-}
-func (m *Scheduling) XXX_Size() int {
- return m.Size()
-}
-func (m *Scheduling) XXX_DiscardUnknown() {
- xxx_messageInfo_Scheduling.DiscardUnknown(m)
-}
+func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} }
-var xxx_messageInfo_Scheduling proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead")
- proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry")
- proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass")
- proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList")
- proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_73bb62abe8438af4)
-}
-
-var fileDescriptor_73bb62abe8438af4 = []byte{
- // 654 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbb, 0x6f, 0x13, 0x31,
- 0x18, 0x8f, 0x53, 0xaa, 0xa6, 0x4e, 0x0a, 0xc5, 0x54, 0x6a, 0x94, 0xe1, 0x52, 0x82, 0x10, 0x65,
- 0xa8, 0x8f, 0x56, 0x80, 0x2a, 0x24, 0x84, 0x74, 0x3c, 0xc4, 0xb3, 0x85, 0x2b, 0x2c, 0x88, 0x01,
- 0xe7, 0xce, 0x5c, 0x4c, 0x72, 0xe7, 0xe8, 0xce, 0x17, 0x91, 0x0d, 0xb1, 0x20, 0x31, 0xb1, 0xf0,
- 0xdf, 0xc0, 0xde, 0x8d, 0x2e, 0x48, 0x9d, 0x5a, 0x1a, 0xfe, 0x0b, 0x26, 0x64, 0xdf, 0x23, 0x6e,
- 0xd3, 0xb4, 0x61, 0x8b, 0x7d, 0xbf, 0xc7, 0xf7, 0xfd, 0x3e, 0x7f, 0x81, 0x57, 0xda, 0xeb, 0x11,
- 0x66, 0xdc, 0x24, 0x5d, 0x66, 0x06, 0xdc, 0xa5, 0x66, 0x6f, 0xb5, 0x49, 0x05, 0x59, 0x35, 0x3d,
- 0x1a, 0xd0, 0x90, 0x08, 0xea, 0xe2, 0x6e, 0xc8, 0x05, 0x47, 0x8b, 0x09, 0x10, 0x93, 0x2e, 0xc3,
- 0x12, 0x88, 0x53, 0x60, 0x6d, 0xc5, 0x63, 0xa2, 0x15, 0x37, 0xb1, 0xc3, 0x7d, 0xd3, 0xe3, 0x1e,
- 0x37, 0x15, 0xbe, 0x19, 0xbf, 0x53, 0x27, 0x75, 0x50, 0xbf, 0x12, 0x9d, 0x5a, 0x43, 0x33, 0x74,
- 0x78, 0x28, 0x0d, 0x8f, 0x7a, 0xd5, 0xae, 0x0f, 0x31, 0x3e, 0x71, 0x5a, 0x2c, 0xa0, 0x61, 0xdf,
- 0xec, 0xb6, 0x3d, 0x45, 0x0a, 0x69, 0xc4, 0xe3, 0xd0, 0xa1, 0xff, 0xc5, 0x8a, 0x4c, 0x9f, 0x0a,
- 0x72, 0x9c, 0x97, 0x39, 0x8e, 0x15, 0xc6, 0x81, 0x60, 0xfe, 0xa8, 0xcd, 0xcd, 0xd3, 0x08, 0x91,
- 0xd3, 0xa2, 0x3e, 0x39, 0xca, 0x6b, 0xfc, 0x2c, 0xc2, 0xd2, 0x66, 0x8f, 0x86, 0x2d, 0x4a, 0x5c,
- 0xf4, 0x0b, 0xc0, 0x52, 0x97, 0xbb, 0x0f, 0xd8, 0x07, 0xea, 0x56, 0xc1, 0xd2, 0xd4, 0x72, 0x79,
- 0xcd, 0xc4, 0x63, 0x12, 0xc6, 0x19, 0x0b, 0x3f, 0x4f, 0x19, 0xf7, 0x03, 0x11, 0xf6, 0xad, 0xcf,
- 0x60, 0x7b, 0xaf, 0x5e, 0x18, 0xec, 0xd5, 0x4b, 0xd9, 0xfd, 0xdf, 0xbd, 0x7a, 0x7d, 0x34, 0x5e,
- 0x6c, 0xa7, 0x89, 0x3d, 0x65, 0x91, 0xf8, 0xb4, 0x7f, 0x22, 0x64, 0x83, 0xf8, 0xf4, 0xcb, 0x7e,
- 0x7d, 0x65, 0x92, 0x01, 0xe0, 0x17, 0x31, 0x09, 0x04, 0x13, 0x7d, 0x3b, 0x6f, 0xa5, 0xd6, 0x86,
- 0x73, 0x87, 0x8a, 0x44, 0xf3, 0x70, 0xaa, 0x4d, 0xfb, 0x55, 0xb0, 0x04, 0x96, 0x67, 0x6d, 0xf9,
- 0x13, 0xdd, 0x83, 0xd3, 0x3d, 0xd2, 0x89, 0x69, 0xb5, 0xb8, 0x04, 0x96, 0xcb, 0x6b, 0x58, 0x6b,
- 0x3b, 0xf7, 0xc2, 0xdd, 0xb6, 0xa7, 0x72, 0x18, 0xf5, 0x4a, 0xc8, 0xb7, 0x8a, 0xeb, 0xa0, 0xf1,
- 0xa3, 0x08, 0x2b, 0x76, 0x12, 0xfa, 0xdd, 0x0e, 0x89, 0x22, 0xf4, 0x16, 0x96, 0xe4, 0x98, 0x5d,
- 0x22, 0x88, 0x72, 0x2c, 0xaf, 0x5d, 0x3b, 0x49, 0x3d, 0xc2, 0x12, 0x8d, 0x7b, 0xab, 0x78, 0xb3,
- 0xf9, 0x9e, 0x3a, 0xe2, 0x19, 0x15, 0xc4, 0x42, 0x69, 0xa8, 0x70, 0x78, 0x67, 0xe7, 0xaa, 0xe8,
- 0x2a, 0x9c, 0x69, 0x91, 0xc0, 0xed, 0xd0, 0x50, 0x95, 0x3f, 0x6b, 0x9d, 0x4b, 0xe1, 0x33, 0x0f,
- 0x93, 0x6b, 0x3b, 0xfb, 0x8e, 0x9e, 0xc0, 0x12, 0x4f, 0x07, 0x57, 0x9d, 0x52, 0xc5, 0x5c, 0x3c,
- 0x75, 0xc2, 0x56, 0x45, 0x8e, 0x33, 0x3b, 0xd9, 0xb9, 0x00, 0xda, 0x82, 0x50, 0x3e, 0x2b, 0x37,
- 0xee, 0xb0, 0xc0, 0xab, 0x9e, 0x51, 0x72, 0x97, 0xc6, 0xca, 0x6d, 0xe5, 0x50, 0xeb, 0xac, 0x6c,
- 0x65, 0x78, 0xb6, 0x35, 0x99, 0xc6, 0x77, 0x00, 0xe7, 0xf5, 0xfc, 0xe4, 0xfb, 0x40, 0x6f, 0x46,
- 0x32, 0xc4, 0x93, 0x65, 0x28, 0xd9, 0x2a, 0xc1, 0xf9, 0xec, 0x59, 0x66, 0x37, 0x5a, 0x7e, 0x8f,
- 0xe1, 0x34, 0x13, 0xd4, 0x8f, 0xaa, 0x45, 0xf5, 0xe6, 0x2f, 0x8f, 0x6d, 0x41, 0xaf, 0xcb, 0x9a,
- 0x4b, 0x15, 0xa7, 0x1f, 0x49, 0xae, 0x9d, 0x48, 0x34, 0xbe, 0x15, 0xa1, 0xd6, 0x19, 0xe2, 0xb0,
- 0x22, 0x15, 0xb6, 0x68, 0x87, 0x3a, 0x82, 0x87, 0xe9, 0x56, 0xdd, 0x98, 0x20, 0x24, 0xbc, 0xa1,
- 0xf1, 0x92, 0xdd, 0x5a, 0x48, 0x1d, 0x2b, 0xfa, 0x27, 0xfb, 0x90, 0x01, 0x7a, 0x05, 0xcb, 0x82,
- 0x77, 0xe4, 0x8e, 0x33, 0x1e, 0x64, 0x1d, 0x19, 0xba, 0x9f, 0xdc, 0x2e, 0x19, 0xcd, 0xcb, 0x1c,
- 0x66, 0x5d, 0x48, 0x85, 0xcb, 0xc3, 0xbb, 0xc8, 0xd6, 0x75, 0x6a, 0x77, 0xe0, 0xf9, 0x91, 0x7a,
- 0x8e, 0x59, 0xa3, 0x05, 0x7d, 0x8d, 0x66, 0xb5, 0xb5, 0xb0, 0x6e, 0x6f, 0x1f, 0x18, 0x85, 0x9d,
- 0x03, 0xa3, 0xb0, 0x7b, 0x60, 0x14, 0x3e, 0x0e, 0x0c, 0xb0, 0x3d, 0x30, 0xc0, 0xce, 0xc0, 0x00,
- 0xbb, 0x03, 0x03, 0xfc, 0x1e, 0x18, 0xe0, 0xeb, 0x1f, 0xa3, 0xf0, 0x7a, 0x71, 0xcc, 0x1f, 0xff,
- 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x67, 0x22, 0x03, 0x12, 0x06, 0x00, 0x00,
-}
+func (m *Scheduling) Reset() { *m = Scheduling{} }
func (m *Overhead) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -243,7 +68,7 @@ func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- {
v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])]
baseI := i
@@ -418,7 +243,7 @@ func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- {
v := m.NodeSelector[string(keysForNodeSelector[iNdEx])]
baseI := i
@@ -544,7 +369,7 @@ func (this *Overhead) String() string {
for k := range this.PodFixed {
keysForPodFixed = append(keysForPodFixed, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed)
+ sort.Strings(keysForPodFixed)
mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{"
for _, k := range keysForPodFixed {
mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)])
@@ -598,7 +423,7 @@ func (this *Scheduling) String() string {
for k := range this.NodeSelector {
keysForNodeSelector = append(keysForNodeSelector, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+ sort.Strings(keysForNodeSelector)
mapStringForNodeSelector := "map[string]string{"
for _, k := range keysForNodeSelector {
mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
diff --git a/operator/vendor/k8s.io/api/node/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/node/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..dfe8e29d
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,30 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*Overhead) ProtoMessage() {}
+
+func (*RuntimeClass) ProtoMessage() {}
+
+func (*RuntimeClassList) ProtoMessage() {}
+
+func (*Scheduling) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/node/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/node/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..06d7c803
--- /dev/null
+++ b/operator/vendor/k8s.io/api/node/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Overhead) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1beta1.Overhead"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClass) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1beta1.RuntimeClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RuntimeClassList) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1beta1.RuntimeClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scheduling) OpenAPIModelName() string {
+ return "io.k8s.api.node.v1beta1.Scheduling"
+}
diff --git a/operator/vendor/k8s.io/api/policy/v1/doc.go b/operator/vendor/k8s.io/api/policy/v1/doc.go
index ff47e7fd..9b5314bc 100644
--- a/operator/vendor/k8s.io/api/policy/v1/doc.go
+++ b/operator/vendor/k8s.io/api/policy/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.policy.v1
// Package policy is for any kind of policy object. Suitable examples, even if
// they aren't all here, are PodDisruptionBudget,
diff --git a/operator/vendor/k8s.io/api/policy/v1/generated.pb.go b/operator/vendor/k8s.io/api/policy/v1/generated.pb.go
index dd61b726..1d3e0e33 100644
--- a/operator/vendor/k8s.io/api/policy/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/policy/v1/generated.pb.go
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -36,226 +34,15 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Eviction) Reset() { *m = Eviction{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
-func (m *Eviction) Reset() { *m = Eviction{} }
-func (*Eviction) ProtoMessage() {}
-func (*Eviction) Descriptor() ([]byte, []int) {
- return fileDescriptor_204bc6fa48ff56f7, []int{0}
-}
-func (m *Eviction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Eviction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Eviction.Merge(m, src)
-}
-func (m *Eviction) XXX_Size() int {
- return m.Size()
-}
-func (m *Eviction) XXX_DiscardUnknown() {
- xxx_messageInfo_Eviction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Eviction proto.InternalMessageInfo
-
-func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
-func (*PodDisruptionBudget) ProtoMessage() {}
-func (*PodDisruptionBudget) Descriptor() ([]byte, []int) {
- return fileDescriptor_204bc6fa48ff56f7, []int{1}
-}
-func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudget.Merge(m, src)
-}
-func (m *PodDisruptionBudget) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudget) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo
-
-func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
-func (*PodDisruptionBudgetList) ProtoMessage() {}
-func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_204bc6fa48ff56f7, []int{2}
-}
-func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src)
-}
-func (m *PodDisruptionBudgetList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m)
-}
+func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
-var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo
+func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
-func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
-func (*PodDisruptionBudgetSpec) ProtoMessage() {}
-func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_204bc6fa48ff56f7, []int{3}
-}
-func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src)
-}
-func (m *PodDisruptionBudgetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo
-
-func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
-func (*PodDisruptionBudgetStatus) ProtoMessage() {}
-func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_204bc6fa48ff56f7, []int{4}
-}
-func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src)
-}
-func (m *PodDisruptionBudgetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1.Eviction")
- proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1.PodDisruptionBudget")
- proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetList")
- proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetSpec")
- proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetStatus")
- proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetStatus.DisruptedPodsEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/policy/v1/generated.proto", fileDescriptor_204bc6fa48ff56f7)
-}
-
-var fileDescriptor_204bc6fa48ff56f7 = []byte{
- // 840 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44,
- 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x4e, 0x93, 0x68, 0x19, 0x16, 0x58, 0x72, 0x70, 0xaa, 0x88,
- 0xc3, 0x82, 0xd4, 0x31, 0xdb, 0x22, 0xb4, 0xea, 0x01, 0xb5, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xab,
- 0xd9, 0x56, 0x48, 0x08, 0x24, 0x26, 0xf6, 0xd3, 0x64, 0x58, 0xdb, 0x63, 0x79, 0xc6, 0xa1, 0x39,
- 0xd1, 0x8f, 0xc0, 0x57, 0xe0, 0xa3, 0x70, 0x62, 0x8f, 0xe5, 0x56, 0x71, 0x88, 0x58, 0xf3, 0x2d,
- 0x38, 0x21, 0x8f, 0x9d, 0x17, 0x27, 0x0e, 0xcd, 0x72, 0xe8, 0xcd, 0xf3, 0xcc, 0xf3, 0xff, 0x3d,
- 0xf3, 0xbc, 0xcc, 0x24, 0xe8, 0xc3, 0x8b, 0x63, 0x49, 0xb8, 0xb0, 0x58, 0xc8, 0xad, 0x50, 0x78,
- 0xdc, 0x19, 0x5b, 0xa3, 0x23, 0x6b, 0x00, 0x01, 0x44, 0x4c, 0x81, 0x4b, 0xc2, 0x48, 0x28, 0x81,
- 0xf7, 0x33, 0x2f, 0xc2, 0x42, 0x4e, 0x32, 0x2f, 0x32, 0x3a, 0x6a, 0xdd, 0x1e, 0x70, 0x35, 0x8c,
- 0xfb, 0xc4, 0x11, 0xbe, 0x35, 0x10, 0x03, 0x61, 0x69, 0xe7, 0x7e, 0xfc, 0x4c, 0xaf, 0xf4, 0x42,
- 0x7f, 0x65, 0x90, 0xd6, 0xa7, 0xf3, 0x50, 0x3e, 0x73, 0x86, 0x3c, 0x80, 0x68, 0x6c, 0x85, 0x17,
- 0x83, 0xd4, 0x20, 0x2d, 0x1f, 0x14, 0x2b, 0x09, 0xdd, 0xb2, 0xd6, 0xa9, 0xa2, 0x38, 0x50, 0xdc,
- 0x87, 0x15, 0xc1, 0x67, 0xaf, 0x13, 0x48, 0x67, 0x08, 0x3e, 0x5b, 0xd1, 0xdd, 0x5d, 0xa7, 0x8b,
- 0x15, 0xf7, 0x2c, 0x1e, 0x28, 0xa9, 0xa2, 0x65, 0x51, 0xe7, 0x4f, 0x03, 0xed, 0x9e, 0x8c, 0xb8,
- 0xa3, 0xb8, 0x08, 0xf0, 0x0f, 0x68, 0x37, 0xcd, 0xc2, 0x65, 0x8a, 0x1d, 0x18, 0xb7, 0x8c, 0xc3,
- 0x9b, 0x77, 0x3e, 0x21, 0xf3, 0xc2, 0xcd, 0xa0, 0x24, 0xbc, 0x18, 0xa4, 0x06, 0x49, 0x52, 0x6f,
- 0x32, 0x3a, 0x22, 0xbd, 0xfe, 0x8f, 0xe0, 0xa8, 0x53, 0x50, 0xcc, 0xc6, 0x97, 0x93, 0x76, 0x25,
- 0x99, 0xb4, 0xd1, 0xdc, 0x46, 0x67, 0x54, 0xec, 0xa1, 0x86, 0x0b, 0x1e, 0x28, 0xe8, 0x85, 0x69,
- 0x44, 0x79, 0xb0, 0xa5, 0xc3, 0xdc, 0xdd, 0x2c, 0x4c, 0x77, 0x51, 0x6a, 0xbf, 0x9d, 0x4c, 0xda,
- 0x8d, 0x82, 0x89, 0x16, 0xe1, 0x9d, 0x5f, 0xb7, 0xd0, 0x3b, 0x67, 0xc2, 0xed, 0x72, 0x19, 0xc5,
- 0xda, 0x64, 0xc7, 0xee, 0x00, 0xd4, 0x1b, 0xc8, 0xb3, 0x87, 0xb6, 0x65, 0x08, 0x4e, 0x9e, 0xde,
- 0x6d, 0x52, 0x36, 0x7e, 0xa4, 0xe4, 0x68, 0xe7, 0x21, 0x38, 0x76, 0x3d, 0x47, 0x6f, 0xa7, 0x2b,
- 0xaa, 0x41, 0xf8, 0x1b, 0xb4, 0x23, 0x15, 0x53, 0xb1, 0x3c, 0xa8, 0x6a, 0xa4, 0xb5, 0x39, 0x52,
- 0xcb, 0xec, 0x66, 0x0e, 0xdd, 0xc9, 0xd6, 0x34, 0xc7, 0x75, 0x7e, 0x37, 0xd0, 0xfb, 0x25, 0xaa,
- 0xc7, 0x5c, 0x2a, 0xfc, 0xdd, 0x4a, 0x9d, 0xc8, 0x66, 0x75, 0x4a, 0xd5, 0xba, 0x4a, 0x7b, 0x79,
- 0xd4, 0xdd, 0xa9, 0x65, 0xa1, 0x46, 0x5f, 0xa3, 0x1a, 0x57, 0xe0, 0xa7, 0x33, 0x50, 0x3d, 0xbc,
- 0x79, 0xe7, 0xa3, 0x8d, 0x33, 0xb2, 0x1b, 0x39, 0xb5, 0xf6, 0x28, 0xd5, 0xd3, 0x0c, 0xd3, 0xf9,
- 0xa3, 0x5a, 0x9a, 0x49, 0x5a, 0x44, 0xfc, 0x0c, 0xd5, 0x7d, 0x1e, 0x3c, 0x18, 0x31, 0xee, 0xb1,
- 0xbe, 0x07, 0xaf, 0xed, 0x7a, 0x7a, 0x65, 0x48, 0x76, 0x65, 0xc8, 0xa3, 0x40, 0xf5, 0xa2, 0x73,
- 0x15, 0xf1, 0x60, 0x60, 0xef, 0x25, 0x93, 0x76, 0xfd, 0x74, 0x81, 0x44, 0x0b, 0x5c, 0xfc, 0x3d,
- 0xda, 0x95, 0xe0, 0x81, 0xa3, 0x44, 0x74, 0xbd, 0xd1, 0x7e, 0xcc, 0xfa, 0xe0, 0x9d, 0xe7, 0x52,
- 0xbb, 0x9e, 0x96, 0x6c, 0xba, 0xa2, 0x33, 0x24, 0xf6, 0x50, 0xd3, 0x67, 0xcf, 0x9f, 0x06, 0x6c,
- 0x96, 0x48, 0xf5, 0x7f, 0x26, 0x82, 0x93, 0x49, 0xbb, 0x79, 0x5a, 0x60, 0xd1, 0x25, 0x36, 0x7e,
- 0x61, 0xa0, 0x56, 0x1c, 0x0c, 0x81, 0x79, 0x6a, 0x38, 0x3e, 0x13, 0xee, 0xf4, 0x9d, 0x38, 0xd3,
- 0xcd, 0x39, 0xd8, 0xbe, 0x65, 0x1c, 0xde, 0xb0, 0xef, 0x27, 0x93, 0x76, 0xeb, 0xe9, 0x5a, 0xaf,
- 0x7f, 0x26, 0x6d, 0x73, 0xfd, 0xee, 0x93, 0x71, 0x08, 0xf4, 0x3f, 0x62, 0x74, 0x7e, 0xab, 0xa1,
- 0x0f, 0xd6, 0xce, 0x34, 0xfe, 0x0a, 0x61, 0xd1, 0x97, 0x10, 0x8d, 0xc0, 0xfd, 0x22, 0x7b, 0xd7,
- 0xb8, 0x08, 0x74, 0x6f, 0xab, 0x76, 0x2b, 0x9f, 0x11, 0xdc, 0x5b, 0xf1, 0xa0, 0x25, 0x2a, 0xfc,
- 0x33, 0x6a, 0xb8, 0x59, 0x14, 0x70, 0xcf, 0x84, 0x3b, 0x9d, 0x4a, 0xfb, 0x9a, 0xf7, 0x8c, 0x74,
- 0x17, 0x21, 0x27, 0x81, 0x8a, 0xc6, 0xf6, 0xbb, 0xf9, 0x51, 0x1a, 0x85, 0x3d, 0x5a, 0x8c, 0x97,
- 0x26, 0xe3, 0xce, 0x90, 0xf2, 0x81, 0xe7, 0x89, 0x9f, 0xc0, 0xd5, 0xfd, 0xad, 0xcd, 0x93, 0xe9,
- 0xae, 0x78, 0xd0, 0x12, 0x15, 0xfe, 0x1c, 0x35, 0x9d, 0x38, 0x8a, 0x20, 0x50, 0x5f, 0x66, 0x95,
- 0xd5, 0xcd, 0xaa, 0xd9, 0xef, 0xe5, 0x9c, 0xe6, 0xc3, 0xc2, 0x2e, 0x5d, 0xf2, 0x4e, 0xf5, 0x2e,
- 0x48, 0x1e, 0x81, 0x3b, 0xd5, 0xd7, 0x8a, 0xfa, 0x6e, 0x61, 0x97, 0x2e, 0x79, 0xe3, 0x63, 0x54,
- 0x87, 0xe7, 0x21, 0x38, 0xd3, 0x5a, 0xee, 0x68, 0xf5, 0x7e, 0xae, 0xae, 0x9f, 0x2c, 0xec, 0xd1,
- 0x82, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe0, 0xf2, 0xec, 0xd7, 0xe1, 0x2d, 0xdd, 0x03, 0x6b, 0xb3,
- 0x2b, 0xf4, 0x70, 0xaa, 0x9b, 0xbf, 0xcd, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96, 0x87, 0xf0, 0x6a,
- 0x9b, 0xf0, 0x1e, 0xaa, 0x5e, 0xc0, 0x58, 0x8f, 0xcf, 0x0d, 0x9a, 0x7e, 0xe2, 0xfb, 0xa8, 0x36,
- 0x62, 0x5e, 0x0c, 0xf9, 0x55, 0xfe, 0x78, 0xb3, 0x73, 0x3c, 0xe1, 0x3e, 0xd0, 0x4c, 0x78, 0x6f,
- 0xeb, 0xd8, 0xb0, 0xef, 0x5d, 0x5e, 0x99, 0x95, 0x97, 0x57, 0x66, 0xe5, 0xd5, 0x95, 0x59, 0x79,
- 0x91, 0x98, 0xc6, 0x65, 0x62, 0x1a, 0x2f, 0x13, 0xd3, 0x78, 0x95, 0x98, 0xc6, 0x5f, 0x89, 0x69,
- 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0x76, 0xbf, 0xec, 0x7f, 0xcc, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff,
- 0x0f, 0x42, 0xd2, 0x33, 0xde, 0x08, 0x00, 0x00,
-}
+func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
func (m *Eviction) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -519,7 +306,7 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro
for k := range m.DisruptedPods {
keysForDisruptedPods = append(keysForDisruptedPods, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods)
+ sort.Strings(keysForDisruptedPods)
for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- {
v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])]
baseI := i
@@ -732,7 +519,7 @@ func (this *PodDisruptionBudgetStatus) String() string {
for k := range this.DisruptedPods {
keysForDisruptedPods = append(keysForDisruptedPods, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods)
+ sort.Strings(keysForDisruptedPods)
mapStringForDisruptedPods := "map[string]v1.Time{"
for _, k := range keysForDisruptedPods {
mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k])
diff --git a/operator/vendor/k8s.io/api/policy/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/policy/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..2ede2176
--- /dev/null
+++ b/operator/vendor/k8s.io/api/policy/v1/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*Eviction) ProtoMessage() {}
+
+func (*PodDisruptionBudget) ProtoMessage() {}
+
+func (*PodDisruptionBudgetList) ProtoMessage() {}
+
+func (*PodDisruptionBudgetSpec) ProtoMessage() {}
+
+func (*PodDisruptionBudgetStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/policy/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/policy/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..238348e5
--- /dev/null
+++ b/operator/vendor/k8s.io/api/policy/v1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Eviction) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1.Eviction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudget) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1.PodDisruptionBudget"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetList) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1.PodDisruptionBudgetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1.PodDisruptionBudgetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1.PodDisruptionBudgetStatus"
+}
diff --git a/operator/vendor/k8s.io/api/policy/v1beta1/doc.go b/operator/vendor/k8s.io/api/policy/v1beta1/doc.go
index 777106c6..3888e921 100644
--- a/operator/vendor/k8s.io/api/policy/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/policy/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.policy.v1beta1
// Package policy is for any kind of policy object. Suitable examples, even if
// they aren't all here, are PodDisruptionBudget,
diff --git a/operator/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
index c3845e99..84979dc3 100644
--- a/operator/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -36,226 +34,15 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *Eviction) Reset() { *m = Eviction{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
-func (m *Eviction) Reset() { *m = Eviction{} }
-func (*Eviction) ProtoMessage() {}
-func (*Eviction) Descriptor() ([]byte, []int) {
- return fileDescriptor_68b366237812cc96, []int{0}
-}
-func (m *Eviction) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Eviction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Eviction.Merge(m, src)
-}
-func (m *Eviction) XXX_Size() int {
- return m.Size()
-}
-func (m *Eviction) XXX_DiscardUnknown() {
- xxx_messageInfo_Eviction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Eviction proto.InternalMessageInfo
-
-func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
-func (*PodDisruptionBudget) ProtoMessage() {}
-func (*PodDisruptionBudget) Descriptor() ([]byte, []int) {
- return fileDescriptor_68b366237812cc96, []int{1}
-}
-func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudget.Merge(m, src)
-}
-func (m *PodDisruptionBudget) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudget) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo
-
-func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
-func (*PodDisruptionBudgetList) ProtoMessage() {}
-func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_68b366237812cc96, []int{2}
-}
-func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src)
-}
-func (m *PodDisruptionBudgetList) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m)
-}
+func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
-var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo
+func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
-func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
-func (*PodDisruptionBudgetSpec) ProtoMessage() {}
-func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_68b366237812cc96, []int{3}
-}
-func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src)
-}
-func (m *PodDisruptionBudgetSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo
-
-func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
-func (*PodDisruptionBudgetStatus) ProtoMessage() {}
-func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_68b366237812cc96, []int{4}
-}
-func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src)
-}
-func (m *PodDisruptionBudgetStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction")
- proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget")
- proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList")
- proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec")
- proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus")
- proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_68b366237812cc96)
-}
-
-var fileDescriptor_68b366237812cc96 = []byte{
- // 843 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x96, 0x4d, 0x8f, 0xdb, 0x44,
- 0x18, 0xc7, 0xe3, 0xcd, 0x66, 0xd9, 0x0e, 0x49, 0xb4, 0x0c, 0x6f, 0xbb, 0x39, 0x38, 0x55, 0x4e,
- 0x05, 0x89, 0x31, 0xdb, 0x56, 0x68, 0xc5, 0x01, 0x5a, 0x37, 0xab, 0x52, 0xd4, 0xd5, 0xae, 0x26,
- 0xdb, 0x0b, 0x2a, 0x12, 0x13, 0xfb, 0xa9, 0x33, 0xc4, 0xf6, 0x58, 0x9e, 0x71, 0x68, 0x6e, 0x3d,
- 0xf0, 0x01, 0xf8, 0x1e, 0x7c, 0x10, 0xf6, 0xc0, 0xa1, 0xdc, 0x2a, 0x0e, 0x11, 0x6b, 0xbe, 0x05,
- 0x27, 0xe4, 0xb1, 0xf3, 0xe2, 0xbc, 0xd0, 0xb4, 0x07, 0x6e, 0x9e, 0x67, 0x9e, 0xff, 0xef, 0x99,
- 0xe7, 0x65, 0x26, 0x41, 0x9f, 0x0c, 0x4f, 0x24, 0xe1, 0xc2, 0x62, 0x11, 0xb7, 0x22, 0xe1, 0x73,
- 0x67, 0x6c, 0x8d, 0x8e, 0xfb, 0xa0, 0xd8, 0xb1, 0xe5, 0x41, 0x08, 0x31, 0x53, 0xe0, 0x92, 0x28,
- 0x16, 0x4a, 0xe0, 0xa3, 0xdc, 0x95, 0xb0, 0x88, 0x93, 0xdc, 0x95, 0x14, 0xae, 0xad, 0xcf, 0x3c,
- 0xae, 0x06, 0x49, 0x9f, 0x38, 0x22, 0xb0, 0x3c, 0xe1, 0x09, 0x4b, 0x2b, 0xfa, 0xc9, 0x33, 0xbd,
- 0xd2, 0x0b, 0xfd, 0x95, 0x93, 0x5a, 0x77, 0xe7, 0x41, 0x03, 0xe6, 0x0c, 0x78, 0x08, 0xf1, 0xd8,
- 0x8a, 0x86, 0x5e, 0x66, 0x90, 0x56, 0x00, 0x8a, 0x59, 0xa3, 0x95, 0xf8, 0x2d, 0x6b, 0x93, 0x2a,
- 0x4e, 0x42, 0xc5, 0x03, 0x58, 0x11, 0x7c, 0xf1, 0x3a, 0x81, 0x74, 0x06, 0x10, 0xb0, 0x15, 0xdd,
- 0x9d, 0x4d, 0xba, 0x44, 0x71, 0xdf, 0xe2, 0xa1, 0x92, 0x2a, 0x5e, 0x16, 0x75, 0xfe, 0x34, 0xd0,
- 0xfe, 0xe9, 0x88, 0x3b, 0x8a, 0x8b, 0x10, 0xff, 0x80, 0xf6, 0xb3, 0x2c, 0x5c, 0xa6, 0xd8, 0xa1,
- 0x71, 0xd3, 0xb8, 0xf5, 0xee, 0xed, 0xcf, 0xc9, 0xbc, 0x7a, 0x33, 0x28, 0x89, 0x86, 0x5e, 0x66,
- 0x90, 0x24, 0xf3, 0x26, 0xa3, 0x63, 0x72, 0xde, 0xff, 0x11, 0x1c, 0x75, 0x06, 0x8a, 0xd9, 0xf8,
- 0x6a, 0xd2, 0xae, 0xa4, 0x93, 0x36, 0x9a, 0xdb, 0xe8, 0x8c, 0x8a, 0x7d, 0xd4, 0x70, 0xc1, 0x07,
- 0x05, 0xe7, 0x51, 0x16, 0x51, 0x1e, 0xee, 0xe8, 0x30, 0x77, 0xb6, 0x0b, 0xd3, 0x5d, 0x94, 0xda,
- 0xef, 0xa5, 0x93, 0x76, 0xa3, 0x64, 0xa2, 0x65, 0x78, 0xe7, 0xd7, 0x1d, 0xf4, 0xfe, 0x85, 0x70,
- 0xbb, 0x5c, 0xc6, 0x89, 0x36, 0xd9, 0x89, 0xeb, 0x81, 0xfa, 0x1f, 0xf2, 0xbc, 0x44, 0xbb, 0x32,
- 0x02, 0xa7, 0x48, 0xef, 0x36, 0xd9, 0x38, 0x83, 0x64, 0xcd, 0xf9, 0x7a, 0x11, 0x38, 0x76, 0xbd,
- 0xe0, 0xef, 0x66, 0x2b, 0xaa, 0x69, 0xf8, 0x29, 0xda, 0x93, 0x8a, 0xa9, 0x44, 0x1e, 0x56, 0x35,
- 0xf7, 0xee, 0x1b, 0x72, 0xb5, 0xd6, 0x6e, 0x16, 0xe4, 0xbd, 0x7c, 0x4d, 0x0b, 0x66, 0xe7, 0x77,
- 0x03, 0x7d, 0xbc, 0x46, 0xf5, 0x98, 0x4b, 0x85, 0x9f, 0xae, 0x54, 0x8c, 0x6c, 0x57, 0xb1, 0x4c,
- 0xad, 0xeb, 0x75, 0x50, 0x44, 0xdd, 0x9f, 0x5a, 0x16, 0xaa, 0xd5, 0x43, 0x35, 0xae, 0x20, 0xc8,
- 0xa6, 0xa1, 0xba, 0x84, 0xde, 0x22, 0x2d, 0xbb, 0x51, 0xa0, 0x6b, 0x8f, 0x32, 0x08, 0xcd, 0x59,
- 0x9d, 0x3f, 0xaa, 0x6b, 0xd3, 0xc9, 0xca, 0x89, 0x9f, 0xa1, 0x7a, 0xc0, 0xc3, 0xfb, 0x23, 0xc6,
- 0x7d, 0xd6, 0xf7, 0xe1, 0xb5, 0x43, 0x90, 0xdd, 0x20, 0x92, 0xdf, 0x20, 0xf2, 0x28, 0x54, 0xe7,
- 0x71, 0x4f, 0xc5, 0x3c, 0xf4, 0xec, 0x83, 0x74, 0xd2, 0xae, 0x9f, 0x2d, 0x90, 0x68, 0x89, 0x8b,
- 0xbf, 0x47, 0xfb, 0x12, 0x7c, 0x70, 0x94, 0x88, 0xdf, 0x6c, 0xd2, 0x1f, 0xb3, 0x3e, 0xf8, 0xbd,
- 0x42, 0x6a, 0xd7, 0xb3, 0xba, 0x4d, 0x57, 0x74, 0x86, 0xc4, 0x3e, 0x6a, 0x06, 0xec, 0xf9, 0x93,
- 0x90, 0xcd, 0x12, 0xa9, 0xbe, 0x65, 0x22, 0x38, 0x9d, 0xb4, 0x9b, 0x67, 0x25, 0x16, 0x5d, 0x62,
- 0xe3, 0x17, 0x06, 0x6a, 0x25, 0xe1, 0x00, 0x98, 0xaf, 0x06, 0xe3, 0x0b, 0xe1, 0x4e, 0x9f, 0x8d,
- 0x0b, 0xdd, 0xa1, 0xc3, 0xdd, 0x9b, 0xc6, 0xad, 0x1b, 0xf6, 0xbd, 0x74, 0xd2, 0x6e, 0x3d, 0xd9,
- 0xe8, 0xf5, 0xcf, 0xa4, 0x6d, 0x6e, 0xde, 0xbd, 0x1c, 0x47, 0x40, 0xff, 0x23, 0x46, 0xe7, 0xb7,
- 0x1a, 0x3a, 0xda, 0x38, 0xd8, 0xf8, 0x5b, 0x84, 0x45, 0x5f, 0x42, 0x3c, 0x02, 0xf7, 0x61, 0xfe,
- 0xcc, 0x71, 0x11, 0xea, 0xde, 0x56, 0xed, 0x56, 0x31, 0x23, 0xf8, 0x7c, 0xc5, 0x83, 0xae, 0x51,
- 0xe1, 0x9f, 0x0d, 0xd4, 0x70, 0xf3, 0x30, 0xe0, 0x5e, 0x08, 0x77, 0x3a, 0x9b, 0x0f, 0xdf, 0xe6,
- 0xca, 0x91, 0xee, 0x22, 0xe9, 0x34, 0x54, 0xf1, 0xd8, 0xfe, 0xb0, 0x38, 0x50, 0xa3, 0xb4, 0x47,
- 0xcb, 0x41, 0xb3, 0x94, 0xdc, 0x19, 0x52, 0xde, 0xf7, 0x7d, 0xf1, 0x13, 0xb8, 0xba, 0xcb, 0xb5,
- 0x79, 0x4a, 0xdd, 0x15, 0x0f, 0xba, 0x46, 0x85, 0xbf, 0x42, 0x4d, 0x27, 0x89, 0x63, 0x08, 0xd5,
- 0x37, 0x79, 0x7d, 0x75, 0xcb, 0x6a, 0xf6, 0x47, 0x05, 0xa7, 0xf9, 0xa0, 0xb4, 0x4b, 0x97, 0xbc,
- 0x33, 0xbd, 0x0b, 0x92, 0xc7, 0xe0, 0x4e, 0xf5, 0xb5, 0xb2, 0xbe, 0x5b, 0xda, 0xa5, 0x4b, 0xde,
- 0xf8, 0x04, 0xd5, 0xe1, 0x79, 0x04, 0xce, 0xb4, 0xa0, 0x7b, 0x5a, 0xfd, 0x41, 0xa1, 0xae, 0x9f,
- 0x2e, 0xec, 0xd1, 0x92, 0x27, 0x76, 0x10, 0x72, 0x44, 0xe8, 0xf2, 0xfc, 0x27, 0xe3, 0x1d, 0xdd,
- 0x08, 0x6b, 0xbb, 0x8b, 0xf4, 0x60, 0xaa, 0x9b, 0x3f, 0xd8, 0x33, 0x93, 0xa4, 0x0b, 0xd8, 0x96,
- 0x8f, 0xf0, 0x6a, 0x9b, 0xf0, 0x01, 0xaa, 0x0e, 0x61, 0xac, 0x87, 0xe8, 0x06, 0xcd, 0x3e, 0xf1,
- 0x3d, 0x54, 0x1b, 0x31, 0x3f, 0x81, 0xe2, 0x42, 0x7f, 0xba, 0xdd, 0x39, 0x2e, 0x79, 0x00, 0x34,
- 0x17, 0x7e, 0xb9, 0x73, 0x62, 0xd8, 0x5f, 0x5f, 0x5d, 0x9b, 0x95, 0x97, 0xd7, 0x66, 0xe5, 0xd5,
- 0xb5, 0x59, 0x79, 0x91, 0x9a, 0xc6, 0x55, 0x6a, 0x1a, 0x2f, 0x53, 0xd3, 0x78, 0x95, 0x9a, 0xc6,
- 0x5f, 0xa9, 0x69, 0xfc, 0xf2, 0xb7, 0x59, 0xf9, 0xee, 0x68, 0xe3, 0xdf, 0x9c, 0x7f, 0x03, 0x00,
- 0x00, 0xff, 0xff, 0x3c, 0xbe, 0x15, 0xfb, 0x02, 0x09, 0x00, 0x00,
-}
+func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
func (m *Eviction) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -519,7 +306,7 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro
for k := range m.DisruptedPods {
keysForDisruptedPods = append(keysForDisruptedPods, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods)
+ sort.Strings(keysForDisruptedPods)
for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- {
v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])]
baseI := i
@@ -732,7 +519,7 @@ func (this *PodDisruptionBudgetStatus) String() string {
for k := range this.DisruptedPods {
keysForDisruptedPods = append(keysForDisruptedPods, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods)
+ sort.Strings(keysForDisruptedPods)
mapStringForDisruptedPods := "map[string]v1.Time{"
for _, k := range keysForDisruptedPods {
mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k])
diff --git a/operator/vendor/k8s.io/api/policy/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/policy/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..01e0847f
--- /dev/null
+++ b/operator/vendor/k8s.io/api/policy/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,32 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*Eviction) ProtoMessage() {}
+
+func (*PodDisruptionBudget) ProtoMessage() {}
+
+func (*PodDisruptionBudgetList) ProtoMessage() {}
+
+func (*PodDisruptionBudgetSpec) ProtoMessage() {}
+
+func (*PodDisruptionBudgetStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/policy/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/policy/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..236493b1
--- /dev/null
+++ b/operator/vendor/k8s.io/api/policy/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Eviction) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1beta1.Eviction"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudget) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1beta1.PodDisruptionBudget"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetList) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1beta1.PodDisruptionBudgetList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetSpec) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1beta1.PodDisruptionBudgetSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodDisruptionBudgetStatus) OpenAPIModelName() string {
+ return "io.k8s.api.policy.v1beta1.PodDisruptionBudgetStatus"
+}
diff --git a/operator/vendor/k8s.io/api/rbac/v1/doc.go b/operator/vendor/k8s.io/api/rbac/v1/doc.go
index 40854627..f120f5d3 100644
--- a/operator/vendor/k8s.io/api/rbac/v1/doc.go
+++ b/operator/vendor/k8s.io/api/rbac/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.rbac.v1
+
// +groupName=rbac.authorization.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/rbac/v1/generated.pb.go b/operator/vendor/k8s.io/api/rbac/v1/generated.pb.go
index 112d18fb..ed31ec95 100644
--- a/operator/vendor/k8s.io/api/rbac/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/rbac/v1/generated.pb.go
@@ -24,434 +24,36 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-func (*AggregationRule) ProtoMessage() {}
-func (*AggregationRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{0}
-}
-func (m *AggregationRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AggregationRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AggregationRule.Merge(m, src)
-}
-func (m *AggregationRule) XXX_Size() int {
- return m.Size()
-}
-func (m *AggregationRule) XXX_DiscardUnknown() {
- xxx_messageInfo_AggregationRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AggregationRule proto.InternalMessageInfo
-
-func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (*ClusterRole) ProtoMessage() {}
-func (*ClusterRole) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{1}
-}
-func (m *ClusterRole) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRole) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRole.Merge(m, src)
-}
-func (m *ClusterRole) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRole) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRole.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRole proto.InternalMessageInfo
-
-func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-func (*ClusterRoleBinding) ProtoMessage() {}
-func (*ClusterRoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{2}
-}
-func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBinding.Merge(m, src)
-}
-func (m *ClusterRoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo
-
-func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (*ClusterRoleBindingList) ProtoMessage() {}
-func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{3}
-}
-func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBindingList.Merge(m, src)
-}
-func (m *ClusterRoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo
-
-func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-func (*ClusterRoleList) ProtoMessage() {}
-func (*ClusterRoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{4}
-}
-func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleList.Merge(m, src)
-}
-func (m *ClusterRoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo
-
-func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (*PolicyRule) ProtoMessage() {}
-func (*PolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{5}
-}
-func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRule.Merge(m, src)
-}
-func (m *PolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRule.DiscardUnknown(m)
-}
+func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (m *Role) Reset() { *m = Role{} }
-func (*Role) ProtoMessage() {}
-func (*Role) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{6}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Role) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
- return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
- xxx_messageInfo_Role.DiscardUnknown(m)
-}
+func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-var xxx_messageInfo_Role proto.InternalMessageInfo
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (*RoleBinding) ProtoMessage() {}
-func (*RoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{7}
-}
-func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBinding.Merge(m, src)
-}
-func (m *RoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBinding.DiscardUnknown(m)
-}
+func (m *Role) Reset() { *m = Role{} }
-var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-func (*RoleBindingList) ProtoMessage() {}
-func (*RoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{8}
-}
-func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBindingList.Merge(m, src)
-}
-func (m *RoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
-}
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
+func (m *RoleList) Reset() { *m = RoleList{} }
-func (m *RoleList) Reset() { *m = RoleList{} }
-func (*RoleList) ProtoMessage() {}
-func (*RoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{9}
-}
-func (m *RoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleList.Merge(m, src)
-}
-func (m *RoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleList proto.InternalMessageInfo
+func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (*RoleRef) ProtoMessage() {}
-func (*RoleRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{10}
-}
-func (m *RoleRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleRef.Merge(m, src)
-}
-func (m *RoleRef) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleRef) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleRef proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_c8ba2e7dd472de66, []int{11}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule")
- proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole")
- proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding")
- proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList")
- proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1.ClusterRoleList")
- proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1.PolicyRule")
- proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1.Role")
- proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1.RoleBinding")
- proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1.RoleBindingList")
- proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1.RoleList")
- proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/rbac/v1/generated.proto", fileDescriptor_c8ba2e7dd472de66)
-}
-
-var fileDescriptor_c8ba2e7dd472de66 = []byte{
- // 790 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0x4d, 0x6f, 0xd3, 0x4a,
- 0x14, 0xcd, 0xa4, 0x89, 0x1a, 0x4f, 0x5e, 0x94, 0xd7, 0x79, 0xd5, 0x93, 0xd5, 0xf7, 0xe4, 0x54,
- 0x46, 0x42, 0x95, 0x00, 0x9b, 0x16, 0x04, 0xdd, 0x74, 0x51, 0x17, 0x81, 0xaa, 0x96, 0x52, 0x4d,
- 0x05, 0x0b, 0xc4, 0x82, 0x89, 0x33, 0x75, 0x87, 0xf8, 0x4b, 0x1e, 0x3b, 0x52, 0xc5, 0x06, 0x21,
- 0xb1, 0x60, 0xc7, 0x12, 0x7e, 0x01, 0x1b, 0x58, 0xf2, 0x0b, 0xd8, 0x74, 0xd9, 0x65, 0x57, 0x11,
- 0x35, 0x3f, 0x04, 0xe4, 0xaf, 0x38, 0x1f, 0x2e, 0xcd, 0x2a, 0x12, 0x62, 0x95, 0xcc, 0xbd, 0xe7,
- 0x9e, 0x7b, 0xe6, 0xd8, 0xf7, 0x26, 0x50, 0xee, 0xae, 0x73, 0x85, 0x39, 0x2a, 0x71, 0x99, 0xea,
- 0xb5, 0x89, 0xae, 0xf6, 0x56, 0x55, 0x83, 0xda, 0xd4, 0x23, 0x3e, 0xed, 0x28, 0xae, 0xe7, 0xf8,
- 0x0e, 0x42, 0x09, 0x46, 0x21, 0x2e, 0x53, 0x22, 0x8c, 0xd2, 0x5b, 0x5d, 0xba, 0x61, 0x30, 0xff,
- 0x28, 0x68, 0x2b, 0xba, 0x63, 0xa9, 0x86, 0x63, 0x38, 0x6a, 0x0c, 0x6d, 0x07, 0x87, 0xf1, 0x29,
- 0x3e, 0xc4, 0xdf, 0x12, 0x8a, 0xa5, 0xdb, 0x79, 0x1b, 0x8b, 0xe8, 0x47, 0xcc, 0xa6, 0xde, 0xb1,
- 0xea, 0x76, 0x8d, 0x28, 0xc0, 0x55, 0x8b, 0xfa, 0xa4, 0xa0, 0xf1, 0x92, 0x7a, 0x51, 0x95, 0x17,
- 0xd8, 0x3e, 0xb3, 0xe8, 0x44, 0xc1, 0x9d, 0xcb, 0x0a, 0xb8, 0x7e, 0x44, 0x2d, 0x32, 0x5e, 0x27,
- 0x7f, 0x00, 0xb0, 0xb9, 0x69, 0x18, 0x1e, 0x35, 0x88, 0xcf, 0x1c, 0x1b, 0x07, 0x26, 0x45, 0x6f,
- 0x00, 0x5c, 0xd4, 0xcd, 0x80, 0xfb, 0xd4, 0xc3, 0x8e, 0x49, 0x0f, 0xa8, 0x49, 0x75, 0xdf, 0xf1,
- 0xb8, 0x08, 0x96, 0xe7, 0x56, 0xea, 0x6b, 0xb7, 0x94, 0xdc, 0x95, 0x41, 0x2f, 0xc5, 0xed, 0x1a,
- 0x51, 0x80, 0x2b, 0xd1, 0x95, 0x94, 0xde, 0xaa, 0xb2, 0x4b, 0xda, 0xd4, 0xcc, 0x6a, 0xb5, 0xff,
- 0x4f, 0xfa, 0xad, 0x52, 0xd8, 0x6f, 0x2d, 0x6e, 0x15, 0x10, 0xe3, 0xc2, 0x76, 0xf2, 0xfb, 0x32,
- 0xac, 0x0f, 0xc1, 0xd1, 0x73, 0x58, 0x8b, 0xc8, 0x3b, 0xc4, 0x27, 0x22, 0x58, 0x06, 0x2b, 0xf5,
- 0xb5, 0x9b, 0xd3, 0x49, 0x79, 0xd4, 0x7e, 0x41, 0x75, 0xff, 0x21, 0xf5, 0x89, 0x86, 0x52, 0x1d,
- 0x30, 0x8f, 0xe1, 0x01, 0x2b, 0xda, 0x82, 0x55, 0x2f, 0x30, 0x29, 0x17, 0xcb, 0xf1, 0x4d, 0x25,
- 0x65, 0xf2, 0xf9, 0x2b, 0xfb, 0x8e, 0xc9, 0xf4, 0xe3, 0xc8, 0x28, 0xad, 0x91, 0x92, 0x55, 0xa3,
- 0x13, 0xc7, 0x49, 0x2d, 0x6a, 0xc3, 0x26, 0x19, 0x75, 0x54, 0x9c, 0x8b, 0xd5, 0x5e, 0x29, 0xa2,
- 0x1b, 0x33, 0x5f, 0xfb, 0x27, 0xec, 0xb7, 0xc6, 0x9f, 0x08, 0x1e, 0x27, 0x94, 0xdf, 0x96, 0x21,
- 0x1a, 0xb2, 0x46, 0x63, 0x76, 0x87, 0xd9, 0xc6, 0x0c, 0x1c, 0xda, 0x86, 0x35, 0x1e, 0xc4, 0x89,
- 0xcc, 0xa4, 0xff, 0x8a, 0x6e, 0x75, 0x90, 0x60, 0xb4, 0xbf, 0x53, 0xb2, 0x5a, 0x1a, 0xe0, 0x78,
- 0x50, 0x8e, 0xee, 0xc3, 0x79, 0xcf, 0x31, 0x29, 0xa6, 0x87, 0xa9, 0x3f, 0x85, 0x4c, 0x38, 0x81,
- 0x68, 0xcd, 0x94, 0x69, 0x3e, 0x0d, 0xe0, 0xac, 0x58, 0xfe, 0x0a, 0xe0, 0xbf, 0x93, 0x5e, 0xec,
- 0x32, 0xee, 0xa3, 0x67, 0x13, 0x7e, 0x28, 0x53, 0xbe, 0xbc, 0x8c, 0x27, 0x6e, 0x0c, 0x2e, 0x90,
- 0x45, 0x86, 0xbc, 0xd8, 0x81, 0x55, 0xe6, 0x53, 0x2b, 0x33, 0xe2, 0x6a, 0x91, 0xfc, 0x49, 0x61,
- 0xf9, 0x5b, 0xb3, 0x1d, 0x15, 0xe3, 0x84, 0x43, 0xfe, 0x02, 0x60, 0x73, 0x08, 0x3c, 0x03, 0xf9,
- 0xf7, 0x46, 0xe5, 0xb7, 0x2e, 0x93, 0x5f, 0xac, 0xfb, 0x07, 0x80, 0x30, 0x1f, 0x09, 0xd4, 0x82,
- 0xd5, 0x1e, 0xf5, 0xda, 0xc9, 0xae, 0x10, 0x34, 0x21, 0xc2, 0x3f, 0x89, 0x02, 0x38, 0x89, 0xa3,
- 0x6b, 0x50, 0x20, 0x2e, 0x7b, 0xe0, 0x39, 0x81, 0x9b, 0x74, 0x16, 0xb4, 0x46, 0xd8, 0x6f, 0x09,
- 0x9b, 0xfb, 0xdb, 0x49, 0x10, 0xe7, 0xf9, 0x08, 0xec, 0x51, 0xee, 0x04, 0x9e, 0x4e, 0xb9, 0x38,
- 0x97, 0x83, 0x71, 0x16, 0xc4, 0x79, 0x1e, 0xdd, 0x85, 0x8d, 0xec, 0xb0, 0x47, 0x2c, 0xca, 0xc5,
- 0x4a, 0x5c, 0xb0, 0x10, 0xf6, 0x5b, 0x0d, 0x3c, 0x9c, 0xc0, 0xa3, 0x38, 0xb4, 0x01, 0x9b, 0xb6,
- 0x63, 0x67, 0x90, 0xc7, 0x78, 0x97, 0x8b, 0xd5, 0xb8, 0x34, 0x9e, 0xc5, 0xbd, 0xd1, 0x14, 0x1e,
- 0xc7, 0xca, 0x9f, 0x01, 0xac, 0xfc, 0x46, 0xfb, 0x49, 0x7e, 0x5d, 0x86, 0xf5, 0x3f, 0x7e, 0x69,
- 0x44, 0xe3, 0x36, 0xdb, 0x6d, 0x31, 0xcd, 0xb8, 0x5d, 0xbe, 0x26, 0x3e, 0x02, 0x58, 0x9b, 0xd1,
- 0x7e, 0xd8, 0x18, 0x15, 0x2c, 0x5e, 0x28, 0xb8, 0x58, 0xe9, 0x4b, 0x98, 0xb9, 0x8e, 0xae, 0xc3,
- 0x5a, 0x36, 0xd3, 0xb1, 0x4e, 0x21, 0xef, 0x9b, 0x8d, 0x3d, 0x1e, 0x20, 0xd0, 0x32, 0xac, 0x74,
- 0x99, 0xdd, 0x11, 0xcb, 0x31, 0xf2, 0xaf, 0x14, 0x59, 0xd9, 0x61, 0x76, 0x07, 0xc7, 0x99, 0x08,
- 0x61, 0x13, 0x2b, 0xf9, 0x59, 0x1d, 0x42, 0x44, 0xd3, 0x8c, 0xe3, 0x8c, 0xfc, 0x09, 0xc0, 0xf9,
- 0xf4, 0xed, 0x19, 0xf0, 0x81, 0x0b, 0xf9, 0x86, 0xf5, 0x95, 0xa7, 0xd1, 0xf7, 0xeb, 0xee, 0x48,
- 0x85, 0x42, 0xf4, 0xc9, 0x5d, 0xa2, 0x53, 0xb1, 0x12, 0xc3, 0x16, 0x52, 0x98, 0xb0, 0x97, 0x25,
- 0x70, 0x8e, 0xd1, 0xd6, 0x4f, 0xce, 0xa5, 0xd2, 0xe9, 0xb9, 0x54, 0x3a, 0x3b, 0x97, 0x4a, 0xaf,
- 0x42, 0x09, 0x9c, 0x84, 0x12, 0x38, 0x0d, 0x25, 0x70, 0x16, 0x4a, 0xe0, 0x5b, 0x28, 0x81, 0x77,
- 0xdf, 0xa5, 0xd2, 0x53, 0x34, 0xf9, 0x8f, 0xf5, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xff,
- 0x5a, 0x4f, 0xc6, 0x0a, 0x00, 0x00,
-}
+func (m *Subject) Reset() { *m = Subject{} }
func (m *AggregationRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/rbac/v1/generated.proto b/operator/vendor/k8s.io/api/rbac/v1/generated.proto
index 87b8f832..56869814 100644
--- a/operator/vendor/k8s.io/api/rbac/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/rbac/v1/generated.proto
@@ -185,6 +185,8 @@ message RoleRef {
optional string kind = 2;
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
optional string name = 3;
}
@@ -203,6 +205,8 @@ message Subject {
optional string apiGroup = 2;
// Name of the object being referenced.
+ // +required
+ // +k8s:required
optional string name = 3;
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
diff --git a/operator/vendor/k8s.io/api/rbac/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/rbac/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..6e29e012
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1/generated.protomessage.pb.go
@@ -0,0 +1,46 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*AggregationRule) ProtoMessage() {}
+
+func (*ClusterRole) ProtoMessage() {}
+
+func (*ClusterRoleBinding) ProtoMessage() {}
+
+func (*ClusterRoleBindingList) ProtoMessage() {}
+
+func (*ClusterRoleList) ProtoMessage() {}
+
+func (*PolicyRule) ProtoMessage() {}
+
+func (*Role) ProtoMessage() {}
+
+func (*RoleBinding) ProtoMessage() {}
+
+func (*RoleBindingList) ProtoMessage() {}
+
+func (*RoleList) ProtoMessage() {}
+
+func (*RoleRef) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/rbac/v1/types.go b/operator/vendor/k8s.io/api/rbac/v1/types.go
index f9628b85..2fde11b3 100644
--- a/operator/vendor/k8s.io/api/rbac/v1/types.go
+++ b/operator/vendor/k8s.io/api/rbac/v1/types.go
@@ -86,6 +86,8 @@ type Subject struct {
// +optional
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
// Name of the object being referenced.
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
// the Authorizer should report an error.
@@ -101,6 +103,8 @@ type RoleRef struct {
// Kind is the type of resource being referenced
Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
diff --git a/operator/vendor/k8s.io/api/rbac/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/rbac/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..94da8174
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1/zz_generated.model_name.go
@@ -0,0 +1,82 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AggregationRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.AggregationRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRole) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.ClusterRole"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.ClusterRoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.ClusterRoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.ClusterRoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.PolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Role) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.Role"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.RoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.RoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.RoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleRef) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.RoleRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1.Subject"
+}
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/operator/vendor/k8s.io/api/rbac/v1alpha1/doc.go
index 70d3c0e9..079b791b 100644
--- a/operator/vendor/k8s.io/api/rbac/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.api.rbac.v1alpha1
// +groupName=rbac.authorization.k8s.io
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
index ee3c7bfc..f2ed252b 100644
--- a/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
@@ -24,436 +24,36 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-func (*AggregationRule) ProtoMessage() {}
-func (*AggregationRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{0}
-}
-func (m *AggregationRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AggregationRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AggregationRule.Merge(m, src)
-}
-func (m *AggregationRule) XXX_Size() int {
- return m.Size()
-}
-func (m *AggregationRule) XXX_DiscardUnknown() {
- xxx_messageInfo_AggregationRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AggregationRule proto.InternalMessageInfo
-
-func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (*ClusterRole) ProtoMessage() {}
-func (*ClusterRole) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{1}
-}
-func (m *ClusterRole) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRole) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRole.Merge(m, src)
-}
-func (m *ClusterRole) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRole) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRole.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRole proto.InternalMessageInfo
-
-func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-func (*ClusterRoleBinding) ProtoMessage() {}
-func (*ClusterRoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{2}
-}
-func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBinding.Merge(m, src)
-}
-func (m *ClusterRoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo
-
-func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (*ClusterRoleBindingList) ProtoMessage() {}
-func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{3}
-}
-func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBindingList.Merge(m, src)
-}
-func (m *ClusterRoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo
-
-func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-func (*ClusterRoleList) ProtoMessage() {}
-func (*ClusterRoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{4}
-}
-func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleList.Merge(m, src)
-}
-func (m *ClusterRoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo
-
-func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (*PolicyRule) ProtoMessage() {}
-func (*PolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{5}
-}
-func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRule.Merge(m, src)
-}
-func (m *PolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRule.DiscardUnknown(m)
-}
+func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (m *Role) Reset() { *m = Role{} }
-func (*Role) ProtoMessage() {}
-func (*Role) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{6}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Role) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
- return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
- xxx_messageInfo_Role.DiscardUnknown(m)
-}
+func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-var xxx_messageInfo_Role proto.InternalMessageInfo
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (*RoleBinding) ProtoMessage() {}
-func (*RoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{7}
-}
-func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBinding.Merge(m, src)
-}
-func (m *RoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBinding.DiscardUnknown(m)
-}
+func (m *Role) Reset() { *m = Role{} }
-var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-func (*RoleBindingList) ProtoMessage() {}
-func (*RoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{8}
-}
-func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBindingList.Merge(m, src)
-}
-func (m *RoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
-}
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
+func (m *RoleList) Reset() { *m = RoleList{} }
-func (m *RoleList) Reset() { *m = RoleList{} }
-func (*RoleList) ProtoMessage() {}
-func (*RoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{9}
-}
-func (m *RoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleList.Merge(m, src)
-}
-func (m *RoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleList proto.InternalMessageInfo
+func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (*RoleRef) ProtoMessage() {}
-func (*RoleRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{10}
-}
-func (m *RoleRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleRef.Merge(m, src)
-}
-func (m *RoleRef) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleRef) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleRef proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_758889dfd9a88fa6, []int{11}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule")
- proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole")
- proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding")
- proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList")
- proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleList")
- proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1alpha1.PolicyRule")
- proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1alpha1.Role")
- proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.RoleBinding")
- proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.RoleBindingList")
- proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1alpha1.RoleList")
- proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_758889dfd9a88fa6)
-}
-
-var fileDescriptor_758889dfd9a88fa6 = []byte{
- // 819 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0xe3, 0x44,
- 0x14, 0xce, 0xa4, 0x09, 0x4d, 0x26, 0x44, 0xa1, 0x43, 0x85, 0xac, 0x0a, 0x39, 0xc5, 0x02, 0xa9,
- 0x88, 0x62, 0xd3, 0x82, 0x80, 0x0b, 0x48, 0x75, 0x0f, 0x28, 0x10, 0xda, 0x32, 0x15, 0x3d, 0x20,
- 0x0e, 0x4c, 0x9c, 0xa9, 0x33, 0xc4, 0xbf, 0xe4, 0xb1, 0x23, 0x55, 0x5c, 0xb8, 0x70, 0x45, 0x5c,
- 0x38, 0x70, 0xe7, 0xca, 0x85, 0x3d, 0xee, 0x3f, 0xd0, 0xbd, 0xf5, 0xd8, 0x53, 0xb4, 0xf5, 0xfe,
- 0x21, 0xbb, 0xf2, 0xd8, 0x8e, 0x9d, 0x5f, 0x9b, 0x9c, 0x22, 0xad, 0xb4, 0xa7, 0x64, 0xde, 0xfb,
- 0xde, 0xf7, 0xde, 0xfb, 0x66, 0xde, 0x4b, 0xe0, 0xc1, 0xf0, 0x4b, 0xae, 0x32, 0x57, 0x23, 0x1e,
- 0xd3, 0xfc, 0x1e, 0x31, 0xb4, 0xd1, 0x11, 0xb1, 0xbc, 0x01, 0x39, 0xd2, 0x4c, 0xea, 0x50, 0x9f,
- 0x04, 0xb4, 0xaf, 0x7a, 0xbe, 0x1b, 0xb8, 0x48, 0x4a, 0x90, 0x2a, 0xf1, 0x98, 0x1a, 0x23, 0xd5,
- 0x0c, 0xb9, 0xf7, 0xb1, 0xc9, 0x82, 0x41, 0xd8, 0x53, 0x0d, 0xd7, 0xd6, 0x4c, 0xd7, 0x74, 0x35,
- 0x11, 0xd0, 0x0b, 0xaf, 0xc5, 0x49, 0x1c, 0xc4, 0xb7, 0x84, 0x68, 0xef, 0xb3, 0x3c, 0xa5, 0x4d,
- 0x8c, 0x01, 0x73, 0xa8, 0x7f, 0xa3, 0x79, 0x43, 0x33, 0x36, 0x70, 0xcd, 0xa6, 0x01, 0xd1, 0x46,
- 0x73, 0xe9, 0xf7, 0xb4, 0x65, 0x51, 0x7e, 0xe8, 0x04, 0xcc, 0xa6, 0x73, 0x01, 0x9f, 0xaf, 0x0a,
- 0xe0, 0xc6, 0x80, 0xda, 0x64, 0x36, 0x4e, 0xf9, 0x07, 0xc0, 0xd6, 0x89, 0x69, 0xfa, 0xd4, 0x24,
- 0x01, 0x73, 0x1d, 0x1c, 0x5a, 0x14, 0xfd, 0x01, 0xe0, 0xae, 0x61, 0x85, 0x3c, 0xa0, 0x3e, 0x76,
- 0x2d, 0x7a, 0x49, 0x2d, 0x6a, 0x04, 0xae, 0xcf, 0x25, 0xb0, 0xbf, 0x75, 0xd0, 0x38, 0xfe, 0x54,
- 0xcd, 0xb5, 0x99, 0xe4, 0x52, 0xbd, 0xa1, 0x19, 0x1b, 0xb8, 0x1a, 0xb7, 0xa4, 0x8e, 0x8e, 0xd4,
- 0x2e, 0xe9, 0x51, 0x2b, 0x8b, 0xd5, 0xdf, 0xbd, 0x1d, 0xb7, 0x4b, 0xd1, 0xb8, 0xbd, 0x7b, 0xba,
- 0x80, 0x18, 0x2f, 0x4c, 0xa7, 0xfc, 0x5b, 0x86, 0x8d, 0x02, 0x1c, 0xfd, 0x02, 0x6b, 0x31, 0x79,
- 0x9f, 0x04, 0x44, 0x02, 0xfb, 0xe0, 0xa0, 0x71, 0xfc, 0xc9, 0x7a, 0xa5, 0x9c, 0xf7, 0x7e, 0xa5,
- 0x46, 0xf0, 0x3d, 0x0d, 0x88, 0x8e, 0xd2, 0x3a, 0x60, 0x6e, 0xc3, 0x13, 0x56, 0xd4, 0x81, 0x55,
- 0x3f, 0xb4, 0x28, 0x97, 0xca, 0xa2, 0xd3, 0xf7, 0xd5, 0x65, 0xaf, 0x40, 0xbd, 0x70, 0x2d, 0x66,
- 0xdc, 0xc4, 0x72, 0xe9, 0xcd, 0x94, 0xb2, 0x1a, 0x9f, 0x38, 0x4e, 0x18, 0xd0, 0x00, 0xb6, 0xc8,
- 0xb4, 0xae, 0xd2, 0x96, 0xa8, 0xf9, 0xc3, 0xe5, 0xa4, 0x33, 0x17, 0xa1, 0xbf, 0x1d, 0x8d, 0xdb,
- 0xb3, 0xb7, 0x83, 0x67, 0x69, 0x95, 0xbf, 0xcb, 0x10, 0x15, 0x64, 0xd2, 0x99, 0xd3, 0x67, 0x8e,
- 0xb9, 0x01, 0xb5, 0xce, 0x61, 0x8d, 0x87, 0xc2, 0x91, 0x09, 0xf6, 0xde, 0xf2, 0xde, 0x2e, 0x13,
- 0xa4, 0xfe, 0x56, 0x4a, 0x59, 0x4b, 0x0d, 0x1c, 0x4f, 0x48, 0x50, 0x17, 0x6e, 0xfb, 0xae, 0x45,
- 0x31, 0xbd, 0x4e, 0xb5, 0x7a, 0x09, 0x1f, 0x4e, 0x80, 0x7a, 0x2b, 0xe5, 0xdb, 0x4e, 0x0d, 0x38,
- 0xa3, 0x50, 0x9e, 0x00, 0xf8, 0xce, 0xbc, 0x2e, 0x5d, 0xc6, 0x03, 0xf4, 0xf3, 0x9c, 0x36, 0xea,
- 0x9a, 0x8f, 0x9a, 0xf1, 0x44, 0x99, 0x49, 0x1b, 0x99, 0xa5, 0xa0, 0xcb, 0x0f, 0xb0, 0xca, 0x02,
- 0x6a, 0x67, 0xa2, 0x1c, 0x2e, 0x6f, 0x62, 0xbe, 0xbc, 0xfc, 0x35, 0x75, 0x62, 0x0a, 0x9c, 0x30,
- 0x29, 0x8f, 0x01, 0x6c, 0x15, 0xc0, 0x1b, 0x68, 0xe2, 0xdb, 0xe9, 0x26, 0x3e, 0x58, 0xaf, 0x89,
- 0xc5, 0xd5, 0x3f, 0x07, 0x10, 0xe6, 0x03, 0x83, 0xda, 0xb0, 0x3a, 0xa2, 0x7e, 0x2f, 0xd9, 0x27,
- 0x75, 0xbd, 0x1e, 0xe3, 0xaf, 0x62, 0x03, 0x4e, 0xec, 0xe8, 0x23, 0x58, 0x27, 0x1e, 0xfb, 0xc6,
- 0x77, 0x43, 0x8f, 0x4b, 0x5b, 0x02, 0xd4, 0x8c, 0xc6, 0xed, 0xfa, 0xc9, 0x45, 0x27, 0x31, 0xe2,
- 0xdc, 0x1f, 0x83, 0x7d, 0xca, 0xdd, 0xd0, 0x37, 0x28, 0x97, 0x2a, 0x39, 0x18, 0x67, 0x46, 0x9c,
- 0xfb, 0xd1, 0x17, 0xb0, 0x99, 0x1d, 0xce, 0x88, 0x4d, 0xb9, 0x54, 0x15, 0x01, 0x3b, 0xd1, 0xb8,
- 0xdd, 0xc4, 0x45, 0x07, 0x9e, 0xc6, 0xa1, 0xaf, 0x60, 0xcb, 0x71, 0x9d, 0x0c, 0xf2, 0x23, 0xee,
- 0x72, 0xe9, 0x0d, 0x11, 0x2a, 0x66, 0xf4, 0x6c, 0xda, 0x85, 0x67, 0xb1, 0xca, 0x23, 0x00, 0x2b,
- 0xaf, 0xdc, 0x0e, 0x53, 0xfe, 0x2c, 0xc3, 0xc6, 0xeb, 0x95, 0x52, 0x58, 0x29, 0xf1, 0x18, 0x6e,
- 0x76, 0x97, 0xac, 0x3f, 0x86, 0xab, 0x97, 0xc8, 0x7f, 0x00, 0xd6, 0x36, 0xb4, 0x3d, 0x4e, 0xa7,
- 0xcb, 0x96, 0x57, 0x94, 0xbd, 0xb8, 0xde, 0xdf, 0x60, 0x76, 0x03, 0xe8, 0x10, 0xd6, 0xb2, 0x89,
- 0x17, 0xd5, 0xd6, 0xf3, 0xec, 0xd9, 0x52, 0xc0, 0x13, 0x04, 0xda, 0x87, 0x95, 0x21, 0x73, 0xfa,
- 0x52, 0x59, 0x20, 0xdf, 0x4c, 0x91, 0x95, 0xef, 0x98, 0xd3, 0xc7, 0xc2, 0x13, 0x23, 0x1c, 0x62,
- 0x27, 0x3f, 0xc9, 0x05, 0x44, 0x3c, 0xeb, 0x58, 0x78, 0x94, 0xff, 0x01, 0xdc, 0x4e, 0xdf, 0xd3,
- 0x84, 0x0f, 0x2c, 0xe5, 0x3b, 0x86, 0x90, 0x78, 0xec, 0x8a, 0xfa, 0x9c, 0xb9, 0x4e, 0x9a, 0x77,
- 0xf2, 0xd2, 0x4f, 0x2e, 0x3a, 0xa9, 0x07, 0x17, 0x50, 0xab, 0x6b, 0x40, 0x1a, 0xac, 0xc7, 0x9f,
- 0xdc, 0x23, 0x06, 0x95, 0x2a, 0x02, 0xb6, 0x93, 0xc2, 0xea, 0x67, 0x99, 0x03, 0xe7, 0x18, 0xfd,
- 0xeb, 0xdb, 0x07, 0xb9, 0x74, 0xf7, 0x20, 0x97, 0xee, 0x1f, 0xe4, 0xd2, 0xef, 0x91, 0x0c, 0x6e,
- 0x23, 0x19, 0xdc, 0x45, 0x32, 0xb8, 0x8f, 0x64, 0xf0, 0x34, 0x92, 0xc1, 0x5f, 0xcf, 0xe4, 0xd2,
- 0x4f, 0xd2, 0xb2, 0x7f, 0xc1, 0x2f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x02, 0x55, 0xe5, 0x20,
- 0x0b, 0x00, 0x00,
-}
+func (m *Subject) Reset() { *m = Subject{} }
func (m *AggregationRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
index 19d43cde..f787f397 100644
--- a/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
@@ -190,6 +190,8 @@ message RoleRef {
optional string kind = 2;
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
optional string name = 3;
}
@@ -208,6 +210,8 @@ message Subject {
optional string apiVersion = 2;
// Name of the object being referenced.
+ // +required
+ // +k8s:required
optional string name = 3;
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..20f8d575
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,46 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*AggregationRule) ProtoMessage() {}
+
+func (*ClusterRole) ProtoMessage() {}
+
+func (*ClusterRoleBinding) ProtoMessage() {}
+
+func (*ClusterRoleBindingList) ProtoMessage() {}
+
+func (*ClusterRoleList) ProtoMessage() {}
+
+func (*PolicyRule) ProtoMessage() {}
+
+func (*Role) ProtoMessage() {}
+
+func (*RoleBinding) ProtoMessage() {}
+
+func (*RoleBindingList) ProtoMessage() {}
+
+func (*RoleList) ProtoMessage() {}
+
+func (*RoleRef) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/types.go b/operator/vendor/k8s.io/api/rbac/v1alpha1/types.go
index 2146b4ce..a0d52ee4 100644
--- a/operator/vendor/k8s.io/api/rbac/v1alpha1/types.go
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/types.go
@@ -86,6 +86,8 @@ type Subject struct {
// +optional
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
// Name of the object being referenced.
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
// the Authorizer should report an error.
@@ -100,6 +102,8 @@ type RoleRef struct {
// Kind is the type of resource being referenced
Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
diff --git a/operator/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..5d9ca16c
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,82 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AggregationRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.AggregationRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRole) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.ClusterRole"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.ClusterRoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.ClusterRoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.PolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Role) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.Role"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.RoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.RoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.RoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleRef) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.RoleRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1alpha1.Subject"
+}
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/doc.go b/operator/vendor/k8s.io/api/rbac/v1beta1/doc.go
index 504a58d8..b1890610 100644
--- a/operator/vendor/k8s.io/api/rbac/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.rbac.v1beta1
// +groupName=rbac.authorization.k8s.io
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
index 9052d7e8..0980b011 100644
--- a/operator/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
@@ -24,434 +24,36 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (m *AggregationRule) Reset() { *m = AggregationRule{} }
-func (*AggregationRule) ProtoMessage() {}
-func (*AggregationRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{0}
-}
-func (m *AggregationRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AggregationRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AggregationRule.Merge(m, src)
-}
-func (m *AggregationRule) XXX_Size() int {
- return m.Size()
-}
-func (m *AggregationRule) XXX_DiscardUnknown() {
- xxx_messageInfo_AggregationRule.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AggregationRule proto.InternalMessageInfo
-
-func (m *ClusterRole) Reset() { *m = ClusterRole{} }
-func (*ClusterRole) ProtoMessage() {}
-func (*ClusterRole) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{1}
-}
-func (m *ClusterRole) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRole) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRole.Merge(m, src)
-}
-func (m *ClusterRole) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRole) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRole.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRole proto.InternalMessageInfo
-
-func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-func (*ClusterRoleBinding) ProtoMessage() {}
-func (*ClusterRoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{2}
-}
-func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBinding.Merge(m, src)
-}
-func (m *ClusterRoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo
-
-func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (*ClusterRoleBindingList) ProtoMessage() {}
-func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{3}
-}
-func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleBindingList.Merge(m, src)
-}
-func (m *ClusterRoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo
-
-func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-func (*ClusterRoleList) ProtoMessage() {}
-func (*ClusterRoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{4}
-}
-func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ClusterRoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClusterRoleList.Merge(m, src)
-}
-func (m *ClusterRoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *ClusterRoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_ClusterRoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo
-
-func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (*PolicyRule) ProtoMessage() {}
-func (*PolicyRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{5}
-}
-func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PolicyRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PolicyRule.Merge(m, src)
-}
-func (m *PolicyRule) XXX_Size() int {
- return m.Size()
-}
-func (m *PolicyRule) XXX_DiscardUnknown() {
- xxx_messageInfo_PolicyRule.DiscardUnknown(m)
-}
+func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} }
-var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} }
-func (m *Role) Reset() { *m = Role{} }
-func (*Role) ProtoMessage() {}
-func (*Role) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{6}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Role) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
- return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
- xxx_messageInfo_Role.DiscardUnknown(m)
-}
+func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} }
-var xxx_messageInfo_Role proto.InternalMessageInfo
+func (m *PolicyRule) Reset() { *m = PolicyRule{} }
-func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (*RoleBinding) ProtoMessage() {}
-func (*RoleBinding) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{7}
-}
-func (m *RoleBinding) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBinding) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBinding.Merge(m, src)
-}
-func (m *RoleBinding) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBinding) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBinding.DiscardUnknown(m)
-}
+func (m *Role) Reset() { *m = Role{} }
-var xxx_messageInfo_RoleBinding proto.InternalMessageInfo
+func (m *RoleBinding) Reset() { *m = RoleBinding{} }
-func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-func (*RoleBindingList) ProtoMessage() {}
-func (*RoleBindingList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{8}
-}
-func (m *RoleBindingList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleBindingList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleBindingList.Merge(m, src)
-}
-func (m *RoleBindingList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleBindingList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleBindingList.DiscardUnknown(m)
-}
+func (m *RoleBindingList) Reset() { *m = RoleBindingList{} }
-var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo
+func (m *RoleList) Reset() { *m = RoleList{} }
-func (m *RoleList) Reset() { *m = RoleList{} }
-func (*RoleList) ProtoMessage() {}
-func (*RoleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{9}
-}
-func (m *RoleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleList.Merge(m, src)
-}
-func (m *RoleList) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleList) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleList proto.InternalMessageInfo
+func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (m *RoleRef) Reset() { *m = RoleRef{} }
-func (*RoleRef) ProtoMessage() {}
-func (*RoleRef) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{10}
-}
-func (m *RoleRef) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RoleRef) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RoleRef.Merge(m, src)
-}
-func (m *RoleRef) XXX_Size() int {
- return m.Size()
-}
-func (m *RoleRef) XXX_DiscardUnknown() {
- xxx_messageInfo_RoleRef.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RoleRef proto.InternalMessageInfo
-
-func (m *Subject) Reset() { *m = Subject{} }
-func (*Subject) ProtoMessage() {}
-func (*Subject) Descriptor() ([]byte, []int) {
- return fileDescriptor_c5bc2d145acd4e45, []int{11}
-}
-func (m *Subject) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Subject) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Subject.Merge(m, src)
-}
-func (m *Subject) XXX_Size() int {
- return m.Size()
-}
-func (m *Subject) XXX_DiscardUnknown() {
- xxx_messageInfo_Subject.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Subject proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule")
- proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole")
- proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding")
- proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList")
- proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleList")
- proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1beta1.PolicyRule")
- proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1beta1.Role")
- proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1beta1.RoleBinding")
- proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.RoleBindingList")
- proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1beta1.RoleList")
- proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef")
- proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_c5bc2d145acd4e45)
-}
-
-var fileDescriptor_c5bc2d145acd4e45 = []byte{
- // 800 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x3b, 0x6f, 0xe3, 0x46,
- 0x10, 0xd6, 0xca, 0x12, 0x2c, 0xae, 0x22, 0x28, 0xde, 0x18, 0x31, 0x61, 0x24, 0x94, 0xa0, 0x04,
- 0x88, 0x01, 0x27, 0x64, 0xec, 0x04, 0x49, 0x1a, 0x17, 0x66, 0x8a, 0xc4, 0xb0, 0xa3, 0x18, 0x6b,
- 0x24, 0x45, 0x90, 0x22, 0x2b, 0x6a, 0x4d, 0x6f, 0xc4, 0x17, 0xb8, 0xa4, 0x00, 0x23, 0x4d, 0x9a,
- 0xeb, 0xae, 0x38, 0xe0, 0xaa, 0x6b, 0xaf, 0xbe, 0xea, 0xca, 0xfb, 0x05, 0x2a, 0x5d, 0xba, 0x12,
- 0xce, 0xbc, 0x1f, 0x72, 0x87, 0xe5, 0x43, 0xd4, 0x8b, 0xb6, 0x2a, 0x01, 0x07, 0x5c, 0x25, 0xed,
- 0xcc, 0x37, 0xdf, 0xcc, 0x7c, 0xbb, 0x33, 0x12, 0xfc, 0x6a, 0xf0, 0x13, 0x57, 0x99, 0xab, 0x11,
- 0x8f, 0x69, 0x7e, 0x8f, 0x18, 0xda, 0xf0, 0xa0, 0x47, 0x03, 0x72, 0xa0, 0x99, 0xd4, 0xa1, 0x3e,
- 0x09, 0x68, 0x5f, 0xf5, 0x7c, 0x37, 0x70, 0xd1, 0x4e, 0x02, 0x54, 0x89, 0xc7, 0x54, 0x01, 0x54,
- 0x53, 0xe0, 0xee, 0x37, 0x26, 0x0b, 0xae, 0xc2, 0x9e, 0x6a, 0xb8, 0xb6, 0x66, 0xba, 0xa6, 0xab,
- 0xc5, 0xf8, 0x5e, 0x78, 0x19, 0x9f, 0xe2, 0x43, 0xfc, 0x2d, 0xe1, 0xd9, 0xfd, 0x3e, 0x4f, 0x68,
- 0x13, 0xe3, 0x8a, 0x39, 0xd4, 0xbf, 0xd6, 0xbc, 0x81, 0x29, 0x0c, 0x5c, 0xb3, 0x69, 0x40, 0xb4,
- 0xe1, 0x42, 0xf6, 0x5d, 0xad, 0x28, 0xca, 0x0f, 0x9d, 0x80, 0xd9, 0x74, 0x21, 0xe0, 0x87, 0x87,
- 0x02, 0xb8, 0x71, 0x45, 0x6d, 0x32, 0x1f, 0xd7, 0x79, 0x06, 0x60, 0xf3, 0xd8, 0x34, 0x7d, 0x6a,
- 0x92, 0x80, 0xb9, 0x0e, 0x0e, 0x2d, 0x8a, 0x1e, 0x01, 0xb8, 0x6d, 0x58, 0x21, 0x0f, 0xa8, 0x8f,
- 0x5d, 0x8b, 0x5e, 0x50, 0x8b, 0x1a, 0x81, 0xeb, 0x73, 0x19, 0xb4, 0x37, 0xf6, 0xea, 0x87, 0xdf,
- 0xa9, 0xb9, 0x34, 0x93, 0x5c, 0xaa, 0x37, 0x30, 0x85, 0x81, 0xab, 0xa2, 0x25, 0x75, 0x78, 0xa0,
- 0x9e, 0x91, 0x1e, 0xb5, 0xb2, 0x58, 0xfd, 0xb3, 0xd1, 0xb8, 0x55, 0x8a, 0xc6, 0xad, 0xed, 0x9f,
- 0x97, 0x10, 0xe3, 0xa5, 0xe9, 0x3a, 0xcf, 0xcb, 0xb0, 0x3e, 0x05, 0x47, 0xff, 0xc0, 0x9a, 0x20,
- 0xef, 0x93, 0x80, 0xc8, 0xa0, 0x0d, 0xf6, 0xea, 0x87, 0xdf, 0xae, 0x56, 0xca, 0xef, 0xbd, 0x7f,
- 0xa9, 0x11, 0xfc, 0x46, 0x03, 0xa2, 0xa3, 0xb4, 0x0e, 0x98, 0xdb, 0xf0, 0x84, 0x15, 0xfd, 0x0a,
- 0xab, 0x7e, 0x68, 0x51, 0x2e, 0x97, 0xe3, 0x4e, 0xbf, 0x50, 0x0b, 0x1e, 0x81, 0x7a, 0xee, 0x5a,
- 0xcc, 0xb8, 0x16, 0x6a, 0xe9, 0x8d, 0x94, 0xb1, 0x2a, 0x4e, 0x1c, 0x27, 0x04, 0xc8, 0x84, 0x4d,
- 0x32, 0x2b, 0xab, 0xbc, 0x11, 0x97, 0xbc, 0x57, 0xc8, 0x39, 0x77, 0x0d, 0xfa, 0x27, 0xd1, 0xb8,
- 0x35, 0x7f, 0x37, 0x78, 0x9e, 0xb5, 0xf3, 0xb4, 0x0c, 0xd1, 0x94, 0x48, 0x3a, 0x73, 0xfa, 0xcc,
- 0x31, 0xd7, 0xa0, 0x55, 0x17, 0xd6, 0x78, 0x18, 0x3b, 0x32, 0xb9, 0xda, 0x85, 0xad, 0x5d, 0x24,
- 0x40, 0xfd, 0xe3, 0x94, 0xb1, 0x96, 0x1a, 0x38, 0x9e, 0x70, 0xa0, 0x53, 0xb8, 0xe9, 0xbb, 0x16,
- 0xc5, 0xf4, 0x32, 0x55, 0xaa, 0x98, 0x0e, 0x27, 0x38, 0xbd, 0x99, 0xd2, 0x6d, 0xa6, 0x06, 0x9c,
- 0x31, 0x74, 0x46, 0x00, 0x7e, 0xba, 0xa8, 0xca, 0x19, 0xe3, 0x01, 0xfa, 0x7b, 0x41, 0x19, 0x75,
- 0xc5, 0x07, 0xcd, 0x78, 0xa2, 0xcb, 0xa4, 0x8b, 0xcc, 0x32, 0xa5, 0xca, 0x39, 0xac, 0xb2, 0x80,
- 0xda, 0x99, 0x24, 0xfb, 0x85, 0x3d, 0x2c, 0x56, 0x97, 0xbf, 0xa4, 0x13, 0xc1, 0x80, 0x13, 0xa2,
- 0xce, 0x2b, 0x00, 0x9b, 0x53, 0xe0, 0x35, 0xf4, 0x70, 0x32, 0xdb, 0xc3, 0x97, 0x2b, 0xf5, 0xb0,
- 0xbc, 0xf8, 0xb7, 0x00, 0xc2, 0x7c, 0x56, 0x50, 0x0b, 0x56, 0x87, 0xd4, 0xef, 0x25, 0x9b, 0x44,
- 0xd2, 0x25, 0x81, 0xff, 0x53, 0x18, 0x70, 0x62, 0x47, 0xfb, 0x50, 0x22, 0x1e, 0xfb, 0xc5, 0x77,
- 0x43, 0x2f, 0x49, 0x2f, 0xe9, 0x8d, 0x68, 0xdc, 0x92, 0x8e, 0xcf, 0x4f, 0x12, 0x23, 0xce, 0xfd,
- 0x02, 0xec, 0x53, 0xee, 0x86, 0xbe, 0x41, 0xb9, 0xbc, 0x91, 0x83, 0x71, 0x66, 0xc4, 0xb9, 0x1f,
- 0xfd, 0x08, 0x1b, 0xd9, 0xa1, 0x4b, 0x6c, 0xca, 0xe5, 0x4a, 0x1c, 0xb0, 0x15, 0x8d, 0x5b, 0x0d,
- 0x3c, 0xed, 0xc0, 0xb3, 0x38, 0x74, 0x04, 0x9b, 0x8e, 0xeb, 0x64, 0x90, 0x3f, 0xf0, 0x19, 0x97,
- 0xab, 0x71, 0x68, 0x3c, 0x9f, 0xdd, 0x59, 0x17, 0x9e, 0xc7, 0x76, 0x5e, 0x02, 0x58, 0x79, 0xdf,
- 0xb6, 0x57, 0xe7, 0x71, 0x19, 0xd6, 0x3f, 0x6c, 0x93, 0xc9, 0x36, 0x11, 0x23, 0xb8, 0xde, 0x35,
- 0xb2, 0xf2, 0x08, 0x3e, 0xbc, 0x3f, 0x5e, 0x00, 0x58, 0x5b, 0xd3, 0xe2, 0xd0, 0x67, 0xab, 0xfe,
- 0xfc, 0xfe, 0xaa, 0x97, 0x97, 0xfb, 0x1f, 0xcc, 0xf4, 0x47, 0x5f, 0xc3, 0x5a, 0x36, 0xec, 0x71,
- 0xb1, 0x52, 0x9e, 0x3c, 0xdb, 0x07, 0x78, 0x82, 0x40, 0x6d, 0x58, 0x19, 0x30, 0xa7, 0x2f, 0x97,
- 0x63, 0xe4, 0x47, 0x29, 0xb2, 0x72, 0xca, 0x9c, 0x3e, 0x8e, 0x3d, 0x02, 0xe1, 0x10, 0x3b, 0xf9,
- 0x21, 0x9e, 0x42, 0x88, 0x31, 0xc7, 0xb1, 0x47, 0x68, 0xb5, 0x99, 0x3e, 0xa6, 0x09, 0x1f, 0x28,
- 0xe4, 0x9b, 0xae, 0xaf, 0xbc, 0x4a, 0x7d, 0xf7, 0x67, 0x47, 0x1a, 0x94, 0xc4, 0x27, 0xf7, 0x88,
- 0x41, 0xe5, 0x4a, 0x0c, 0xdb, 0x4a, 0x61, 0x52, 0x37, 0x73, 0xe0, 0x1c, 0xa3, 0x1f, 0x8d, 0xee,
- 0x94, 0xd2, 0xcd, 0x9d, 0x52, 0xba, 0xbd, 0x53, 0x4a, 0xff, 0x47, 0x0a, 0x18, 0x45, 0x0a, 0xb8,
- 0x89, 0x14, 0x70, 0x1b, 0x29, 0xe0, 0x75, 0xa4, 0x80, 0x27, 0x6f, 0x94, 0xd2, 0x5f, 0x3b, 0x05,
- 0x7f, 0x79, 0xdf, 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0xfb, 0x5a, 0x79, 0x0c, 0x0b, 0x00, 0x00,
-}
+func (m *Subject) Reset() { *m = Subject{} }
func (m *AggregationRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.proto
index 8bfbd0c8..cac7b413 100644
--- a/operator/vendor/k8s.io/api/rbac/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.proto
@@ -191,6 +191,8 @@ message RoleRef {
optional string kind = 2;
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
optional string name = 3;
}
@@ -208,6 +210,8 @@ message Subject {
optional string apiGroup = 2;
// Name of the object being referenced.
+ // +required
+ // +k8s:required
optional string name = 3;
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..b2989683
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,46 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*AggregationRule) ProtoMessage() {}
+
+func (*ClusterRole) ProtoMessage() {}
+
+func (*ClusterRoleBinding) ProtoMessage() {}
+
+func (*ClusterRoleBindingList) ProtoMessage() {}
+
+func (*ClusterRoleList) ProtoMessage() {}
+
+func (*PolicyRule) ProtoMessage() {}
+
+func (*Role) ProtoMessage() {}
+
+func (*RoleBinding) ProtoMessage() {}
+
+func (*RoleBindingList) ProtoMessage() {}
+
+func (*RoleList) ProtoMessage() {}
+
+func (*RoleRef) ProtoMessage() {}
+
+func (*Subject) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/types.go b/operator/vendor/k8s.io/api/rbac/v1beta1/types.go
index 9cfaaceb..861e33c9 100644
--- a/operator/vendor/k8s.io/api/rbac/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/types.go
@@ -86,6 +86,8 @@ type Subject struct {
// +optional
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt,name=apiGroup"`
// Name of the object being referenced.
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
// Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty
// the Authorizer should report an error.
@@ -100,6 +102,8 @@ type RoleRef struct {
// Kind is the type of resource being referenced
Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
// Name is the name of resource being referenced
+ // +required
+ // +k8s:required
Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
diff --git a/operator/vendor/k8s.io/api/rbac/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/rbac/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..073ba67b
--- /dev/null
+++ b/operator/vendor/k8s.io/api/rbac/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,82 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AggregationRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.AggregationRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRole) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.ClusterRole"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.ClusterRoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.ClusterRoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ClusterRoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.ClusterRoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PolicyRule) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.PolicyRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Role) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.Role"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBinding) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.RoleBinding"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleBindingList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.RoleBindingList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleList) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.RoleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RoleRef) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.RoleRef"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Subject) OpenAPIModelName() string {
+ return "io.k8s.api.rbac.v1beta1.Subject"
+}
diff --git a/operator/vendor/k8s.io/api/resource/v1/doc.go b/operator/vendor/k8s.io/api/resource/v1/doc.go
index c94ca75d..645c1cb5 100644
--- a/operator/vendor/k8s.io/api/resource/v1/doc.go
+++ b/operator/vendor/k8s.io/api/resource/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.resource.v1
// +groupName=resource.k8s.io
// Package v1 is the v1 version of the resource API.
diff --git a/operator/vendor/k8s.io/api/resource/v1/generated.pb.go b/operator/vendor/k8s.io/api/resource/v1/generated.pb.go
index 5695e2c7..e6902049 100644
--- a/operator/vendor/k8s.io/api/resource/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/resource/v1/generated.pb.go
@@ -23,15 +23,13 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -39,1470 +37,91 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-func (*AllocatedDeviceStatus) ProtoMessage() {}
-func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{0}
-}
-func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
-}
-func (m *AllocatedDeviceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
-
-func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (*AllocationResult) ProtoMessage() {}
-func (*AllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{1}
-}
-func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocationResult.Merge(m, src)
-}
-func (m *AllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
-
-func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (*CELDeviceSelector) ProtoMessage() {}
-func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{2}
-}
-func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CELDeviceSelector.Merge(m, src)
-}
-func (m *CELDeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *CELDeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-func (*CapacityRequestPolicy) ProtoMessage() {}
-func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{3}
-}
-func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicy.Merge(m, src)
-}
-func (m *CapacityRequestPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-func (*CapacityRequestPolicyRange) ProtoMessage() {}
-func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{4}
-}
-func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src)
-}
-func (m *CapacityRequestPolicyRange) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo
-
-func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-func (*CapacityRequirements) ProtoMessage() {}
-func (*CapacityRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{5}
-}
-func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequirements) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequirements.Merge(m, src)
-}
-func (m *CapacityRequirements) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequirements) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequirements.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo
-
-func (m *Counter) Reset() { *m = Counter{} }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{6}
-}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
- return m.Size()
-}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (*CounterSet) ProtoMessage() {}
-func (*CounterSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{7}
-}
-func (m *CounterSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CounterSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CounterSet.Merge(m, src)
-}
-func (m *CounterSet) XXX_Size() int {
- return m.Size()
-}
-func (m *CounterSet) XXX_DiscardUnknown() {
- xxx_messageInfo_CounterSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CounterSet proto.InternalMessageInfo
-
-func (m *Device) Reset() { *m = Device{} }
-func (*Device) ProtoMessage() {}
-func (*Device) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{8}
-}
-func (m *Device) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Device) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Device.Merge(m, src)
-}
-func (m *Device) XXX_Size() int {
- return m.Size()
-}
-func (m *Device) XXX_DiscardUnknown() {
- xxx_messageInfo_Device.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Device proto.InternalMessageInfo
-
-func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (*DeviceAllocationConfiguration) ProtoMessage() {}
-func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{9}
-}
-func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
-}
-func (m *DeviceAllocationConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
-
-func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-func (*DeviceAllocationResult) ProtoMessage() {}
-func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{10}
-}
-func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
-}
-func (m *DeviceAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
-
-func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (*DeviceAttribute) ProtoMessage() {}
-func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{11}
-}
-func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAttribute.Merge(m, src)
-}
-func (m *DeviceAttribute) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAttribute) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
-
-func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-func (*DeviceCapacity) ProtoMessage() {}
-func (*DeviceCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{12}
-}
-func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCapacity.Merge(m, src)
-}
-func (m *DeviceCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
-
-func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (*DeviceClaim) ProtoMessage() {}
-func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{13}
-}
-func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaim.Merge(m, src)
-}
-func (m *DeviceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
-
-func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-func (*DeviceClaimConfiguration) ProtoMessage() {}
-func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{14}
-}
-func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
-}
-func (m *DeviceClaimConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (*DeviceClass) ProtoMessage() {}
-func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{15}
-}
-func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClass.Merge(m, src)
-}
-func (m *DeviceClass) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClass) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
-
-func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-func (*DeviceClassConfiguration) ProtoMessage() {}
-func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{16}
-}
-func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
-}
-func (m *DeviceClassConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (*DeviceClassList) ProtoMessage() {}
-func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{17}
-}
-func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassList.Merge(m, src)
-}
-func (m *DeviceClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
-
-func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-func (*DeviceClassSpec) ProtoMessage() {}
-func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{18}
-}
-func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassSpec.Merge(m, src)
-}
-func (m *DeviceClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
-
-func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (*DeviceConfiguration) ProtoMessage() {}
-func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{19}
-}
-func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConfiguration.Merge(m, src)
-}
-func (m *DeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
-}
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
+func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-func (*DeviceConstraint) ProtoMessage() {}
-func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{20}
-}
-func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConstraint.Merge(m, src)
-}
-func (m *DeviceConstraint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConstraint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
-}
+func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (*DeviceCounterConsumption) ProtoMessage() {}
-func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{21}
-}
-func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
-}
-func (m *DeviceCounterConsumption) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
-}
+func (m *Counter) Reset() { *m = Counter{} }
-var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
+func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-func (*DeviceRequest) ProtoMessage() {}
-func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{22}
-}
-func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequest.Merge(m, src)
-}
-func (m *DeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
-}
+func (m *Device) Reset() { *m = Device{} }
-var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
+func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (*DeviceRequestAllocationResult) ProtoMessage() {}
-func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{23}
-}
-func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
-}
-func (m *DeviceRequestAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
-}
+func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
+func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (*DeviceSelector) ProtoMessage() {}
-func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{24}
-}
-func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSelector.Merge(m, src)
-}
-func (m *DeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
-}
+func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (*DeviceSubRequest) ProtoMessage() {}
-func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{25}
-}
-func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSubRequest.Merge(m, src)
-}
-func (m *DeviceSubRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSubRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
-}
+func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
+func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (*DeviceTaint) ProtoMessage() {}
-func (*DeviceTaint) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{26}
-}
-func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaint.Merge(m, src)
-}
-func (m *DeviceTaint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
-}
+func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
+func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (*DeviceToleration) ProtoMessage() {}
-func (*DeviceToleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{27}
-}
-func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceToleration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceToleration.Merge(m, src)
-}
-func (m *DeviceToleration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceToleration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
-
-func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} }
-func (*ExactDeviceRequest) ProtoMessage() {}
-func (*ExactDeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{28}
-}
-func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExactDeviceRequest.Merge(m, src)
-}
-func (m *ExactDeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExactDeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo
-
-func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (*NetworkDeviceData) ProtoMessage() {}
-func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{29}
-}
-func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkDeviceData.Merge(m, src)
-}
-func (m *NetworkDeviceData) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkDeviceData) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
-
-func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-func (*OpaqueDeviceConfiguration) ProtoMessage() {}
-func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{30}
-}
-func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
-}
+func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
+func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (*ResourceClaim) ProtoMessage() {}
-func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{31}
-}
-func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaim.Merge(m, src)
-}
-func (m *ResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
-}
+func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-func (*ResourceClaimConsumerReference) ProtoMessage() {}
-func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{32}
-}
-func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
-}
-func (m *ResourceClaimConsumerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
-}
+func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
+func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (*ResourceClaimList) ProtoMessage() {}
-func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{33}
-}
-func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimList.Merge(m, src)
-}
-func (m *ResourceClaimList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
-}
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
+func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-func (*ResourceClaimSpec) ProtoMessage() {}
-func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{34}
-}
-func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
-}
-func (m *ResourceClaimSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
-}
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
+func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (*ResourceClaimStatus) ProtoMessage() {}
-func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{35}
-}
-func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
-}
-func (m *ResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
-}
+func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} }
-var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
+func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-func (*ResourceClaimTemplate) ProtoMessage() {}
-func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{36}
-}
-func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
-}
-func (m *ResourceClaimTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
-}
+func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
+func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (*ResourceClaimTemplateList) ProtoMessage() {}
-func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{37}
-}
-func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
-}
-func (m *ResourceClaimTemplateList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
-}
+func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
+func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-func (*ResourceClaimTemplateSpec) ProtoMessage() {}
-func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{38}
-}
-func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
-}
+func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
+func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (*ResourcePool) ProtoMessage() {}
-func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{39}
-}
-func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePool) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePool.Merge(m, src)
-}
-func (m *ResourcePool) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePool) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePool.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
+func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-func (*ResourceSlice) ProtoMessage() {}
-func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{40}
-}
-func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSlice.Merge(m, src)
-}
-func (m *ResourceSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
+func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (*ResourceSliceList) ProtoMessage() {}
-func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{41}
-}
-func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceList.Merge(m, src)
-}
-func (m *ResourceSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
-}
+func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
+func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
-func (*ResourceSliceSpec) ProtoMessage() {}
-func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_f4fc532aec02d243, []int{42}
-}
-func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
-}
-func (m *ResourceSliceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1.AllocatedDeviceStatus")
- proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1.AllocationResult")
- proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1.CELDeviceSelector")
- proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicy")
- proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1.CapacityRequestPolicyRange")
- proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1.CapacityRequirements")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.CapacityRequirements.RequestsEntry")
- proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1.Counter")
- proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1.CounterSet")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.CounterSet.CountersEntry")
- proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1.Device")
- proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1.Device.AttributesEntry")
- proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1.Device.CapacityEntry")
- proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1.DeviceAllocationConfiguration")
- proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceAllocationResult")
- proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1.DeviceAttribute")
- proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1.DeviceCapacity")
- proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1.DeviceClaim")
- proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClaimConfiguration")
- proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1.DeviceClass")
- proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1.DeviceClassConfiguration")
- proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1.DeviceClassList")
- proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1.DeviceClassSpec")
- proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1.DeviceConfiguration")
- proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1.DeviceConstraint")
- proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1.DeviceCounterConsumption.CountersEntry")
- proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1.DeviceRequest")
- proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1.DeviceRequestAllocationResult.ConsumedCapacityEntry")
- proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1.DeviceSelector")
- proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1.DeviceSubRequest")
- proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1.DeviceTaint")
- proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1.DeviceToleration")
- proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1.ExactDeviceRequest")
- proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1.NetworkDeviceData")
- proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1.OpaqueDeviceConfiguration")
- proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1.ResourceClaim")
- proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1.ResourceClaimConsumerReference")
- proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1.ResourceClaimList")
- proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimSpec")
- proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1.ResourceClaimStatus")
- proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplate")
- proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateList")
- proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1.ResourceClaimTemplateSpec")
- proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1.ResourcePool")
- proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1.ResourceSlice")
- proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1.ResourceSliceList")
- proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1.ResourceSliceSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/resource/v1/generated.proto", fileDescriptor_f4fc532aec02d243)
-}
-
-var fileDescriptor_f4fc532aec02d243 = []byte{
- // 3028 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x24, 0x47,
- 0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xbb, 0xeb, 0x4c, 0xfc, 0xff, 0xc7,
- 0xe3, 0xf4, 0x92, 0xc4, 0x49, 0x76, 0xc7, 0x6b, 0x8b, 0x44, 0x51, 0x12, 0x10, 0x1e, 0xdb, 0x9b,
- 0x38, 0xfb, 0x11, 0xa7, 0xc6, 0x6b, 0x36, 0x28, 0x84, 0xb4, 0x7b, 0xca, 0x76, 0xe3, 0x9e, 0xee,
- 0x49, 0x77, 0x8d, 0x77, 0xcd, 0x29, 0xe2, 0x00, 0x57, 0x04, 0x12, 0x02, 0x24, 0x24, 0x94, 0x03,
- 0x12, 0x17, 0x84, 0x38, 0x11, 0x04, 0x28, 0xc7, 0x08, 0x29, 0x28, 0x17, 0xa4, 0x20, 0xa1, 0x81,
- 0x1d, 0x4e, 0x48, 0x08, 0x89, 0x0b, 0x07, 0x1f, 0x10, 0xaa, 0xea, 0xaa, 0xfe, 0x9a, 0x6e, 0x4f,
- 0xdb, 0x59, 0xaf, 0x96, 0x9b, 0xe7, 0xd5, 0x7b, 0xbf, 0xaa, 0x7a, 0xf5, 0xbe, 0xea, 0x75, 0x19,
- 0x9e, 0xdc, 0x7b, 0xc1, 0xad, 0x19, 0xf6, 0xbc, 0xd6, 0x36, 0xe6, 0x1d, 0xe2, 0xda, 0x1d, 0x47,
- 0x27, 0xf3, 0xfb, 0x0b, 0xf3, 0x3b, 0xc4, 0x22, 0x8e, 0x46, 0x49, 0xb3, 0xd6, 0x76, 0x6c, 0x6a,
- 0xa3, 0x29, 0x8f, 0xaf, 0xa6, 0xb5, 0x8d, 0x9a, 0xe4, 0xab, 0xed, 0x2f, 0x4c, 0x5f, 0xde, 0x31,
- 0xe8, 0x6e, 0x67, 0xab, 0xa6, 0xdb, 0xad, 0xf9, 0x1d, 0x7b, 0xc7, 0x9e, 0xe7, 0xec, 0x5b, 0x9d,
- 0x6d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x79, 0x30, 0xd3, 0x6a, 0x68, 0x3a, 0xdd, 0x76, 0x92, 0xa6,
- 0x9a, 0xfe, 0x7c, 0xc0, 0xd3, 0xd2, 0xf4, 0x5d, 0xc3, 0x22, 0xce, 0xc1, 0x7c, 0x7b, 0x6f, 0x27,
- 0xba, 0xc6, 0xe3, 0x48, 0xb9, 0xf3, 0x2d, 0x42, 0xb5, 0xa4, 0xb9, 0xe6, 0xd3, 0xa4, 0x9c, 0x8e,
- 0x45, 0x8d, 0x56, 0xff, 0x34, 0xcf, 0x0f, 0x12, 0x70, 0xf5, 0x5d, 0xd2, 0xd2, 0xe2, 0x72, 0xea,
- 0x87, 0x79, 0xb8, 0xb0, 0x64, 0x9a, 0xb6, 0xce, 0x68, 0x2b, 0x64, 0xdf, 0xd0, 0x49, 0x83, 0x6a,
- 0xb4, 0xe3, 0xa2, 0x27, 0xa1, 0xd8, 0x74, 0x8c, 0x7d, 0xe2, 0x54, 0x94, 0x59, 0x65, 0x6e, 0xa4,
- 0x3e, 0xfe, 0x51, 0xb7, 0x3a, 0xd4, 0xeb, 0x56, 0x8b, 0x2b, 0x9c, 0x8a, 0xc5, 0x28, 0x9a, 0x85,
- 0x42, 0xdb, 0xb6, 0xcd, 0x4a, 0x8e, 0x73, 0x8d, 0x0a, 0xae, 0xc2, 0xba, 0x6d, 0x9b, 0x98, 0x8f,
- 0x70, 0x24, 0x8e, 0x5c, 0xc9, 0xc7, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4, 0x04, 0x0c, 0xbb, 0xbb,
- 0x9a, 0x43, 0xd6, 0x56, 0x2a, 0xc3, 0x9c, 0xb1, 0xdc, 0xeb, 0x56, 0x87, 0x1b, 0x1e, 0x09, 0xcb,
- 0x31, 0xa4, 0x03, 0xe8, 0xb6, 0xd5, 0x34, 0xa8, 0x61, 0x5b, 0x6e, 0xa5, 0x30, 0x9b, 0x9f, 0x2b,
- 0x2f, 0xce, 0xd7, 0x02, 0x3b, 0xf0, 0xf7, 0x5f, 0x6b, 0xef, 0xed, 0x30, 0x82, 0x5b, 0x63, 0x6a,
- 0xae, 0xed, 0x2f, 0xd4, 0x96, 0xa5, 0x5c, 0x1d, 0x89, 0x35, 0x80, 0x4f, 0x72, 0x71, 0x08, 0x16,
- 0x5d, 0x83, 0x42, 0x53, 0xa3, 0x5a, 0xe5, 0xcc, 0xac, 0x32, 0x57, 0x5e, 0xbc, 0x9c, 0x0a, 0x2f,
- 0xd4, 0x5b, 0xc3, 0xda, 0x9d, 0xd5, 0xbb, 0x94, 0x58, 0x2e, 0x03, 0x2f, 0x31, 0x05, 0xac, 0x68,
- 0x54, 0xc3, 0x1c, 0x04, 0xbd, 0x05, 0x65, 0x8b, 0xd0, 0x3b, 0xb6, 0xb3, 0xc7, 0x88, 0x95, 0x22,
- 0xc7, 0x7c, 0xba, 0x96, 0x6c, 0xba, 0xb5, 0x9b, 0x82, 0x95, 0x2b, 0x85, 0x09, 0xd4, 0xcf, 0xf6,
- 0xba, 0xd5, 0xf2, 0xcd, 0x00, 0x01, 0x87, 0xe1, 0xd4, 0xdf, 0xe4, 0x60, 0x42, 0x1c, 0xa1, 0x61,
- 0x5b, 0x98, 0xb8, 0x1d, 0x93, 0xa2, 0x37, 0x61, 0xd8, 0xd3, 0xaa, 0xcb, 0x8f, 0xaf, 0xbc, 0x58,
- 0x4b, 0x9b, 0xce, 0x9b, 0x27, 0x0e, 0x50, 0x3f, 0x2b, 0x14, 0x34, 0xec, 0x8d, 0xbb, 0x58, 0xe2,
- 0xa1, 0x4d, 0x18, 0xb5, 0xec, 0x26, 0x69, 0x10, 0x93, 0xe8, 0xd4, 0x76, 0xf8, 0xa1, 0x96, 0x17,
- 0x67, 0xc3, 0xf8, 0xcc, 0x85, 0xf8, 0x56, 0x42, 0x7c, 0xf5, 0x89, 0x5e, 0xb7, 0x3a, 0x1a, 0xa6,
- 0xe0, 0x08, 0x0e, 0xea, 0xc0, 0x39, 0xcd, 0x5f, 0xc5, 0x86, 0xd1, 0x22, 0x2e, 0xd5, 0x5a, 0x6d,
- 0x71, 0x02, 0xcf, 0x64, 0x3b, 0x60, 0x26, 0x56, 0x7f, 0xa4, 0xd7, 0xad, 0x9e, 0x5b, 0xea, 0x87,
- 0xc2, 0x49, 0xf8, 0xea, 0x2b, 0x30, 0xb9, 0xbc, 0x7a, 0x5d, 0x98, 0xbe, 0x5c, 0xcb, 0x22, 0x00,
- 0xb9, 0xdb, 0x76, 0x88, 0xcb, 0xce, 0x53, 0x38, 0x80, 0x6f, 0x32, 0xab, 0xfe, 0x08, 0x0e, 0x71,
- 0xa9, 0x1f, 0xe4, 0xe0, 0xc2, 0xb2, 0xd6, 0xd6, 0x74, 0x83, 0x1e, 0x60, 0xf2, 0x6e, 0x87, 0xb8,
- 0x74, 0xdd, 0x36, 0x0d, 0xfd, 0x00, 0xdd, 0x62, 0x87, 0xb1, 0xad, 0x75, 0x4c, 0x9a, 0x70, 0x18,
- 0x7d, 0xbb, 0x09, 0x4e, 0xe7, 0x8d, 0x8e, 0x66, 0x51, 0x83, 0x1e, 0x78, 0x8e, 0xb0, 0xe2, 0x41,
- 0x60, 0x89, 0x85, 0x08, 0x94, 0xf7, 0x35, 0xd3, 0x68, 0x6e, 0x6a, 0x66, 0x87, 0xb8, 0x95, 0x3c,
- 0xf7, 0x84, 0xe3, 0x42, 0x9f, 0x13, 0xbb, 0x2a, 0x6f, 0x06, 0x50, 0x38, 0x8c, 0x8b, 0xb6, 0x00,
- 0xf8, 0x4f, 0xac, 0x59, 0x3b, 0xa4, 0x52, 0xe0, 0x1b, 0x58, 0x4c, 0xb3, 0xa6, 0x44, 0x05, 0x70,
- 0xc9, 0xfa, 0x38, 0xd3, 0xdd, 0xa6, 0x8f, 0x84, 0x43, 0xa8, 0xea, 0x7b, 0x39, 0x98, 0x4e, 0x17,
- 0x45, 0x6b, 0x90, 0x6f, 0x19, 0xd6, 0x09, 0x95, 0x37, 0xdc, 0xeb, 0x56, 0xf3, 0x37, 0x0c, 0x0b,
- 0x33, 0x0c, 0x0e, 0xa5, 0xdd, 0xe5, 0xd1, 0xea, 0xa4, 0x50, 0xda, 0x5d, 0xcc, 0x30, 0xd0, 0x75,
- 0x28, 0xb8, 0x94, 0xb4, 0x85, 0x03, 0x1c, 0x17, 0x8b, 0x07, 0x89, 0x06, 0x25, 0x6d, 0xcc, 0x51,
- 0xd4, 0xff, 0x28, 0x70, 0x3e, 0xac, 0x02, 0xc3, 0x21, 0x2d, 0x62, 0x51, 0x17, 0x1d, 0x40, 0xc9,
- 0xf1, 0x54, 0xc2, 0x7c, 0x99, 0x9d, 0xf1, 0x8b, 0x59, 0xb4, 0x2f, 0xe5, 0x6b, 0x42, 0x9f, 0xee,
- 0xaa, 0x45, 0x9d, 0x83, 0xfa, 0xe3, 0xe2, 0xbc, 0x4b, 0x92, 0xfc, 0xcd, 0xbf, 0x54, 0xc7, 0xde,
- 0xe8, 0x68, 0xa6, 0xb1, 0x6d, 0x90, 0xe6, 0x4d, 0xad, 0x45, 0xb0, 0x3f, 0xdd, 0xf4, 0x1e, 0x8c,
- 0x45, 0xa4, 0xd1, 0x04, 0xe4, 0xf7, 0xc8, 0x81, 0xe7, 0x10, 0x98, 0xfd, 0x89, 0x56, 0xe0, 0xcc,
- 0x3e, 0xb3, 0x93, 0x93, 0x69, 0x14, 0x7b, 0xc2, 0x2f, 0xe6, 0x5e, 0x50, 0xd4, 0xb7, 0x61, 0x78,
- 0xd9, 0xee, 0x58, 0x94, 0x38, 0xa8, 0x21, 0x41, 0x4f, 0x76, 0xe2, 0x63, 0x62, 0x8f, 0x67, 0xb8,
- 0x05, 0x8b, 0x39, 0xd4, 0x7f, 0x28, 0x00, 0x62, 0x82, 0x06, 0xa1, 0x2c, 0x6f, 0x59, 0x5a, 0x8b,
- 0x08, 0xe7, 0xf6, 0xf3, 0x16, 0xd7, 0x00, 0x1f, 0x41, 0x6f, 0x43, 0x49, 0xf7, 0xf8, 0xdd, 0x4a,
- 0x8e, 0x2b, 0xfe, 0x4a, 0xaa, 0xe2, 0x7d, 0x5c, 0xf9, 0xa7, 0x50, 0xf7, 0x84, 0x54, 0xb7, 0x24,
- 0x63, 0x1f, 0x73, 0xfa, 0x2d, 0x18, 0x8b, 0x30, 0x27, 0x68, 0xf7, 0xb9, 0xa8, 0x76, 0xab, 0x03,
- 0xe6, 0x0f, 0xab, 0xf3, 0xdf, 0x25, 0x10, 0x09, 0x36, 0xc3, 0x56, 0x5d, 0x00, 0x8d, 0x52, 0xc7,
- 0xd8, 0xea, 0x50, 0x22, 0x37, 0x3b, 0x20, 0x63, 0xd4, 0x96, 0x7c, 0x01, 0x6f, 0xab, 0x17, 0x65,
- 0x7c, 0x0c, 0x06, 0xfa, 0x6d, 0x2b, 0x34, 0x0d, 0xda, 0x83, 0x92, 0x2e, 0x0c, 0x56, 0x04, 0xaf,
- 0x4b, 0x03, 0xa6, 0x94, 0xf6, 0x1d, 0x33, 0x65, 0x49, 0x4e, 0x30, 0x65, 0x39, 0x01, 0xda, 0x87,
- 0x09, 0xdd, 0xb6, 0xdc, 0x4e, 0x8b, 0xb8, 0x52, 0xe9, 0xa2, 0x76, 0xb8, 0x72, 0xf4, 0xa4, 0x82,
- 0x7b, 0x99, 0x0b, 0xb7, 0x79, 0xf1, 0x50, 0x11, 0x13, 0x4f, 0x2c, 0xc7, 0x10, 0x71, 0xdf, 0x1c,
- 0x68, 0x0e, 0x4a, 0x2c, 0xcb, 0xb1, 0xd5, 0xf0, 0x54, 0x36, 0x52, 0x1f, 0x65, 0x4b, 0xbe, 0x29,
- 0x68, 0xd8, 0x1f, 0xed, 0xcb, 0xab, 0xc5, 0xfb, 0x94, 0x57, 0xe7, 0xa0, 0xa4, 0x99, 0x26, 0x63,
- 0x70, 0x79, 0x5d, 0x55, 0xf2, 0x56, 0xb0, 0x24, 0x68, 0xd8, 0x1f, 0x45, 0xd7, 0xa0, 0x48, 0x35,
- 0xc3, 0xa2, 0x6e, 0xa5, 0xc4, 0x35, 0x73, 0xf1, 0x68, 0xcd, 0x6c, 0x30, 0xde, 0xa0, 0x9a, 0xe3,
- 0x3f, 0x5d, 0x2c, 0x20, 0xd0, 0x02, 0x94, 0xb7, 0x0c, 0xab, 0xe9, 0x6e, 0xd8, 0x0c, 0xbc, 0x32,
- 0xc2, 0x67, 0xe6, 0x95, 0x4c, 0x3d, 0x20, 0xe3, 0x30, 0x0f, 0x5a, 0x86, 0x49, 0xf6, 0xd3, 0xb0,
- 0x76, 0x82, 0xaa, 0xac, 0x02, 0xb3, 0xf9, 0xb9, 0x91, 0xfa, 0x85, 0x5e, 0xb7, 0x3a, 0x59, 0x8f,
- 0x0f, 0xe2, 0x7e, 0x7e, 0x74, 0x1b, 0x2a, 0x82, 0x78, 0x55, 0x33, 0xcc, 0x8e, 0x43, 0x42, 0x58,
- 0x65, 0x8e, 0xf5, 0xff, 0xbd, 0x6e, 0xb5, 0x52, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x0c, 0x99, 0x15,
- 0x10, 0x77, 0x6e, 0x74, 0x4c, 0x6a, 0xb4, 0xcd, 0x50, 0xcd, 0xe4, 0x56, 0x46, 0xf9, 0xf6, 0x38,
- 0xf2, 0x52, 0x0a, 0x0f, 0x4e, 0x95, 0x9e, 0xde, 0x86, 0xb3, 0x31, 0x6f, 0x4a, 0x88, 0x05, 0x5f,
- 0x88, 0xc6, 0x82, 0xa7, 0x06, 0x14, 0x74, 0x12, 0x2f, 0x14, 0x13, 0xa6, 0x75, 0x18, 0x8b, 0xb8,
- 0x50, 0xc2, 0x2c, 0x2f, 0x47, 0x67, 0x79, 0x72, 0x80, 0x73, 0xc8, 0x84, 0x13, 0x0a, 0x3c, 0xdf,
- 0xce, 0xc1, 0x63, 0xf1, 0xa2, 0x72, 0xd9, 0xb6, 0xb6, 0x8d, 0x9d, 0x8e, 0xc3, 0x7f, 0xa0, 0x2f,
- 0x41, 0xd1, 0x03, 0x12, 0x11, 0x69, 0x4e, 0x9a, 0x50, 0x83, 0x53, 0x0f, 0xbb, 0xd5, 0xa9, 0xb8,
- 0xa8, 0x37, 0x82, 0x85, 0x1c, 0xb3, 0x69, 0x3f, 0x27, 0xe6, 0xf8, 0xa1, 0x8e, 0x86, 0x73, 0x5a,
- 0x90, 0xc2, 0xd0, 0x37, 0xe0, 0x5c, 0x53, 0xf8, 0x71, 0x68, 0x09, 0x22, 0x67, 0x3f, 0x3b, 0xc8,
- 0xf5, 0x43, 0x22, 0xf5, 0xff, 0x13, 0xab, 0x3c, 0x97, 0x30, 0x88, 0x93, 0x26, 0x51, 0xff, 0xa4,
- 0xc0, 0x54, 0x72, 0x79, 0x8d, 0xde, 0x81, 0x61, 0x87, 0xff, 0x25, 0x73, 0xfa, 0x73, 0x47, 0x2f,
- 0x45, 0xec, 0x2c, 0xbd, 0x4c, 0xf7, 0x7e, 0xbb, 0x58, 0xc2, 0xa2, 0xaf, 0x42, 0x51, 0xe7, 0xab,
- 0x11, 0xe1, 0xfc, 0xb9, 0xac, 0x17, 0x80, 0xe8, 0xae, 0x7d, 0xf7, 0xf6, 0xc8, 0x58, 0x80, 0xaa,
- 0x3f, 0x53, 0xe0, 0x6c, 0xcc, 0xd2, 0xd0, 0x0c, 0xe4, 0x0d, 0x8b, 0x72, 0xcb, 0xc9, 0x7b, 0x07,
- 0xb2, 0x66, 0x51, 0x2f, 0x07, 0xb3, 0x01, 0xf4, 0x38, 0x14, 0xb6, 0xd8, 0x55, 0x31, 0xcf, 0x9d,
- 0x65, 0xac, 0xd7, 0xad, 0x8e, 0xd4, 0x6d, 0xdb, 0xf4, 0x38, 0xf8, 0x10, 0x7a, 0x0a, 0x8a, 0x2e,
- 0x75, 0x0c, 0x6b, 0x87, 0x17, 0x9a, 0x23, 0x5e, 0xc0, 0x68, 0x70, 0x8a, 0xc7, 0x26, 0x86, 0xd1,
- 0x33, 0x30, 0xbc, 0x4f, 0x1c, 0x5e, 0x9e, 0x7b, 0x61, 0x95, 0x87, 0xc1, 0x4d, 0x8f, 0xe4, 0xb1,
- 0x4a, 0x06, 0xf5, 0x63, 0x05, 0xc6, 0xa3, 0xf6, 0x7a, 0x2a, 0x15, 0x06, 0xda, 0x86, 0x31, 0x27,
- 0x5c, 0xbc, 0x0a, 0x1f, 0xba, 0x7c, 0xac, 0x62, 0xb9, 0x3e, 0xd9, 0xeb, 0x56, 0xc7, 0xa2, 0x45,
- 0x70, 0x14, 0x56, 0xfd, 0x71, 0x0e, 0xca, 0x62, 0x3f, 0xa6, 0x66, 0xb4, 0x50, 0xa3, 0xaf, 0x42,
- 0x7c, 0x22, 0x93, 0x35, 0x05, 0xd5, 0x49, 0x82, 0xe3, 0x7c, 0x0d, 0xca, 0x2c, 0x99, 0x51, 0xc7,
- 0xcb, 0x08, 0x9e, 0x11, 0xcd, 0x0d, 0x74, 0x18, 0x21, 0x10, 0xdc, 0x2b, 0x02, 0x9a, 0x8b, 0xc3,
- 0x88, 0xe8, 0xb6, 0x6f, 0xa0, 0xf9, 0x4c, 0x79, 0x98, 0x6d, 0x35, 0x9b, 0x6d, 0x7e, 0xa8, 0x40,
- 0x25, 0x4d, 0x28, 0x12, 0x3a, 0x94, 0x93, 0x84, 0x8e, 0xdc, 0x83, 0x08, 0x1d, 0xbf, 0x56, 0x42,
- 0x47, 0xec, 0xba, 0xe8, 0x1d, 0x28, 0xb1, 0x3b, 0x2e, 0xef, 0x49, 0x78, 0x26, 0x7b, 0x25, 0xdb,
- 0x8d, 0xf8, 0xf5, 0xad, 0xaf, 0x13, 0x9d, 0xde, 0x20, 0x54, 0x0b, 0x2e, 0xb0, 0x01, 0x0d, 0xfb,
- 0xa8, 0x68, 0x0d, 0x0a, 0x6e, 0x9b, 0xe8, 0xd9, 0xb2, 0x0b, 0x5f, 0x54, 0xa3, 0x4d, 0xf4, 0xa0,
- 0x9a, 0x64, 0xbf, 0x30, 0x87, 0x50, 0xbf, 0x1f, 0xd6, 0xbf, 0xeb, 0x46, 0xf5, 0x9f, 0xa2, 0x55,
- 0xe5, 0x41, 0x68, 0xf5, 0x03, 0x3f, 0x68, 0xf1, 0x85, 0x5d, 0x37, 0x5c, 0x8a, 0xde, 0xea, 0xd3,
- 0x6c, 0x2d, 0x9b, 0x66, 0x99, 0x34, 0xd7, 0xab, 0xef, 0x45, 0x92, 0x12, 0xd2, 0xea, 0xab, 0x70,
- 0xc6, 0xa0, 0xa4, 0x25, 0xfd, 0xe7, 0x62, 0x06, 0xb5, 0x06, 0xc1, 0x65, 0x8d, 0x49, 0x62, 0x0f,
- 0x40, 0xfd, 0x6e, 0x2e, 0xb2, 0x76, 0xa6, 0x6e, 0xf4, 0x65, 0x18, 0x71, 0x45, 0x99, 0x27, 0x3d,
- 0x7f, 0x40, 0xc2, 0xf6, 0xab, 0xc6, 0x49, 0x31, 0xc9, 0x88, 0xa4, 0xb8, 0x38, 0xc0, 0x0a, 0xf9,
- 0x66, 0x2e, 0xa3, 0x6f, 0xc6, 0x8e, 0x39, 0xcd, 0x37, 0xd1, 0x75, 0x38, 0x4f, 0xee, 0x52, 0x62,
- 0x35, 0x49, 0x13, 0x0b, 0x1c, 0x5e, 0x1b, 0x7b, 0xe1, 0xbe, 0xd2, 0xeb, 0x56, 0xcf, 0xaf, 0x26,
- 0x8c, 0xe3, 0x44, 0x29, 0xd5, 0x84, 0xa4, 0xc3, 0x47, 0xb7, 0xa0, 0x68, 0xb7, 0xb5, 0x77, 0xfd,
- 0xf0, 0xbe, 0x90, 0xb6, 0xfc, 0xd7, 0x39, 0x57, 0x92, 0x71, 0x01, 0x5b, 0xbb, 0x37, 0x8c, 0x05,
- 0x98, 0xfa, 0x77, 0x05, 0x26, 0xe2, 0x81, 0xee, 0x18, 0xf1, 0x64, 0x1d, 0xc6, 0x5b, 0x1a, 0xd5,
- 0x77, 0xfd, 0x84, 0x29, 0x7a, 0xa6, 0x73, 0xbd, 0x6e, 0x75, 0xfc, 0x46, 0x64, 0xe4, 0xb0, 0x5b,
- 0x45, 0x57, 0x3b, 0xa6, 0x79, 0x10, 0xbd, 0xce, 0xc4, 0xe4, 0xd1, 0x9b, 0x30, 0xd9, 0x34, 0x5c,
- 0x6a, 0x58, 0x3a, 0x0d, 0x40, 0xbd, 0x26, 0xeb, 0xb3, 0xac, 0x60, 0x5e, 0x89, 0x0f, 0xa6, 0xe0,
- 0xf6, 0xa3, 0xa8, 0x3f, 0xca, 0xf9, 0x3e, 0xdc, 0x77, 0x01, 0x42, 0x8b, 0x00, 0xba, 0x7f, 0xe3,
- 0x8d, 0xb7, 0xc7, 0x82, 0xbb, 0x30, 0x0e, 0x71, 0x21, 0xb3, 0xef, 0x36, 0xfd, 0xc5, 0xe3, 0x5e,
- 0xbc, 0x1e, 0x9a, 0xbb, 0xf5, 0x3f, 0x15, 0x18, 0x8b, 0x64, 0xd2, 0x0c, 0x57, 0xec, 0x37, 0x60,
- 0x98, 0xdc, 0xd5, 0x74, 0x6a, 0xca, 0xb2, 0xe0, 0x99, 0xb4, 0x09, 0x57, 0x19, 0x5b, 0x34, 0x51,
- 0xf3, 0x06, 0xe0, 0xaa, 0x27, 0x8e, 0x25, 0x0e, 0xda, 0x85, 0xf1, 0x6d, 0xc3, 0x71, 0xe9, 0xd2,
- 0xbe, 0x66, 0x98, 0xda, 0x96, 0x49, 0x44, 0x26, 0x1d, 0x90, 0xa5, 0x1b, 0x9d, 0x2d, 0x89, 0x3b,
- 0x25, 0x16, 0x3a, 0x7e, 0x35, 0x82, 0x83, 0x63, 0xb8, 0xea, 0x1f, 0x8b, 0xb2, 0xa6, 0x4f, 0x29,
- 0x44, 0xd1, 0xd3, 0xac, 0xa0, 0xe5, 0x43, 0x42, 0x07, 0xa1, 0xca, 0x94, 0x93, 0xb1, 0x1c, 0x0f,
- 0x7d, 0x59, 0xc8, 0x65, 0xfa, 0xb2, 0x90, 0xcf, 0xf0, 0x65, 0xa1, 0x70, 0xe4, 0x97, 0x85, 0x05,
- 0x28, 0x6b, 0xcd, 0x96, 0x61, 0x2d, 0xe9, 0x3a, 0x71, 0x5d, 0x5e, 0x30, 0x8a, 0xbb, 0xe8, 0x52,
- 0x40, 0xc6, 0x61, 0x1e, 0x56, 0xfe, 0x50, 0xdb, 0x24, 0x8e, 0xb8, 0xdf, 0x15, 0xb3, 0x28, 0x76,
- 0xc3, 0x17, 0x08, 0xca, 0x9f, 0x80, 0xe6, 0xe2, 0x30, 0x62, 0xf2, 0x65, 0x77, 0xf8, 0x3e, 0x5e,
- 0x76, 0x4b, 0x9f, 0xe9, 0xb2, 0xfb, 0x5a, 0xf0, 0x31, 0x66, 0x84, 0xeb, 0xf6, 0x4a, 0xe8, 0x63,
- 0xcc, 0x61, 0xb7, 0xfa, 0x78, 0xda, 0x07, 0x27, 0x7a, 0xd0, 0x26, 0x6e, 0xed, 0x56, 0xf8, 0x8b,
- 0xcd, 0xfb, 0x8a, 0xdf, 0x7c, 0x69, 0xca, 0x9a, 0x97, 0xdf, 0xeb, 0xcb, 0x8b, 0xd7, 0x4e, 0x74,
- 0xed, 0xa9, 0x2d, 0xc7, 0xd0, 0xbc, 0x80, 0xf0, 0x74, 0xac, 0x2f, 0xd3, 0x4c, 0x6f, 0x0c, 0xf5,
- 0xad, 0x67, 0xda, 0x85, 0x0b, 0x89, 0xa8, 0xa7, 0xda, 0xf3, 0xdc, 0x94, 0x17, 0x13, 0xbf, 0x5b,
- 0xb3, 0x02, 0x79, 0x9d, 0x98, 0x22, 0x6f, 0xa5, 0x7e, 0x23, 0xea, 0xfb, 0x62, 0xe1, 0xb5, 0xa6,
- 0x97, 0x57, 0xaf, 0x63, 0x26, 0xae, 0x7e, 0xab, 0x20, 0x33, 0x55, 0xe0, 0xec, 0x19, 0x62, 0xd4,
- 0x12, 0x9c, 0x6d, 0x06, 0x09, 0x9d, 0xe7, 0x65, 0xcf, 0x45, 0x1f, 0x11, 0xcc, 0xe1, 0x0a, 0x84,
- 0xcb, 0xc5, 0xf9, 0xa3, 0x25, 0x49, 0xfe, 0x3e, 0x96, 0x24, 0x9b, 0x30, 0x1e, 0x7c, 0xbe, 0xb9,
- 0x61, 0x37, 0xa5, 0xcf, 0xd7, 0x64, 0x08, 0x5b, 0x8a, 0x8c, 0x1e, 0x76, 0xab, 0xe7, 0xe3, 0x37,
- 0x5b, 0x46, 0xc7, 0x31, 0x14, 0x74, 0x11, 0xce, 0xf0, 0xac, 0xc1, 0xa3, 0x42, 0x3e, 0x28, 0xbe,
- 0x78, 0xd8, 0xc7, 0xde, 0xd8, 0xe9, 0x47, 0x83, 0xcd, 0x50, 0x2f, 0x74, 0x98, 0x9f, 0xfd, 0xa5,
- 0xe3, 0x34, 0xf9, 0xbd, 0x9a, 0xc3, 0x1f, 0xf1, 0xb1, 0xd4, 0x7f, 0xf9, 0xf7, 0x08, 0xde, 0x9e,
- 0x43, 0x8f, 0x85, 0x8c, 0xb9, 0x5e, 0x16, 0xcb, 0xca, 0x5f, 0x23, 0x07, 0x9e, 0x65, 0x5f, 0x0c,
- 0x5b, 0xf6, 0x48, 0xca, 0x35, 0xf7, 0x25, 0x28, 0x92, 0xed, 0x6d, 0xa2, 0x53, 0x11, 0x99, 0x65,
- 0xe3, 0xb7, 0xb8, 0xca, 0xa9, 0x87, 0xac, 0xf0, 0x08, 0xa6, 0xf4, 0x88, 0x58, 0x88, 0x30, 0xfb,
- 0xa0, 0x46, 0x8b, 0x2c, 0x35, 0x9b, 0xa4, 0x29, 0x3e, 0x26, 0x1d, 0xe7, 0xdb, 0x1e, 0x6f, 0x1a,
- 0x6c, 0x48, 0x00, 0x1c, 0x60, 0xbd, 0x58, 0xfa, 0xc1, 0x4f, 0xaa, 0x43, 0xef, 0xfd, 0x79, 0x76,
- 0x48, 0x7d, 0x3f, 0x27, 0x8d, 0x3f, 0x50, 0xf7, 0xa0, 0x8d, 0xbf, 0x0a, 0x25, 0xbb, 0xcd, 0x78,
- 0x6d, 0x99, 0x95, 0x2e, 0xc9, 0xea, 0xe2, 0x75, 0x41, 0x3f, 0xec, 0x56, 0x2b, 0x71, 0x58, 0x39,
- 0x86, 0x7d, 0xe9, 0x40, 0x85, 0xf9, 0x4c, 0x2a, 0x2c, 0x1c, 0x5f, 0x85, 0xcb, 0x30, 0x19, 0x98,
- 0x4e, 0x83, 0xe8, 0xb6, 0xd5, 0x74, 0x85, 0xf5, 0xf2, 0xcc, 0xb1, 0x11, 0x1f, 0xc4, 0xfd, 0xfc,
- 0xea, 0x0f, 0x0b, 0x80, 0xfa, 0x0b, 0x8d, 0xa4, 0x08, 0xa0, 0x7c, 0x96, 0x08, 0x90, 0x3b, 0xd5,
- 0x08, 0x90, 0xbf, 0xbf, 0x11, 0xa0, 0x70, 0x44, 0x04, 0x78, 0x18, 0x4b, 0x88, 0xd3, 0x0a, 0x1a,
- 0x3f, 0x57, 0x60, 0xb2, 0xef, 0x15, 0x02, 0x7a, 0x09, 0xc6, 0x0c, 0x56, 0x08, 0x6f, 0x6b, 0xe2,
- 0xca, 0xe6, 0x19, 0xc6, 0x05, 0xb1, 0xcc, 0xb1, 0xb5, 0xf0, 0x20, 0x8e, 0xf2, 0xa2, 0x47, 0x21,
- 0x6f, 0xb4, 0x65, 0xaf, 0x96, 0xe7, 0xaa, 0xb5, 0x75, 0x17, 0x33, 0x1a, 0x33, 0xb9, 0x5d, 0xcd,
- 0x69, 0xde, 0xd1, 0x1c, 0xe6, 0xc9, 0x0e, 0xd3, 0x6e, 0x3e, 0x6a, 0x72, 0xaf, 0x46, 0x87, 0x71,
- 0x9c, 0x5f, 0xfd, 0xa9, 0x02, 0x8f, 0xa6, 0x5e, 0xe5, 0x32, 0xbf, 0x64, 0xd1, 0x00, 0xda, 0x9a,
- 0xa3, 0xb5, 0x88, 0xb8, 0xa3, 0x9c, 0xe0, 0xe5, 0x87, 0x7f, 0x09, 0x5a, 0xf7, 0x81, 0x70, 0x08,
- 0x54, 0xfd, 0x5e, 0x0e, 0xc6, 0xe4, 0x05, 0xd6, 0xeb, 0xdd, 0x9d, 0x7e, 0x63, 0xe7, 0x5a, 0xa4,
- 0xb1, 0x93, 0x5a, 0x52, 0x44, 0x96, 0x95, 0xd6, 0xda, 0x41, 0x0d, 0x28, 0xba, 0xfc, 0x7d, 0xd0,
- 0xa0, 0x0e, 0x7a, 0x14, 0x8e, 0x8b, 0x04, 0x8a, 0xf7, 0x7e, 0x63, 0x01, 0xa5, 0xf6, 0x14, 0x98,
- 0x89, 0xf0, 0x8b, 0x42, 0xcc, 0xc1, 0x64, 0x9b, 0x38, 0xc4, 0xd2, 0x09, 0xba, 0x04, 0x25, 0xad,
- 0x6d, 0xbc, 0xe2, 0xd8, 0x9d, 0xb6, 0x38, 0x45, 0xff, 0xf6, 0xb7, 0xb4, 0xbe, 0xc6, 0xe9, 0xd8,
- 0xe7, 0x60, 0xdc, 0x72, 0x2d, 0xc2, 0x96, 0x42, 0x9d, 0x4e, 0x8f, 0x8e, 0x7d, 0x0e, 0xbf, 0x2e,
- 0x2a, 0xa4, 0xd6, 0x45, 0x75, 0xc8, 0x77, 0x8c, 0xa6, 0x68, 0x34, 0x5f, 0x91, 0xc9, 0xe3, 0x56,
- 0xd6, 0x42, 0x98, 0x09, 0xab, 0xbf, 0x55, 0x60, 0x32, 0xb2, 0xc9, 0x07, 0xd0, 0x7d, 0x7a, 0x2d,
- 0xda, 0x7d, 0x7a, 0x22, 0xd3, 0x61, 0xa5, 0xf4, 0x9f, 0xf4, 0xd8, 0xf2, 0x79, 0x03, 0xea, 0x66,
- 0xfc, 0x99, 0xd1, 0xc5, 0x0c, 0x4d, 0xdc, 0xf4, 0xb7, 0x45, 0xea, 0xaf, 0x72, 0x70, 0x2e, 0xc1,
- 0x72, 0xd0, 0x6d, 0x80, 0x20, 0x68, 0x8b, 0xa9, 0x52, 0x23, 0x69, 0xdf, 0x47, 0x12, 0xfe, 0xf2,
- 0x24, 0x44, 0x0d, 0x61, 0xa1, 0x16, 0x94, 0x1d, 0xe2, 0x12, 0x67, 0x9f, 0x34, 0xaf, 0xf2, 0xdc,
- 0xcf, 0x14, 0xf5, 0x7c, 0x26, 0x45, 0xf5, 0x59, 0x69, 0x10, 0xb2, 0x71, 0x00, 0x89, 0xc3, 0xf8,
- 0xe8, 0x76, 0xa0, 0x30, 0xef, 0xeb, 0xf3, 0xe5, 0x01, 0xbb, 0x88, 0xbe, 0xca, 0x3b, 0x42, 0x75,
- 0x7f, 0x50, 0xe0, 0x42, 0x64, 0x79, 0x1b, 0xa4, 0xd5, 0x36, 0x35, 0x4a, 0x1e, 0x40, 0x88, 0x69,
- 0x44, 0x42, 0xcc, 0x42, 0x26, 0xed, 0xc9, 0xe5, 0xa5, 0x76, 0x91, 0x3f, 0x56, 0xe0, 0xd1, 0x44,
- 0x89, 0x07, 0xe0, 0x38, 0x38, 0xea, 0x38, 0x97, 0x8f, 0xb5, 0xa3, 0x14, 0x07, 0xfa, 0x7d, 0xda,
- 0x7e, 0xb8, 0x27, 0xfd, 0x6f, 0xe5, 0x01, 0xf5, 0x17, 0x0a, 0x8c, 0x4a, 0xce, 0x75, 0xdb, 0x36,
- 0x33, 0x5c, 0x2e, 0x17, 0x01, 0xc4, 0xeb, 0x53, 0xf9, 0x15, 0x25, 0x1f, 0xac, 0xf8, 0x15, 0x7f,
- 0x04, 0x87, 0xb8, 0xd0, 0x6b, 0x80, 0xe4, 0xda, 0x1a, 0xa6, 0xec, 0x09, 0xf2, 0x90, 0x9e, 0xaf,
- 0x4f, 0x0b, 0x59, 0x84, 0xfb, 0x38, 0x70, 0x82, 0x94, 0xfa, 0x3b, 0x25, 0xc8, 0xbd, 0x9c, 0xfc,
- 0xf0, 0xe9, 0x9c, 0x2f, 0x2b, 0x55, 0xe7, 0xe1, 0x0c, 0xc2, 0x39, 0x1f, 0xc2, 0x0c, 0xc2, 0xd7,
- 0x95, 0xe2, 0x00, 0xbf, 0x2c, 0xc4, 0xd6, 0xcf, 0x0d, 0x3f, 0x6b, 0x75, 0x76, 0x35, 0xf4, 0xce,
- 0xb8, 0xbc, 0xf8, 0xb9, 0x41, 0x0b, 0x61, 0x46, 0x99, 0xd8, 0x33, 0x0c, 0x3f, 0xc8, 0xc9, 0x1f,
- 0xeb, 0x41, 0x4e, 0xe1, 0x14, 0x1e, 0xe4, 0x9c, 0x39, 0xf2, 0x41, 0xce, 0x5a, 0x90, 0x2d, 0xbc,
- 0xdb, 0xc3, 0xcc, 0xd1, 0xe9, 0xf5, 0x88, 0x57, 0xbb, 0x18, 0xa6, 0xda, 0xc4, 0xf1, 0xc8, 0xc1,
- 0xda, 0x98, 0x27, 0x7a, 0x6f, 0x82, 0xa6, 0x7b, 0xdd, 0xea, 0xd4, 0x7a, 0x22, 0x07, 0x4e, 0x91,
- 0x44, 0x5b, 0x30, 0xce, 0x5b, 0x7c, 0x4d, 0xff, 0x45, 0x95, 0xf7, 0x6e, 0x48, 0x1d, 0xfc, 0x4c,
- 0x2e, 0xe8, 0x3c, 0x37, 0x22, 0x08, 0x38, 0x86, 0x58, 0x7f, 0xf9, 0xa3, 0x7b, 0x33, 0x43, 0x9f,
- 0xdc, 0x9b, 0x19, 0xfa, 0xf4, 0xde, 0xcc, 0xd0, 0x7b, 0xbd, 0x19, 0xe5, 0xa3, 0xde, 0x8c, 0xf2,
- 0x49, 0x6f, 0x46, 0xf9, 0xb4, 0x37, 0xa3, 0xfc, 0xb5, 0x37, 0xa3, 0x7c, 0xe7, 0x6f, 0x33, 0x43,
- 0x5f, 0x99, 0x4a, 0xfe, 0x77, 0x81, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x0c, 0xec, 0x16,
- 0x47, 0x30, 0x00, 0x00,
-}
+func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1817,7 +436,7 @@ func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
v := m.Requests[QualifiedName(keysForRequests[iNdEx])]
baseI := i
@@ -1902,7 +521,7 @@ func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2054,7 +673,7 @@ func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
baseI := i
@@ -2083,7 +702,7 @@ func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
baseI := i
@@ -2704,7 +1323,7 @@ func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2815,7 +1434,7 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int,
for k := range m.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])]
baseI := i
@@ -4890,7 +3509,7 @@ func (this *CapacityRequirements) String() string {
for k := range this.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
mapStringForRequests := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForRequests {
mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)])
@@ -4920,7 +3539,7 @@ func (this *CounterSet) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -4951,7 +3570,7 @@ func (this *Device) String() string {
for k := range this.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
for _, k := range keysForAttributes {
mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
@@ -4961,7 +3580,7 @@ func (this *Device) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
@@ -5168,7 +3787,7 @@ func (this *DeviceCounterConsumption) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -5211,7 +3830,7 @@ func (this *DeviceRequestAllocationResult) String() string {
for k := range this.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForConsumedCapacity {
mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)])
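Note on the generated.pb.go hunks above: the generated Marshal and String helpers now order map keys with the standard library's sort.Strings instead of github.com/gogo/protobuf/sortkeys. A minimal standalone sketch of that pattern follows; the sortedKeys helper and the counters map are illustrative only, not part of the vendored code.

package main

import (
	"fmt"
	"sort"
)

// sortedKeys mirrors what the generated code does: collect the map keys,
// sort them with the standard library, then iterate in that fixed order
// so the encoded or printed output is deterministic.
func sortedKeys(m map[string]int64) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // replaces github.com/gogo/protobuf/sortkeys.Strings
	return keys
}

func main() {
	counters := map[string]int64{"memory": 4, "cpu": 2}
	for _, k := range sortedKeys(counters) {
		fmt.Printf("%s=%d\n", k, counters[k])
	}
}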
diff --git a/operator/vendor/k8s.io/api/resource/v1/generated.proto b/operator/vendor/k8s.io/api/resource/v1/generated.proto
index 816a430c..c254137c 100644
--- a/operator/vendor/k8s.io/api/resource/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/resource/v1/generated.proto
@@ -41,7 +41,7 @@ message AllocatedDeviceStatus {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
optional string driver = 1;
@@ -65,6 +65,8 @@ message AllocatedDeviceStatus {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 7;
// Conditions contains the latest observation of the device's state.
@@ -88,6 +90,7 @@ message AllocatedDeviceStatus {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
optional NetworkDeviceData networkData = 6;
}
@@ -293,7 +296,7 @@ message Counter {
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -304,12 +307,14 @@ message CounterSet {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string name = 1;
// Counters defines the set of counters for this CounterSet
// The name of each counter must be unique in that set and must be a DNS label.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counters is 32.
//
// +required
map<string, Counter> counters = 2;
@@ -346,14 +351,17 @@ message Device {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
repeated DeviceCounterConsumption consumesCounters = 4;
// NodeName identifies the node where the device is available.
@@ -390,7 +398,9 @@ message Device {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -427,6 +437,8 @@ message Device {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 10;
// BindingFailureConditions defines the conditions for binding failure.
@@ -443,6 +455,8 @@ message Device {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 11;
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -462,6 +476,7 @@ message DeviceAllocationConfiguration {
// or from a claim.
//
// +required
+ // +k8s:required
optional string source = 1;
// Requests lists the names of requests where the configuration applies.
@@ -473,6 +488,10 @@ message DeviceAllocationConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 2;
optional DeviceConfiguration deviceConfiguration = 3;
@@ -484,6 +503,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceRequestAllocationResult results = 1;
// This field is a combination of all the claim and class configuration parameters.
@@ -496,6 +517,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
repeated DeviceAllocationConfiguration config = 2;
}
@@ -504,26 +527,30 @@ message DeviceAttribute {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional int64 int = 2;
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional bool bool = 3;
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string string = 4;
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string version = 5;
}
@@ -560,6 +587,11 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
repeated DeviceRequest requests = 1;
// These constraints must be satisfied by the set of devices that get
@@ -567,6 +599,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceConstraint constraints = 2;
// This field holds configuration for multiple potential drivers which
@@ -575,6 +609,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClaimConfiguration config = 3;
}
@@ -589,6 +625,10 @@ message DeviceClaimConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
optional DeviceConfiguration deviceConfiguration = 2;
@@ -604,6 +644,8 @@ message DeviceClaimConfiguration {
message DeviceClass {
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines what can be allocated and how to configure it.
@@ -639,6 +681,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 1;
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -649,6 +693,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClassConfiguration config = 2;
// ExtendedResourceName is the extended resource name for the devices of this class.
@@ -663,6 +709,8 @@ message DeviceClassSpec {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
optional string extendedResourceName = 4;
}
@@ -674,6 +722,7 @@ message DeviceConfiguration {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
optional OpaqueDeviceConfiguration opaque = 1;
}
@@ -691,6 +740,10 @@ message DeviceConstraint {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
// MatchAttribute requires that all devices in question have this
@@ -708,6 +761,8 @@ message DeviceConstraint {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
optional string matchAttribute = 2;
// DistinctAttribute requires that all devices in question have this
@@ -734,14 +789,13 @@ message DeviceCounterConsumption {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string counterSet = 1;
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
map<string, Counter> counters = 2;
@@ -773,6 +827,7 @@ message DeviceRequest {
//
// +optional
// +oneOf=deviceRequestType
+ // +k8s:optional
optional ExactDeviceRequest exactly = 2;
// FirstAvailable contains subrequests, of which exactly one will be
@@ -793,6 +848,11 @@ message DeviceRequest {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
repeated DeviceSubRequest firstAvailable = 3;
}
@@ -814,9 +874,11 @@ message DeviceRequestAllocationResult {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
optional string driver = 2;
// This name together with the driver name and the device name field
@@ -826,6 +888,8 @@ message DeviceRequestAllocationResult {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
optional string pool = 3;
// Device references one device instance via its name in the driver's
@@ -868,6 +932,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 7;
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -879,6 +945,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 8;
// ShareID uniquely identifies an individual allocation share of the device,
@@ -888,6 +956,8 @@ message DeviceRequestAllocationResult {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 9;
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -944,6 +1014,8 @@ message DeviceSubRequest {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
optional string deviceClassName = 2;
// Selectors define criteria which must be satisfied by a specific
@@ -953,6 +1025,8 @@ message DeviceSubRequest {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 3;
// AllocationMode and its related fields define how devices are allocated
@@ -1044,10 +1118,13 @@ message DeviceTaint {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
optional string effect = 3;
// TimeAdded represents the time at which the taint was added.
@@ -1065,6 +1142,8 @@ message DeviceToleration {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
optional string key = 1;
// Operator represents a key's relationship to the value.
@@ -1124,6 +1203,8 @@ message ExactDeviceRequest {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 2;
// AllocationMode and its related fields define how devices are allocated
@@ -1146,6 +1227,7 @@ message ExactDeviceRequest {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
optional string allocationMode = 3;
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -1221,6 +1303,8 @@ message NetworkDeviceData {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
optional string interfaceName = 1;
// IPs lists the network addresses assigned to the device's network interface.
@@ -1231,6 +1315,10 @@ message NetworkDeviceData {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
repeated string ips = 2;
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1238,6 +1326,8 @@ message NetworkDeviceData {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
optional string hardwareAddress = 3;
}
@@ -1251,9 +1341,11 @@ message OpaqueDeviceConfiguration {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
optional string driver = 1;
// Parameters can contain arbitrary data. It is the responsibility of
@@ -1282,6 +1374,7 @@ message ResourceClaim {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
optional ResourceClaimSpec spec = 2;
// Status describes whether the claim is ready to use and what has been allocated.
@@ -1336,6 +1429,8 @@ message ResourceClaimStatus {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
optional AllocationResult allocation = 1;
// ReservedFor indicates which entities are currently allowed to use
@@ -1363,6 +1458,10 @@ message ResourceClaimStatus {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
repeated ResourceClaimConsumerReference reservedFor = 2;
// Devices contains the status of each device allocated for this
@@ -1370,12 +1469,18 @@ message ResourceClaimStatus {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
repeated AllocatedDeviceStatus devices = 4;
}
@@ -1509,7 +1614,8 @@ message ResourceSliceSpec {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
optional string driver = 1;
@@ -1556,10 +1662,14 @@ message ResourceSliceSpec {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
repeated Device devices = 6;
// PerDeviceNodeSelection defines whether the access from nodes to
@@ -1577,13 +1687,21 @@ message ResourceSliceSpec {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
repeated CounterSet sharedCounters = 8;
}
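The comment changes in generated.proto above encode new structural limits: Devices and SharedCounters are mutually exclusive within a ResourceSlice (zeroOrOneOf=ResourceSliceType), at most 8 counter sets are allowed, each device may consume from at most 2 counter sets, and a slice that uses taints or counter consumption is capped at 64 devices instead of 128. A rough sketch of how a client-side check might apply those limits; the types, field names, and function below are simplified stand-ins, not the API's own validation.

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the vendored resource/v1 types. The limit values
// mirror the comments in this diff.
type device struct {
	Name             string
	Taints           []string
	ConsumesCounters []string
}

type resourceSliceSpec struct {
	Devices        []device
	SharedCounters []string
}

const (
	maxDevices                = 128
	maxDevicesWithTaintsOrCtr = 64
	maxCounterSets            = 8
)

func validate(spec resourceSliceSpec) error {
	// Only one of Devices and SharedCounters may be set (zeroOrOneOf=ResourceSliceType).
	if len(spec.Devices) > 0 && len(spec.SharedCounters) > 0 {
		return errors.New("only one of devices and sharedCounters can be set")
	}
	if len(spec.SharedCounters) > maxCounterSets {
		return fmt.Errorf("at most %d counter sets are allowed", maxCounterSets)
	}
	limit := maxDevices
	for _, d := range spec.Devices {
		if len(d.Taints) > 0 || len(d.ConsumesCounters) > 0 {
			// Taints or counter consumption lowers the per-slice device limit to 64.
			limit = maxDevicesWithTaintsOrCtr
			break
		}
	}
	if len(spec.Devices) > limit {
		return fmt.Errorf("too many devices: %d > %d", len(spec.Devices), limit)
	}
	return nil
}

func main() {
	spec := resourceSliceSpec{Devices: []device{{Name: "gpu-0", Taints: []string{"maintenance"}}}}
	fmt.Println(validate(spec))
}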
diff --git a/operator/vendor/k8s.io/api/resource/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/resource/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..97035a93
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1/generated.protomessage.pb.go
@@ -0,0 +1,108 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+
+func (*AllocationResult) ProtoMessage() {}
+
+func (*CELDeviceSelector) ProtoMessage() {}
+
+func (*CapacityRequestPolicy) ProtoMessage() {}
+
+func (*CapacityRequestPolicyRange) ProtoMessage() {}
+
+func (*CapacityRequirements) ProtoMessage() {}
+
+func (*Counter) ProtoMessage() {}
+
+func (*CounterSet) ProtoMessage() {}
+
+func (*Device) ProtoMessage() {}
+
+func (*DeviceAllocationConfiguration) ProtoMessage() {}
+
+func (*DeviceAllocationResult) ProtoMessage() {}
+
+func (*DeviceAttribute) ProtoMessage() {}
+
+func (*DeviceCapacity) ProtoMessage() {}
+
+func (*DeviceClaim) ProtoMessage() {}
+
+func (*DeviceClaimConfiguration) ProtoMessage() {}
+
+func (*DeviceClass) ProtoMessage() {}
+
+func (*DeviceClassConfiguration) ProtoMessage() {}
+
+func (*DeviceClassList) ProtoMessage() {}
+
+func (*DeviceClassSpec) ProtoMessage() {}
+
+func (*DeviceConfiguration) ProtoMessage() {}
+
+func (*DeviceConstraint) ProtoMessage() {}
+
+func (*DeviceCounterConsumption) ProtoMessage() {}
+
+func (*DeviceRequest) ProtoMessage() {}
+
+func (*DeviceRequestAllocationResult) ProtoMessage() {}
+
+func (*DeviceSelector) ProtoMessage() {}
+
+func (*DeviceSubRequest) ProtoMessage() {}
+
+func (*DeviceTaint) ProtoMessage() {}
+
+func (*DeviceToleration) ProtoMessage() {}
+
+func (*ExactDeviceRequest) ProtoMessage() {}
+
+func (*NetworkDeviceData) ProtoMessage() {}
+
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+
+func (*ResourceClaim) ProtoMessage() {}
+
+func (*ResourceClaimConsumerReference) ProtoMessage() {}
+
+func (*ResourceClaimList) ProtoMessage() {}
+
+func (*ResourceClaimSpec) ProtoMessage() {}
+
+func (*ResourceClaimStatus) ProtoMessage() {}
+
+func (*ResourceClaimTemplate) ProtoMessage() {}
+
+func (*ResourceClaimTemplateList) ProtoMessage() {}
+
+func (*ResourceClaimTemplateSpec) ProtoMessage() {}
+
+func (*ResourcePool) ProtoMessage() {}
+
+func (*ResourceSlice) ProtoMessage() {}
+
+func (*ResourceSliceList) ProtoMessage() {}
+
+func (*ResourceSliceSpec) ProtoMessage() {}
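The new generated.protomessage.pb.go above only adds empty ProtoMessage() marker methods, gated behind the kubernetes_protomessage_one_more_release build tag. A tiny illustration of the marker-interface pattern these stubs rely on; the interface and struct names below are local examples, not the vendored types.

package main

import "fmt"

// protoMessage mirrors the marker interface the generated stubs satisfy:
// an empty ProtoMessage() method tags a struct as a protobuf message
// without adding any behaviour.
type protoMessage interface {
	ProtoMessage()
}

type resourceSlice struct{ Name string }

func (*resourceSlice) ProtoMessage() {}

func main() {
	var m protoMessage = &resourceSlice{Name: "pool-a"}
	fmt.Printf("%T satisfies the marker interface\n", m)
}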
diff --git a/operator/vendor/k8s.io/api/resource/v1/types.go b/operator/vendor/k8s.io/api/resource/v1/types.go
index f2950444..29b4a5fb 100644
--- a/operator/vendor/k8s.io/api/resource/v1/types.go
+++ b/operator/vendor/k8s.io/api/resource/v1/types.go
@@ -101,7 +101,8 @@ type ResourceSliceSpec struct {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
@@ -148,11 +149,15 @@ type ResourceSliceSpec struct {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
- Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
+ Devices []Device `json:"devices,omitempty" protobuf:"bytes,6,name=devices"`
// PerDeviceNodeSelection defines whether the access from nodes to
// resources in the pool is set on the ResourceSlice level or on each
@@ -169,19 +174,27 @@ type ResourceSliceSpec struct {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
//
- // The maximum number of counters in all sets is 32.
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
SharedCounters []CounterSet `json:"sharedCounters,omitempty" protobuf:"bytes,8,name=sharedCounters"`
}
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -192,12 +205,14 @@ type CounterSet struct {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
Name string `json:"name" protobuf:"bytes,1,name=name"`
// Counters defines the set of counters for this CounterSet
// The name of each counter must be unique in that set and must be a DNS label.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counters is 32.
//
// +required
Counters map[string]Counter `json:"counters,omitempty" protobuf:"bytes,2,name=counters"`
@@ -246,13 +261,27 @@ type ResourcePool struct {
const ResourceSliceMaxSharedCapacity = 128
const ResourceSliceMaxDevices = 128
+const ResourceSliceMaxDevicesWithTaintsOrConsumesCounters = 64
const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name.
const BindingConditionsMaxSize = 4
const BindingFailureConditionsMaxSize = 4
-// Defines the max number of shared counters that can be specified
-// in a ResourceSlice. The number is summed up across all sets.
-const ResourceSliceMaxSharedCounters = 32
+// Defines the maximum number of counter sets (through the
+// SharedCounters field) that can be defined in a ResourceSlice.
+const ResourceSliceMaxCounterSets = 8
+
+// Defines the maximum number of counters that can be defined
+// in a counter set.
+const ResourceSliceMaxCountersPerCounterSet = 32
+
+// Defines the maximum number of device counter consumptions
+// (through the ConsumesCounters field) that can be defined per
+// device.
+const ResourceSliceMaxDeviceCounterConsumptionsPerDevice = 2
+
+// Defines the maximum number of counters that can be defined
+// per device counter consumption.
+const ResourceSliceMaxCountersPerDeviceCounterConsumption = 32
// Device represents one individual hardware instance that can be selected based
// on its attributes. Besides the name, exactly one field must be set.
@@ -285,14 +314,17 @@ type Device struct {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
ConsumesCounters []DeviceCounterConsumption `json:"consumesCounters,omitempty" protobuf:"bytes,4,rep,name=consumesCounters"`
// NodeName identifies the node where the device is available.
@@ -329,7 +361,9 @@ type Device struct {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -366,6 +400,8 @@ type Device struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,10,rep,name=bindingConditions"`
// BindingFailureConditions defines the conditions for binding failure.
@@ -382,6 +418,8 @@ type Device struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,11,rep,name=bindingFailureConditions"`
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -401,14 +439,13 @@ type DeviceCounterConsumption struct {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
CounterSet string `json:"counterSet" protobuf:"bytes,1,opt,name=counterSet"`
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
Counters map[string]Counter `json:"counters,omitempty" protobuf:"bytes,2,opt,name=counters"`
@@ -531,14 +568,6 @@ type CapacityRequestPolicyRange struct {
// Limit for the sum of the number of entries in both attributes and capacity.
const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
-// Limit for the total number of counters in each device.
-const ResourceSliceMaxCountersPerDevice = 32
-
-// Limit for the total number of counters defined in devices in
-// a ResourceSlice. We want to allow up to 64 devices to specify
-// up to 16 counters, so the limit for the ResourceSlice will be 1024.
-const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
-
// QualifiedName is the name of a device attribute or capacity.
//
// Attributes and capacities are defined either by the owner of the specific
@@ -558,6 +587,9 @@ const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
type QualifiedName string
// FullyQualifiedName is a QualifiedName where the domain is set.
+// Format validation cannot be added to this type because one of its usages,
+// DistinctAttribute, is validated conditionally. This conditional validation
+// cannot be expressed declaratively.
type FullyQualifiedName string
// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name.
@@ -575,34 +607,38 @@ type DeviceAttribute struct {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"`
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"`
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"`
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"`
}
// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value.
const DeviceAttributeMaxValueLength = 64
-// DeviceTaintsMaxLength is the maximum number of taints per device.
-const DeviceTaintsMaxLength = 4
+// DeviceTaintsMaxLength is the maximum number of taints per Device.
+const DeviceTaintsMaxLength = 16
// The device this taint is attached to has the "effect" on
// any claim which does not tolerate the taint and, through the claim,
@@ -624,16 +660,27 @@ type DeviceTaint struct {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
Effect DeviceTaintEffect `json:"effect" protobuf:"bytes,3,name=effect,casttype=DeviceTaintEffect"`
// ^^^^
//
// Implementing PreferNoSchedule would depend on a scoring solution for DRA.
// It might get added as part of that.
+ //
+ // A possible future new effect is NoExecuteWithPodDisruptionBudget:
+ // honor the pod disruption budget instead of simply deleting pods.
+ // This is currently undecided, it could also be a separate field.
+ //
+ // Validation must be prepared to allow unknown enums in stored objects,
+ // which will enable adding new enums within a single release without
+ // ratcheting.
// TimeAdded represents the time at which the taint was added.
// Added automatically during create or update if not set.
@@ -649,9 +696,13 @@ type DeviceTaint struct {
}
// +enum
+// +k8s:enum
type DeviceTaintEffect string
const (
+ // No effect, the taint is purely informational.
+ DeviceTaintEffectNone DeviceTaintEffect = "None"
+
// Do not allow new pods to schedule which use a tainted device unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
@@ -678,6 +729,7 @@ type ResourceSliceList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.34
+// +k8s:supportsSubresource=/status
// ResourceClaim describes a request for access to resources in the cluster,
// for use by workloads. For example, if a workload needs an accelerator device
@@ -695,6 +747,7 @@ type ResourceClaim struct {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
// Status describes whether the claim is ready to use and what has been allocated.
@@ -722,6 +775,11 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"`
// These constraints must be satisfied by the set of devices that get
@@ -729,6 +787,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"`
// This field holds configuration for multiple potential drivers which
@@ -737,6 +797,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
// Potential future extension, ignored by older schedulers. This is
@@ -787,6 +849,7 @@ type DeviceRequest struct {
//
// +optional
// +oneOf=deviceRequestType
+ // +k8s:optional
Exactly *ExactDeviceRequest `json:"exactly,omitempty" protobuf:"bytes,2,name=exactly"`
// FirstAvailable contains subrequests, of which exactly one will be
@@ -807,6 +870,11 @@ type DeviceRequest struct {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
FirstAvailable []DeviceSubRequest `json:"firstAvailable,omitempty" protobuf:"bytes,3,name=firstAvailable"`
}
@@ -834,6 +902,8 @@ type ExactDeviceRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,2,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -856,6 +926,7 @@ type ExactDeviceRequest struct {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"`
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -951,6 +1022,8 @@ type DeviceSubRequest struct {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"`
// Selectors define criteria which must be satisfied by a specific
@@ -960,6 +1033,8 @@ type DeviceSubRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -1066,6 +1141,8 @@ const (
DeviceTolerationsMaxLength = 16
)
+// +enum
+// +k8s:enum
type DeviceAllocationMode string
// Valid [DeviceRequest.CountMode] values.
@@ -1184,6 +1261,10 @@ type DeviceConstraint struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
// MatchAttribute requires that all devices in question have this
@@ -1201,6 +1282,8 @@ type DeviceConstraint struct {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
// Potential future extension, not part of the current design:
@@ -1241,6 +1324,10 @@ type DeviceClaimConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"`
@@ -1254,6 +1341,7 @@ type DeviceConfiguration struct {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"`
}
@@ -1267,9 +1355,11 @@ type OpaqueDeviceConfiguration struct {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
// Parameters can contain arbitrary data. It is the responsibility of
@@ -1295,6 +1385,8 @@ type DeviceToleration struct {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
@@ -1333,6 +1425,7 @@ type DeviceToleration struct {
// A toleration operator is the set of operators that can be used in a toleration.
//
// +enum
+// +k8s:enum
type DeviceTolerationOperator string
const (
@@ -1346,6 +1439,8 @@ type ResourceClaimStatus struct {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"`
// ReservedFor indicates which entities are currently allowed to use
@@ -1373,6 +1468,10 @@ type ResourceClaimStatus struct {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
// DeallocationRequested is tombstoned since Kubernetes 1.32 where
@@ -1385,12 +1484,18 @@ type ResourceClaimStatus struct {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
@@ -1453,6 +1558,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
// This field is a combination of all the claim and class configuration parameters.
@@ -1465,6 +1572,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
}
@@ -1490,9 +1599,11 @@ type DeviceRequestAllocationResult struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
// This name together with the driver name and the device name field
@@ -1502,6 +1613,8 @@ type DeviceRequestAllocationResult struct {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
// Device references one device instance via its name in the driver's
@@ -1544,6 +1657,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,7,rep,name=bindingConditions"`
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -1555,6 +1670,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,8,rep,name=bindingFailureConditions"`
// ShareID uniquely identifies an individual allocation share of the device,
@@ -1564,6 +1681,8 @@ type DeviceRequestAllocationResult struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *types.UID `json:"shareID,omitempty" protobuf:"bytes,9,opt,name=shareID"`
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -1587,6 +1706,7 @@ type DeviceAllocationConfiguration struct {
// or from a claim.
//
// +required
+ // +k8s:required
Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
// Requests lists the names of requests where the configuration applies.
@@ -1598,17 +1718,23 @@ type DeviceAllocationConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
}
+// +enum
+// +k8s:enum
type AllocationConfigSource string
// Valid [DeviceAllocationConfiguration.Source] values.
const (
- AllocationConfigSourceClass = "FromClass"
- AllocationConfigSourceClaim = "FromClaim"
+ AllocationConfigSourceClass AllocationConfigSource = "FromClass"
+ AllocationConfigSourceClaim AllocationConfigSource = "FromClaim"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -1641,6 +1767,8 @@ type DeviceClass struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines what can be allocated and how to configure it.
@@ -1661,6 +1789,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"`
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -1671,6 +1801,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
// SuitableNodes is tombstoned since Kubernetes 1.32 where
@@ -1690,6 +1822,8 @@ type DeviceClassSpec struct {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
ExtendedResourceName *string `json:"extendedResourceName,omitempty" protobuf:"bytes,4,opt,name=extendedResourceName"`
}
@@ -1791,7 +1925,7 @@ type AllocatedDeviceStatus struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
@@ -1815,6 +1949,8 @@ type AllocatedDeviceStatus struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *string `json:"shareID,omitempty" protobuf:"bytes,7,opt,name=shareID"`
// Conditions contains the latest observation of the device's state.
@@ -1838,6 +1974,7 @@ type AllocatedDeviceStatus struct {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"`
}
@@ -1852,6 +1989,8 @@ type NetworkDeviceData struct {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"`
// IPs lists the network addresses assigned to the device's network interface.
@@ -1862,6 +2001,10 @@ type NetworkDeviceData struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"`
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1869,5 +2012,7 @@ type NetworkDeviceData struct {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"`
}
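The types.go changes above add the None taint effect, state that consumers must treat unknown effects like None, and give the AllocationConfigSource constants their typed form. A hedged sketch of the tolerant enum handling that comment implies; normalizeEffect and the local type are illustrative only.

package main

import "fmt"

// deviceTaintEffect is a local stand-in for the vendored DeviceTaintEffect type.
type deviceTaintEffect string

const (
	effectNone       deviceTaintEffect = "None"
	effectNoSchedule deviceTaintEffect = "NoSchedule"
	effectNoExecute  deviceTaintEffect = "NoExecute"
)

// normalizeEffect applies the forward-compatibility rule from the comments
// above: effects the consumer does not recognize are treated like None
// rather than rejected.
func normalizeEffect(e deviceTaintEffect) deviceTaintEffect {
	switch e {
	case effectNoSchedule, effectNoExecute:
		return e
	default:
		// Unknown or future effects (and the explicit "None") are informational only.
		return effectNone
	}
}

func main() {
	for _, e := range []deviceTaintEffect{"NoSchedule", "NoExecuteWithPodDisruptionBudget", ""} {
		fmt.Printf("%q -> %q\n", e, normalizeEffect(e))
	}
}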
diff --git a/operator/vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go
index bf81ced6..6ba5f598 100644
--- a/operator/vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/resource/v1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AllocatedDeviceStatus = map[string]string{
"": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.\n\nThe combination of Driver, Pool, Device, and ShareID must match the corresponding key in Status.Allocation.Devices.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"shareID": "ShareID uniquely identifies an individual allocation share of the device.",
@@ -103,9 +103,9 @@ func (Counter) SwaggerDoc() map[string]string {
}
var map_CounterSet = map[string]string{
- "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourceSlice.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
+ "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourcePool.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
"name": "Name defines the name of the counter set. It must be a DNS label.",
- "counters": "Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label.\n\nThe maximum number of counters in all sets is 32.",
+ "counters": "Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label.\n\nThe maximum number of counters is 32.",
}
func (CounterSet) SwaggerDoc() map[string]string {
@@ -117,11 +117,11 @@ var map_Device = map[string]string{
"name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.",
"attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
"capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
- "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe total number of device counter consumption entries must be <= 32. In addition, the total number in the entire ResourceSlice must be <= 1024 (for example, 64 devices with 16 counters each).",
+ "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe maximum number of device counter consumptions per device is 2.",
"nodeName": "NodeName identifies the node where the device is available.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"nodeSelector": "NodeSelector defines the nodes where the device is available.\n\nMust use exactly one term.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"allNodes": "AllNodes indicates that all nodes have access to the device.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
- "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 4.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
+ "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 16. If taints are set for any device in a ResourceSlice, then the maximum number of allowed devices per ResourceSlice is 64 instead of 128.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
"bindsToNode": "BindsToNode indicates if the usage of an allocation involving this device has to be limited to exactly the node that was chosen when allocating the claim. If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector to match the node where the allocation was made.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingConditions": "BindingConditions defines the conditions for proceeding with binding. All of these conditions must be set in the per-device status conditions with a value of True to proceed with binding the pod to the node while scheduling the pod.\n\nThe maximum number of binding conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingFailureConditions": "BindingFailureConditions defines the conditions for binding failure. They may be set in the per-device status conditions. If any is set to \"True\", a binding failure occurred.\n\nThe maximum number of binding failure conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
@@ -256,7 +256,7 @@ func (DeviceConstraint) SwaggerDoc() map[string]string {
var map_DeviceCounterConsumption = map[string]string{
"": "DeviceCounterConsumption defines a set of counters that a device will consume from a CounterSet.",
"counterSet": "CounterSet is the name of the set from which the counters defined will be consumed.",
- "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each).",
+ "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number of counters is 32.",
}
func (DeviceCounterConsumption) SwaggerDoc() map[string]string {
@@ -277,7 +277,7 @@ func (DeviceRequest) SwaggerDoc() map[string]string {
var map_DeviceRequestAllocationResult = map[string]string{
"": "DeviceRequestAllocationResult contains the allocation result for one request.",
"request": "Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format /.\n\nMultiple devices may have been allocated per request.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
@@ -320,7 +320,7 @@ var map_DeviceTaint = map[string]string{
"": "The device this taint is attached to has the \"effect\" on any claim which does not tolerate the taint and, through the claim, to pods using the claim.",
"key": "The taint key to be applied to a device. Must be a label name.",
"value": "The taint value corresponding to the taint key. Must be a label value.",
- "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here.",
+ "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them.\n\nValid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. More effects may get added in the future. Consumers must treat unknown effects like None.",
"timeAdded": "TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set.",
}
@@ -369,7 +369,7 @@ func (NetworkDeviceData) SwaggerDoc() map[string]string {
var map_OpaqueDeviceConfiguration = map[string]string{
"": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
- "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
}
@@ -493,14 +493,14 @@ func (ResourceSliceList) SwaggerDoc() map[string]string {
var map_ResourceSliceSpec = map[string]string{
"": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.",
- "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.",
+ "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters. This field is immutable.",
"pool": "Pool describes the pool that this ResourceSlice belongs to.",
"nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable.",
"nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
"allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.",
+ "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.",
"perDeviceNodeSelection": "PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the SharedCounters must be unique in the ResourceSlice.\n\nThe maximum number of counters in all sets is 32.",
+ "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the counter sets must be unique in the ResourcePool.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.\n\nThe maximum number of counter sets is 8.",
}
func (ResourceSliceSpec) SwaggerDoc() map[string]string {
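
The updated v1 strings above split shared counters out of device-bearing ResourceSlices and tighten the per-slice limits. A minimal Go sketch of both halves, assuming the Go field names mirror the JSON names in those strings (SharedCounters, ConsumesCounters, CounterSet) and that Counter wraps a resource.Quantity; the counter-set and device names are made up for illustration.

```go
package main

import (
	"fmt"

	resourcev1 "k8s.io/api/resource/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// One ResourceSlice publishes only the shared counter set: per the
	// docs above, at most 8 sets and 32 counters, and Devices and
	// SharedCounters are mutually exclusive within a slice.
	partitions := resourcev1.CounterSet{
		Name: "gpu-0-partitions",
		Counters: map[string]resourcev1.Counter{
			"memory": {Value: resource.MustParse("40Gi")},
		},
	}

	// A device in a second ResourceSlice consumes from that set. If any
	// device in a slice sets taints or consumes counters, the slice is
	// limited to 64 devices instead of 128.
	device := resourcev1.Device{
		Name: "gpu-0-mig-1g",
		ConsumesCounters: []resourcev1.DeviceCounterConsumption{{
			CounterSet: "gpu-0-partitions",
			Counters: map[string]resourcev1.Counter{
				"memory": {Value: resource.MustParse("5Gi")},
			},
		}},
	}

	fmt.Println(partitions.Name, device.Name)
}
```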
diff --git a/operator/vendor/k8s.io/api/resource/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/resource/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..c0de4f9e
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1/zz_generated.model_name.go
@@ -0,0 +1,237 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocatedDeviceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.AllocatedDeviceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.AllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CELDeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.CELDeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.CapacityRequestPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicyRange) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.CapacityRequestPolicyRange"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequirements) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.CapacityRequirements"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Counter) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.Counter"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CounterSet) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.CounterSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Device) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.Device"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceAllocationConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAttribute) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceAttribute"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaimConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClaimConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClass) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClassConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConstraint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceConstraint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCounterConsumption) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceCounterConsumption"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequestAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceRequestAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSubRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceSubRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceTaint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceToleration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.DeviceToleration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExactDeviceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ExactDeviceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkDeviceData) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.NetworkDeviceData"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in OpaqueDeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.OpaqueDeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimConsumerReference) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimConsumerReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplate) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimTemplate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimTemplateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceClaimTemplateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePool) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourcePool"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSlice) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceSlice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceSliceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1.ResourceSliceSpec"
+}
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/doc.go b/operator/vendor/k8s.io/api/resource/v1alpha3/doc.go
index 82e64f1d..b6bfd79c 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/doc.go
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.resource.v1alpha3
+
// +groupName=resource.k8s.io
// Package v1alpha3 is the v1alpha3 version of the resource API.
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
index dc37717e..66b9f8f6 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.pb.go
@@ -24,284 +24,28 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (*CELDeviceSelector) ProtoMessage() {}
-func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{0}
-}
-func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CELDeviceSelector.Merge(m, src)
-}
-func (m *CELDeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *CELDeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
-
-func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (*DeviceSelector) ProtoMessage() {}
-func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{1}
-}
-func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSelector.Merge(m, src)
-}
-func (m *DeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
-
-func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (*DeviceTaint) ProtoMessage() {}
-func (*DeviceTaint) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{2}
-}
-func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaint.Merge(m, src)
-}
-func (m *DeviceTaint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
-
-func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} }
-func (*DeviceTaintRule) ProtoMessage() {}
-func (*DeviceTaintRule) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{3}
-}
-func (m *DeviceTaintRule) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaintRule) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRule.Merge(m, src)
-}
-func (m *DeviceTaintRule) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintRule) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRule.DiscardUnknown(m)
-}
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-var xxx_messageInfo_DeviceTaintRule proto.InternalMessageInfo
+func (m *DeviceTaintRule) Reset() { *m = DeviceTaintRule{} }
-func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} }
-func (*DeviceTaintRuleList) ProtoMessage() {}
-func (*DeviceTaintRuleList) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{4}
-}
-func (m *DeviceTaintRuleList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRuleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaintRuleList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRuleList.Merge(m, src)
-}
-func (m *DeviceTaintRuleList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintRuleList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRuleList.DiscardUnknown(m)
-}
+func (m *DeviceTaintRuleList) Reset() { *m = DeviceTaintRuleList{} }
-var xxx_messageInfo_DeviceTaintRuleList proto.InternalMessageInfo
+func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} }
-func (m *DeviceTaintRuleSpec) Reset() { *m = DeviceTaintRuleSpec{} }
-func (*DeviceTaintRuleSpec) ProtoMessage() {}
-func (*DeviceTaintRuleSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{5}
-}
-func (m *DeviceTaintRuleSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintRuleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaintRuleSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintRuleSpec.Merge(m, src)
-}
-func (m *DeviceTaintRuleSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintRuleSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintRuleSpec.DiscardUnknown(m)
-}
+func (m *DeviceTaintRuleStatus) Reset() { *m = DeviceTaintRuleStatus{} }
-var xxx_messageInfo_DeviceTaintRuleSpec proto.InternalMessageInfo
-
-func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} }
-func (*DeviceTaintSelector) ProtoMessage() {}
-func (*DeviceTaintSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_66649ee9bbcd89d2, []int{6}
-}
-func (m *DeviceTaintSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaintSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaintSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaintSelector.Merge(m, src)
-}
-func (m *DeviceTaintSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaintSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaintSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceTaintSelector proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.CELDeviceSelector")
- proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector")
- proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaint")
- proto.RegisterType((*DeviceTaintRule)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRule")
- proto.RegisterType((*DeviceTaintRuleList)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleList")
- proto.RegisterType((*DeviceTaintRuleSpec)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintRuleSpec")
- proto.RegisterType((*DeviceTaintSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceTaintSelector")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/resource/v1alpha3/generated.proto", fileDescriptor_66649ee9bbcd89d2)
-}
-
-var fileDescriptor_66649ee9bbcd89d2 = []byte{
- // 716 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xbf, 0x6f, 0xd3, 0x40,
- 0x14, 0x8e, 0x9b, 0xa4, 0x24, 0x17, 0xda, 0xd2, 0xeb, 0x12, 0x55, 0xc5, 0xae, 0xdc, 0xa5, 0xa0,
- 0xd6, 0x26, 0x01, 0x21, 0x04, 0x62, 0x20, 0x6d, 0x84, 0x80, 0x52, 0xd0, 0xb5, 0x02, 0x09, 0x15,
- 0x89, 0xab, 0xf3, 0x9a, 0x98, 0xd8, 0xb1, 0xe5, 0x73, 0x22, 0xba, 0xf5, 0x4f, 0x60, 0x84, 0x8d,
- 0xff, 0x86, 0x8c, 0x1d, 0x18, 0x3a, 0xa0, 0x88, 0x9a, 0xbf, 0x80, 0x95, 0x09, 0xdd, 0xf9, 0x12,
- 0xa7, 0x8e, 0x28, 0x61, 0x8b, 0xbf, 0xfb, 0xde, 0xf7, 0xde, 0xf7, 0x7e, 0x28, 0x68, 0xa3, 0x7d,
- 0x8f, 0x19, 0xb6, 0x67, 0x52, 0xdf, 0x36, 0x03, 0x60, 0x5e, 0x37, 0xb0, 0xc0, 0xec, 0x55, 0xa8,
- 0xe3, 0xb7, 0xe8, 0x6d, 0xb3, 0x09, 0x1d, 0x08, 0x68, 0x08, 0x0d, 0xc3, 0x0f, 0xbc, 0xd0, 0xc3,
- 0x2b, 0x31, 0xdb, 0xa0, 0xbe, 0x6d, 0x0c, 0xd9, 0xc6, 0x90, 0xbd, 0xbc, 0xd9, 0xb4, 0xc3, 0x56,
- 0xf7, 0xd0, 0xb0, 0x3c, 0xd7, 0x6c, 0x7a, 0x4d, 0xcf, 0x14, 0x41, 0x87, 0xdd, 0x23, 0xf1, 0x25,
- 0x3e, 0xc4, 0xaf, 0x58, 0x6c, 0xf9, 0x4e, 0x92, 0xda, 0xa5, 0x56, 0xcb, 0xee, 0x40, 0x70, 0x6c,
- 0xfa, 0xed, 0x26, 0x07, 0x98, 0xe9, 0x42, 0x48, 0xcd, 0x5e, 0x25, 0x5d, 0xc2, 0xb2, 0xf9, 0xb7,
- 0xa8, 0xa0, 0xdb, 0x09, 0x6d, 0x17, 0x26, 0x02, 0xee, 0xfe, 0x2b, 0x80, 0x59, 0x2d, 0x70, 0x69,
- 0x3a, 0x4e, 0x7f, 0x8c, 0x16, 0xb7, 0xea, 0x3b, 0xdb, 0xd0, 0xb3, 0x2d, 0xd8, 0x03, 0x07, 0xac,
- 0xd0, 0x0b, 0x70, 0x15, 0x21, 0xf8, 0xe0, 0x07, 0xc0, 0x98, 0xed, 0x75, 0xca, 0xca, 0xaa, 0xb2,
- 0x5e, 0xac, 0xe1, 0xfe, 0x40, 0xcb, 0x44, 0x03, 0x0d, 0xd5, 0x47, 0x2f, 0x64, 0x8c, 0xa5, 0x1f,
- 0xa0, 0xf9, 0x94, 0xca, 0x53, 0x94, 0xb5, 0xc0, 0x11, 0xe1, 0xa5, 0xaa, 0x69, 0x5c, 0xd6, 0x54,
- 0x63, 0xa2, 0x86, 0xda, 0x95, 0x68, 0xa0, 0x65, 0xb7, 0xea, 0x3b, 0x84, 0x8b, 0xe8, 0xbf, 0x14,
- 0x54, 0x8a, 0x09, 0xfb, 0xd4, 0xee, 0x84, 0xf8, 0x3a, 0xca, 0xb6, 0xe1, 0x58, 0x96, 0x56, 0x92,
- 0xa5, 0x65, 0x9f, 0xc1, 0x31, 0xe1, 0x38, 0x5e, 0x43, 0xf9, 0x1e, 0x75, 0xba, 0x50, 0x9e, 0x11,
- 0x84, 0x39, 0x49, 0xc8, 0xbf, 0xe2, 0x20, 0x89, 0xdf, 0xf0, 0x03, 0x34, 0x0b, 0x47, 0x47, 0x60,
- 0x85, 0xe5, 0xac, 0x60, 0xad, 0x49, 0xd6, 0x6c, 0x5d, 0xa0, 0xbf, 0x07, 0xda, 0xe2, 0x58, 0xca,
- 0x18, 0x24, 0x32, 0x04, 0xbf, 0x46, 0x45, 0xde, 0xd6, 0x47, 0x8d, 0x06, 0x34, 0xca, 0x39, 0x61,
- 0xf1, 0xe6, 0x98, 0xc5, 0xd1, 0x0c, 0x0c, 0xbf, 0xdd, 0xe4, 0x00, 0x33, 0xf8, 0xa8, 0x8d, 0x5e,
- 0xc5, 0xd8, 0xb7, 0x5d, 0xa8, 0xcd, 0x45, 0x03, 0xad, 0xb8, 0x3f, 0x14, 0x20, 0x89, 0xd6, 0xfd,
- 0xc2, 0xa7, 0x2f, 0x5a, 0xe6, 0xe4, 0xfb, 0x6a, 0x46, 0xef, 0x2b, 0x68, 0x61, 0xac, 0x00, 0xd2,
- 0x75, 0x00, 0xbf, 0x43, 0x05, 0xae, 0xd3, 0xa0, 0x21, 0x95, 0x8d, 0xbd, 0x35, 0x5d, 0xd6, 0x17,
- 0x87, 0xef, 0xc1, 0x0a, 0x9f, 0x43, 0x48, 0x93, 0x49, 0x26, 0x18, 0x19, 0xa9, 0xe2, 0x3d, 0x94,
- 0x63, 0x3e, 0x58, 0xa2, 0x73, 0xa5, 0x6a, 0xe5, 0xf2, 0xb1, 0xa5, 0xca, 0xdb, 0xf3, 0xc1, 0xaa,
- 0x5d, 0x95, 0xf2, 0x39, 0xfe, 0x45, 0x84, 0x98, 0xfe, 0x55, 0x41, 0x4b, 0x29, 0xee, 0x8e, 0xcd,
- 0x42, 0x7c, 0x30, 0x61, 0xc7, 0x98, 0xce, 0x0e, 0x8f, 0x16, 0x66, 0xae, 0xc9, 0x6c, 0x85, 0x21,
- 0x32, 0x66, 0x85, 0xa0, 0xbc, 0x1d, 0x82, 0xcb, 0xca, 0x33, 0xab, 0xd9, 0xf5, 0x52, 0x75, 0xf3,
- 0xbf, 0xbc, 0x24, 0x4b, 0xf3, 0x84, 0x6b, 0x90, 0x58, 0x4a, 0xff, 0x36, 0xe9, 0x84, 0xfb, 0xc4,
- 0x2e, 0x9a, 0x6f, 0x5c, 0x58, 0x60, 0xe9, 0x67, 0xfa, 0x06, 0x8e, 0x36, 0x1f, 0x47, 0x03, 0x2d,
- 0x75, 0x4b, 0x24, 0x25, 0x8e, 0x77, 0x51, 0x3e, 0xe4, 0x41, 0x72, 0x4c, 0x37, 0xa6, 0xce, 0x92,
- 0xd8, 0x8a, 0xeb, 0x8f, 0x65, 0xf4, 0xcf, 0x33, 0x17, 0x6c, 0x8d, 0xf2, 0x3c, 0x44, 0x0b, 0x71,
- 0xe6, 0x2d, 0x87, 0x32, 0xb6, 0x4b, 0x5d, 0x90, 0x37, 0xb7, 0x14, 0x0d, 0x34, 0xb9, 0x9d, 0xa3,
- 0x27, 0x92, 0xe6, 0x62, 0x1d, 0xcd, 0x36, 0x02, 0xbb, 0x07, 0x81, 0x3c, 0x44, 0xc4, 0xcf, 0x6b,
- 0x5b, 0x20, 0x44, 0xbe, 0xe0, 0x15, 0x94, 0xf3, 0x3d, 0xcf, 0x91, 0x47, 0x58, 0xe0, 0x9b, 0xf3,
- 0xd2, 0xf3, 0x1c, 0x22, 0x50, 0xa1, 0x20, 0x44, 0xc5, 0x91, 0x0d, 0x15, 0x04, 0x42, 0xe4, 0x0b,
- 0x7e, 0x8b, 0x8a, 0x4c, 0x16, 0xcc, 0xca, 0x79, 0x31, 0xeb, 0x8d, 0x69, 0x1a, 0x32, 0xea, 0xf8,
- 0xa2, 0xec, 0x49, 0x71, 0x88, 0x30, 0x92, 0x28, 0xd6, 0x6a, 0xfd, 0x73, 0x35, 0x73, 0x7a, 0xae,
- 0x66, 0xce, 0xce, 0xd5, 0xcc, 0x49, 0xa4, 0x2a, 0xfd, 0x48, 0x55, 0x4e, 0x23, 0x55, 0x39, 0x8b,
- 0x54, 0xe5, 0x47, 0xa4, 0x2a, 0x1f, 0x7f, 0xaa, 0x99, 0x37, 0x2b, 0x97, 0xfd, 0xc5, 0xfc, 0x09,
- 0x00, 0x00, 0xff, 0xff, 0x7e, 0xb1, 0x06, 0x7b, 0x81, 0x06, 0x00, 0x00,
-}
+func (m *DeviceTaintSelector) Reset() { *m = DeviceTaintSelector{} }
func (m *CELDeviceSelector) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -436,6 +180,16 @@ func (m *DeviceTaintRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
{
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
@@ -551,7 +305,7 @@ func (m *DeviceTaintRuleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
+func (m *DeviceTaintRuleStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -561,20 +315,20 @@ func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleStatus) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DeviceTaintRuleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if len(m.Selectors) > 0 {
- for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -582,9 +336,32 @@ func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x2a
+ dAtA[i] = 0xa
}
}
+ return len(dAtA) - i, nil
+}
+
+func (m *DeviceTaintSelector) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeviceTaintSelector) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
if m.Device != nil {
i -= len(*m.Device)
copy(dAtA[i:], *m.Device)
@@ -606,13 +383,6 @@ func (m *DeviceTaintSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x12
}
- if m.DeviceClassName != nil {
- i -= len(*m.DeviceClassName)
- copy(dAtA[i:], *m.DeviceClassName)
- i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DeviceClassName)))
- i--
- dAtA[i] = 0xa
- }
return len(dAtA) - i, nil
}
@@ -680,6 +450,8 @@ func (m *DeviceTaintRule) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
l = m.Spec.Size()
n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -715,16 +487,27 @@ func (m *DeviceTaintRuleSpec) Size() (n int) {
return n
}
-func (m *DeviceTaintSelector) Size() (n int) {
+func (m *DeviceTaintRuleStatus) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- if m.DeviceClassName != nil {
- l = len(*m.DeviceClassName)
- n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *DeviceTaintSelector) Size() (n int) {
+ if m == nil {
+ return 0
}
+ var l int
+ _ = l
if m.Driver != nil {
l = len(*m.Driver)
n += 1 + l + sovGenerated(uint64(l))
@@ -737,12 +520,6 @@ func (m *DeviceTaintSelector) Size() (n int) {
l = len(*m.Device)
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.Selectors) > 0 {
- for _, e := range m.Selectors {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
return n
}
@@ -779,6 +556,7 @@ func (this *DeviceTaintRule) String() string {
s := strings.Join([]string{`&DeviceTaintRule{`,
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeviceTaintRuleSpec", "DeviceTaintRuleSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeviceTaintRuleStatus", "DeviceTaintRuleStatus", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
@@ -810,21 +588,29 @@ func (this *DeviceTaintRuleSpec) String() string {
}, "")
return s
}
-func (this *DeviceTaintSelector) String() string {
+func (this *DeviceTaintRuleStatus) String() string {
if this == nil {
return "nil"
}
- repeatedStringForSelectors := "[]DeviceSelector{"
- for _, f := range this.Selectors {
- repeatedStringForSelectors += strings.Replace(strings.Replace(f.String(), "DeviceSelector", "DeviceSelector", 1), `&`, ``, 1) + ","
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&DeviceTaintRuleStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *DeviceTaintSelector) String() string {
+ if this == nil {
+ return "nil"
}
- repeatedStringForSelectors += "}"
s := strings.Join([]string{`&DeviceTaintSelector{`,
- `DeviceClassName:` + valueToStringGenerated(this.DeviceClassName) + `,`,
`Driver:` + valueToStringGenerated(this.Driver) + `,`,
`Pool:` + valueToStringGenerated(this.Pool) + `,`,
`Device:` + valueToStringGenerated(this.Device) + `,`,
- `Selectors:` + repeatedStringForSelectors + `,`,
`}`,
}, "")
return s
@@ -1282,6 +1068,39 @@ func (m *DeviceTaintRule) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1539,7 +1358,7 @@ func (m *DeviceTaintRuleSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
+func (m *DeviceTaintRuleStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -1562,17 +1381,17 @@ func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group")
+ return fmt.Errorf("proto: DeviceTaintRuleStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: DeviceTaintRuleStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeviceClassName", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -1582,25 +1401,76 @@ func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := string(dAtA[iNdEx:postIndex])
- m.DeviceClassName = &s
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeviceTaintSelector: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeviceTaintSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
@@ -1700,40 +1570,6 @@ func (m *DeviceTaintSelector) Unmarshal(dAtA []byte) error {
s := string(dAtA[iNdEx:postIndex])
m.Device = &s
iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Selectors = append(m.Selectors, DeviceSelector{})
- if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/generated.proto b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.proto
index d3344790..6414216d 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/generated.proto
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.proto
@@ -114,8 +114,10 @@ message DeviceTaint {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
optional string effect = 3;
@@ -139,6 +141,11 @@ message DeviceTaintRule {
//
// Changing the spec automatically increments the metadata.generation number.
optional DeviceTaintRuleSpec spec = 2;
+
+ // Status provides information about what was requested in the spec.
+ //
+ // +optional
+ optional DeviceTaintRuleStatus status = 3;
}
// DeviceTaintRuleList is a collection of DeviceTaintRules.
@@ -154,7 +161,7 @@ message DeviceTaintRuleList {
// DeviceTaintRuleSpec specifies the selector and one taint.
message DeviceTaintRuleSpec {
// DeviceSelector defines which device(s) the taint is applied to.
- // All selector criteria must be satified for a device to
+ // All selector criteria must be satisfied for a device to
// match. The empty selector matches all devices. Without
// a selector, no devices are matches.
//
@@ -167,17 +174,41 @@ message DeviceTaintRuleSpec {
optional DeviceTaint taint = 2;
}
+// DeviceTaintRuleStatus provides information about an on-going pod eviction.
+message DeviceTaintRuleStatus {
+ // Conditions provide information about the state of the DeviceTaintRule
+ // and the cluster at some point in time,
+ // in a machine-readable and human-readable format.
+ //
+ // The following condition is currently defined as part of this API, more may
+ // get added:
+ // - Type: EvictionInProgress
+ // - Status: True if there are currently pods which need to be evicted, False otherwise
+ // (includes the effects which don't cause eviction).
+ // - Reason: not specified, may change
+ // - Message: includes information about number of pending pods and already evicted pods
+ // in a human-readable format, updated periodically, may change
+ //
+ // For `effect: None`, the condition above gets set once for each change to
+ // the spec, with the message containing information about what would happen
+ // if the effect was `NoExecute`. This feedback can be used to decide whether
+ // changing the effect to `NoExecute` will work as intended. It only gets
+ // set once to avoid having to constantly update the status.
+ //
+ // Must have 8 or fewer entries.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ // +patchStrategy=merge
+ // +patchMergeKey=type
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
+}
+
// DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to.
// The empty selector matches all devices. Without a selector, no devices
// are matched.
message DeviceTaintSelector {
- // If DeviceClassName is set, the selectors defined there must be
- // satisfied by a device to be selected. This field corresponds
- // to class.metadata.name.
- //
- // +optional
- optional string deviceClassName = 1;
-
// If driver is set, only devices from that driver are selected.
// This fields corresponds to slice.spec.driver.
//
@@ -204,13 +235,5 @@ message DeviceTaintSelector {
//
// +optional
optional string device = 4;
-
- // Selectors contains the same selection criteria as a ResourceClaim.
- // Currently, CEL expressions are supported. All of these selectors
- // must be satisfied.
- //
- // +optional
- // +listType=atomic
- repeated DeviceSelector selectors = 5;
}
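
The v1alpha3 proto changes above drop class- and CEL-based selection from DeviceTaintSelector and add a status to DeviceTaintRule. A minimal sketch, matching the types.go hunk further down, of a rule that selects every device of one hypothetical driver and uses the new None effect as a dry run:

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	driver := "gpu.example.com" // hypothetical DRA driver name
	rule := resourcev1alpha3.DeviceTaintRule{
		ObjectMeta: metav1.ObjectMeta{Name: "drain-example-gpus"},
		Spec: resourcev1alpha3.DeviceTaintRuleSpec{
			// DeviceClassName and Selectors are tombstoned in 1.35;
			// selection is now by driver, pool, and/or device only.
			DeviceSelector: &resourcev1alpha3.DeviceTaintSelector{
				Driver: &driver,
			},
			Taint: resourcev1alpha3.DeviceTaint{
				Key:   "example.com/maintenance",
				Value: "planned",
				// None is informational: the status reports what
				// NoExecute would evict without actually evicting.
				Effect: resourcev1alpha3.DeviceTaintEffectNone,
			},
		},
	}
	fmt.Println(rule.Name, rule.Spec.Taint.Effect)
}
```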
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.protomessage.pb.go
new file mode 100644
index 00000000..aba6231f
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/generated.protomessage.pb.go
@@ -0,0 +1,38 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha3
+
+func (*CELDeviceSelector) ProtoMessage() {}
+
+func (*DeviceSelector) ProtoMessage() {}
+
+func (*DeviceTaint) ProtoMessage() {}
+
+func (*DeviceTaintRule) ProtoMessage() {}
+
+func (*DeviceTaintRuleList) ProtoMessage() {}
+
+func (*DeviceTaintRuleSpec) ProtoMessage() {}
+
+func (*DeviceTaintRuleStatus) ProtoMessage() {}
+
+func (*DeviceTaintSelector) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/types.go b/operator/vendor/k8s.io/api/resource/v1alpha3/types.go
index da9a9ca2..ba02edab 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/types.go
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/types.go
@@ -134,8 +134,10 @@ type DeviceTaint struct {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
Effect DeviceTaintEffect `json:"effect" protobuf:"bytes,3,name=effect,casttype=DeviceTaintEffect"`
@@ -144,6 +146,14 @@ type DeviceTaint struct {
//
// Implementing PreferNoSchedule would depend on a scoring solution for DRA.
// It might get added as part of that.
+ //
+ // A possible future new effect is NoExecuteWithPodDisruptionBudget:
+ // honor the pod disruption budget instead of simply deleting pods.
+ // This is currently undecided, it could also be a separate field.
+ //
+ // Validation must be prepared to allow unknown enums in stored objects,
+ // which will enable adding new enums within a single release without
+ // ratcheting.
// TimeAdded represents the time at which the taint was added.
// Added automatically during create or update if not set.
@@ -162,6 +172,9 @@ type DeviceTaint struct {
type DeviceTaintEffect string
const (
+ // No effect, the taint is purely informational.
+ DeviceTaintEffectNone DeviceTaintEffect = "None"
+
// Do not allow new pods to schedule which use a tainted device unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
@@ -190,18 +203,16 @@ type DeviceTaintRule struct {
// Changing the spec automatically increments the metadata.generation number.
Spec DeviceTaintRuleSpec `json:"spec" protobuf:"bytes,2,name=spec"`
- // ^^^
- // A spec gets added because adding a status seems likely.
- // Such a status could provide feedback on applying the
- // eviction and/or statistics (number of matching devices,
- // affected allocated claims, pods remaining to be evicted,
- // etc.).
+ // Status provides information about what was requested in the spec.
+ //
+ // +optional
+ Status DeviceTaintRuleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// DeviceTaintRuleSpec specifies the selector and one taint.
type DeviceTaintRuleSpec struct {
// DeviceSelector defines which device(s) the taint is applied to.
- // All selector criteria must be satified for a device to
+ // All selector criteria must be satisfied for a device to
// match. The empty selector matches all devices. Without
// a selector, no devices are matches.
//
@@ -223,7 +234,12 @@ type DeviceTaintSelector struct {
// to class.metadata.name.
//
// +optional
- DeviceClassName *string `json:"deviceClassName,omitempty" protobuf:"bytes,1,opt,name=deviceClassName"`
+ //
+ // Tombstoned since 1.35 because it turned out that supporting this in all cases
+ // would depend on copying the device attributes into the ResourceClaim allocation
+ // result. Without that the eviction controller cannot evaluate these CEL expressions.
+ //
+ // DeviceClassName *string `json:"deviceClassName,omitempty" protobuf:"bytes,1,opt,name=deviceClassName"`
// If driver is set, only devices from that driver are selected.
// This fields corresponds to slice.spec.driver.
@@ -258,9 +274,51 @@ type DeviceTaintSelector struct {
//
// +optional
// +listType=atomic
- Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,5,rep,name=selectors"`
+ //
+ // Tombstoned since 1.35 because it turned out that supporting this in all cases
+ // would depend on copying the device attributes into the ResourceClaim allocation
+ // result. Without that the eviction controller cannot evaluate these CEL expressions.
+ //
+ // Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,5,rep,name=selectors"`
+}
+
+// DeviceTaintRuleStatus provides information about an on-going pod eviction.
+type DeviceTaintRuleStatus struct {
+ // Conditions provide information about the state of the DeviceTaintRule
+ // and the cluster at some point in time,
+ // in a machine-readable and human-readable format.
+ //
+ // The following condition is currently defined as part of this API, more may
+ // get added:
+ // - Type: EvictionInProgress
+ // - Status: True if there are currently pods which need to be evicted, False otherwise
+ // (includes the effects which don't cause eviction).
+ // - Reason: not specified, may change
+ // - Message: includes information about number of pending pods and already evicted pods
+ // in a human-readable format, updated periodically, may change
+ //
+ // For `effect: None`, the condition above gets set once for each change to
+ // the spec, with the message containing information about what would happen
+ // if the effect was `NoExecute`. This feedback can be used to decide whether
+ // changing the effect to `NoExecute` will work as intended. It only gets
+ // set once to avoid having to constantly update the status.
+ //
+ // Must have 8 or fewer entries.
+ //
+ // +optional
+ // +listType=map
+ // +listMapKey=type
+ // +patchStrategy=merge
+ // +patchMergeKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
}
+// DeviceTaintRuleStatusMaxConditions is the maximum number of conditions in DeviceTaintRuleStatus.
+const DeviceTaintRuleStatusMaxConditions = 8
+
+// DeviceTaintConditionEvictionInProgress is the publicly documented condition type for the DeviceTaintRuleStatus.
+const DeviceTaintConditionEvictionInProgress = "EvictionInProgress"
+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.33
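
The new DeviceTaintRuleStatus carries standard metav1.Conditions, so consumers can read the EvictionInProgress condition with the usual helpers. A sketch of that, assuming the FindStatusCondition helper from k8s.io/apimachinery/pkg/api/meta (not part of this diff):

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reportEviction prints progress based on the EvictionInProgress condition
// that the eviction controller is expected to maintain (see the API comments above).
func reportEviction(rule *resourcev1alpha3.DeviceTaintRule) {
	cond := apimeta.FindStatusCondition(rule.Status.Conditions,
		resourcev1alpha3.DeviceTaintConditionEvictionInProgress)
	switch {
	case cond == nil:
		fmt.Println("no eviction feedback yet")
	case cond.Status == metav1.ConditionTrue:
		fmt.Println("eviction in progress:", cond.Message)
	default:
		fmt.Println("no pods left to evict:", cond.Message)
	}
}

func main() {
	reportEviction(&resourcev1alpha3.DeviceTaintRule{})
}
```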
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
index 6c4c4eb1..30981bd7 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go
@@ -49,7 +49,7 @@ var map_DeviceTaint = map[string]string{
"": "The device this taint is attached to has the \"effect\" on any claim which does not tolerate the taint and, through the claim, to pods using the claim.",
"key": "The taint key to be applied to a device. Must be a label name.",
"value": "The taint value corresponding to the taint key. Must be a label value.",
- "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here.",
+ "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them.\n\nValid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. More effects may get added in the future. Consumers must treat unknown effects like None.",
"timeAdded": "TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set.",
}
@@ -61,6 +61,7 @@ var map_DeviceTaintRule = map[string]string{
"": "DeviceTaintRule adds one taint to all devices which match the selector. This has the same effect as if the taint was specified directly in the ResourceSlice by the DRA driver.",
"metadata": "Standard object metadata",
"spec": "Spec specifies the selector and one taint.\n\nChanging the spec automatically increments the metadata.generation number.",
+ "status": "Status provides information about what was requested in the spec.",
}
func (DeviceTaintRule) SwaggerDoc() map[string]string {
@@ -79,7 +80,7 @@ func (DeviceTaintRuleList) SwaggerDoc() map[string]string {
var map_DeviceTaintRuleSpec = map[string]string{
"": "DeviceTaintRuleSpec specifies the selector and one taint.",
- "deviceSelector": "DeviceSelector defines which device(s) the taint is applied to. All selector criteria must be satified for a device to match. The empty selector matches all devices. Without a selector, no devices are matches.",
+ "deviceSelector": "DeviceSelector defines which device(s) the taint is applied to. All selector criteria must be satisfied for a device to match. The empty selector matches all devices. Without a selector, no devices are matches.",
"taint": "The taint that gets applied to matching devices.",
}
@@ -87,13 +88,20 @@ func (DeviceTaintRuleSpec) SwaggerDoc() map[string]string {
return map_DeviceTaintRuleSpec
}
+var map_DeviceTaintRuleStatus = map[string]string{
+ "": "DeviceTaintRuleStatus provides information about an on-going pod eviction.",
+ "conditions": "Conditions provide information about the state of the DeviceTaintRule and the cluster at some point in time, in a machine-readable and human-readable format.\n\nThe following condition is currently defined as part of this API, more may get added: - Type: EvictionInProgress - Status: True if there are currently pods which need to be evicted, False otherwise\n (includes the effects which don't cause eviction).\n- Reason: not specified, may change - Message: includes information about number of pending pods and already evicted pods\n in a human-readable format, updated periodically, may change\n\nFor `effect: None`, the condition above gets set once for each change to the spec, with the message containing information about what would happen if the effect was `NoExecute`. This feedback can be used to decide whether changing the effect to `NoExecute` will work as intended. It only gets set once to avoid having to constantly update the status.\n\nMust have 8 or fewer entries.",
+}
+
+func (DeviceTaintRuleStatus) SwaggerDoc() map[string]string {
+ return map_DeviceTaintRuleStatus
+}
+
var map_DeviceTaintSelector = map[string]string{
- "": "DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to. The empty selector matches all devices. Without a selector, no devices are matched.",
- "deviceClassName": "If DeviceClassName is set, the selectors defined there must be satisfied by a device to be selected. This field corresponds to class.metadata.name.",
- "driver": "If driver is set, only devices from that driver are selected. This fields corresponds to slice.spec.driver.",
- "pool": "If pool is set, only devices in that pool are selected.\n\nAlso setting the driver name may be useful to avoid ambiguity when different drivers use the same pool name, but this is not required because selecting pools from different drivers may also be useful, for example when drivers with node-local devices use the node name as their pool name.",
- "device": "If device is set, only devices with that name are selected. This field corresponds to slice.spec.devices[].name.\n\nSetting also driver and pool may be required to avoid ambiguity, but is not required.",
- "selectors": "Selectors contains the same selection criteria as a ResourceClaim. Currently, CEL expressions are supported. All of these selectors must be satisfied.",
+ "": "DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to. The empty selector matches all devices. Without a selector, no devices are matched.",
+ "driver": "If driver is set, only devices from that driver are selected. This fields corresponds to slice.spec.driver.",
+ "pool": "If pool is set, only devices in that pool are selected.\n\nAlso setting the driver name may be useful to avoid ambiguity when different drivers use the same pool name, but this is not required because selecting pools from different drivers may also be useful, for example when drivers with node-local devices use the node name as their pool name.",
+ "device": "If device is set, only devices with that name are selected. This field corresponds to slice.spec.devices[].name.\n\nSetting also driver and pool may be required to avoid ambiguity, but is not required.",
}
func (DeviceTaintSelector) SwaggerDoc() map[string]string {
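
The updated `effect` documentation above says consumers must treat unknown effects like `None`. A small sketch of that defensive pattern, assuming the existing `DeviceTaintEffectNoSchedule`/`DeviceTaintEffectNoExecute` constants from the same package; `causesEviction` is an illustrative helper, not part of the API:

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

// causesEviction reports whether a taint effect should trigger pod eviction.
// Only effects this consumer explicitly knows about are acted upon; None and
// any effect added in a future release fall through to "no eviction".
func causesEviction(effect resourcev1alpha3.DeviceTaintEffect) bool {
	switch effect {
	case resourcev1alpha3.DeviceTaintEffectNoExecute:
		return true
	case resourcev1alpha3.DeviceTaintEffectNoSchedule:
		// NoSchedule only blocks new allocations; running pods keep their devices.
		return false
	default:
		return false
	}
}

func main() {
	fmt.Println(causesEviction(resourcev1alpha3.DeviceTaintEffectNoExecute)) // true
	fmt.Println(causesEviction(resourcev1alpha3.DeviceTaintEffect("None")))  // false
}
```
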
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
index e10736b9..6813ab04 100644
--- a/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go
@@ -22,6 +22,7 @@ limitations under the License.
package v1alpha3
import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -88,6 +89,7 @@ func (in *DeviceTaintRule) DeepCopyInto(out *DeviceTaintRule) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
return
}
@@ -165,13 +167,31 @@ func (in *DeviceTaintRuleSpec) DeepCopy() *DeviceTaintRuleSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DeviceTaintSelector) DeepCopyInto(out *DeviceTaintSelector) {
+func (in *DeviceTaintRuleStatus) DeepCopyInto(out *DeviceTaintRuleStatus) {
*out = *in
- if in.DeviceClassName != nil {
- in, out := &in.DeviceClassName, &out.DeviceClassName
- *out = new(string)
- **out = **in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeviceTaintRuleStatus.
+func (in *DeviceTaintRuleStatus) DeepCopy() *DeviceTaintRuleStatus {
+ if in == nil {
+ return nil
}
+ out := new(DeviceTaintRuleStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DeviceTaintSelector) DeepCopyInto(out *DeviceTaintSelector) {
+ *out = *in
if in.Driver != nil {
in, out := &in.Driver, &out.Driver
*out = new(string)
@@ -187,13 +207,6 @@ func (in *DeviceTaintSelector) DeepCopyInto(out *DeviceTaintSelector) {
*out = new(string)
**out = **in
}
- if in.Selectors != nil {
- in, out := &in.Selectors, &out.Selectors
- *out = make([]DeviceSelector, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
return
}
diff --git a/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.model_name.go b/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.model_name.go
new file mode 100644
index 00000000..1c1672b4
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1alpha3/zz_generated.model_name.go
@@ -0,0 +1,62 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha3
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CELDeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.CELDeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaintRule) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaintRule"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaintRuleList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaintRuleList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaintRuleSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaintRuleSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaintRuleStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaintRuleStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaintSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1alpha3.DeviceTaintSelector"
+}
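
The newly vendored `zz_generated.model_name.go` only adds accessor methods that return the OpenAPI schema name for each type. A trivial sketch of what they return, using one of the types from this file:

```go
package main

import (
	"fmt"

	resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
)

func main() {
	// Prints "io.k8s.api.resource.v1alpha3.DeviceTaintRule", matching the
	// model name used in the published OpenAPI spec.
	fmt.Println(resourcev1alpha3.DeviceTaintRule{}.OpenAPIModelName())
}
```
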
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/doc.go b/operator/vendor/k8s.io/api/resource/v1beta1/doc.go
index 1e08b69a..290c7bac 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.resource.v1beta1
+
// +groupName=resource.k8s.io
// Package v1beta1 is the v1beta1 version of the resource API.
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/resource/v1beta1/generated.pb.go
index c1f9ab09..1ac7de8a 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/generated.pb.go
@@ -23,15 +23,13 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -39,1470 +37,91 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-func (*AllocatedDeviceStatus) ProtoMessage() {}
-func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{0}
-}
-func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
-}
-func (m *AllocatedDeviceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
-
-func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (*AllocationResult) ProtoMessage() {}
-func (*AllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{1}
-}
-func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocationResult.Merge(m, src)
-}
-func (m *AllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
-
-func (m *BasicDevice) Reset() { *m = BasicDevice{} }
-func (*BasicDevice) ProtoMessage() {}
-func (*BasicDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{2}
-}
-func (m *BasicDevice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *BasicDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *BasicDevice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BasicDevice.Merge(m, src)
-}
-func (m *BasicDevice) XXX_Size() int {
- return m.Size()
-}
-func (m *BasicDevice) XXX_DiscardUnknown() {
- xxx_messageInfo_BasicDevice.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BasicDevice proto.InternalMessageInfo
-
-func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (*CELDeviceSelector) ProtoMessage() {}
-func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{3}
-}
-func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CELDeviceSelector.Merge(m, src)
-}
-func (m *CELDeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *CELDeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-func (*CapacityRequestPolicy) ProtoMessage() {}
-func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{4}
-}
-func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicy.Merge(m, src)
-}
-func (m *CapacityRequestPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-func (*CapacityRequestPolicyRange) ProtoMessage() {}
-func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{5}
-}
-func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src)
-}
-func (m *CapacityRequestPolicyRange) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo
-
-func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-func (*CapacityRequirements) ProtoMessage() {}
-func (*CapacityRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{6}
-}
-func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequirements) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequirements.Merge(m, src)
-}
-func (m *CapacityRequirements) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequirements) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequirements.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo
-
-func (m *Counter) Reset() { *m = Counter{} }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{7}
-}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
- return m.Size()
-}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (*CounterSet) ProtoMessage() {}
-func (*CounterSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{8}
-}
-func (m *CounterSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CounterSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CounterSet.Merge(m, src)
-}
-func (m *CounterSet) XXX_Size() int {
- return m.Size()
-}
-func (m *CounterSet) XXX_DiscardUnknown() {
- xxx_messageInfo_CounterSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CounterSet proto.InternalMessageInfo
-
-func (m *Device) Reset() { *m = Device{} }
-func (*Device) ProtoMessage() {}
-func (*Device) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{9}
-}
-func (m *Device) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Device) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Device.Merge(m, src)
-}
-func (m *Device) XXX_Size() int {
- return m.Size()
-}
-func (m *Device) XXX_DiscardUnknown() {
- xxx_messageInfo_Device.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Device proto.InternalMessageInfo
-
-func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (*DeviceAllocationConfiguration) ProtoMessage() {}
-func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{10}
-}
-func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
-}
-func (m *DeviceAllocationConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
-
-func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-func (*DeviceAllocationResult) ProtoMessage() {}
-func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{11}
-}
-func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
-}
-func (m *DeviceAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
-
-func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (*DeviceAttribute) ProtoMessage() {}
-func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{12}
-}
-func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAttribute.Merge(m, src)
-}
-func (m *DeviceAttribute) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAttribute) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
-
-func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-func (*DeviceCapacity) ProtoMessage() {}
-func (*DeviceCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{13}
-}
-func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCapacity.Merge(m, src)
-}
-func (m *DeviceCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
-
-func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (*DeviceClaim) ProtoMessage() {}
-func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{14}
-}
-func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaim.Merge(m, src)
-}
-func (m *DeviceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
-
-func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-func (*DeviceClaimConfiguration) ProtoMessage() {}
-func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{15}
-}
-func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
-}
-func (m *DeviceClaimConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (*DeviceClass) ProtoMessage() {}
-func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{16}
-}
-func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClass.Merge(m, src)
-}
-func (m *DeviceClass) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClass) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
-
-func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-func (*DeviceClassConfiguration) ProtoMessage() {}
-func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{17}
-}
-func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
-}
-func (m *DeviceClassConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (*DeviceClassList) ProtoMessage() {}
-func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{18}
-}
-func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassList.Merge(m, src)
-}
-func (m *DeviceClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
-
-func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-func (*DeviceClassSpec) ProtoMessage() {}
-func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{19}
-}
-func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassSpec.Merge(m, src)
-}
-func (m *DeviceClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
-}
+func (m *BasicDevice) Reset() { *m = BasicDevice{} }
-var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (*DeviceConfiguration) ProtoMessage() {}
-func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{20}
-}
-func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConfiguration.Merge(m, src)
-}
-func (m *DeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
-}
+func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
+func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-func (*DeviceConstraint) ProtoMessage() {}
-func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{21}
-}
-func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConstraint.Merge(m, src)
-}
-func (m *DeviceConstraint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConstraint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
-}
+func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+func (m *Counter) Reset() { *m = Counter{} }
-func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (*DeviceCounterConsumption) ProtoMessage() {}
-func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{22}
-}
-func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
-}
-func (m *DeviceCounterConsumption) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
-}
+func (m *CounterSet) Reset() { *m = CounterSet{} }
-var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
+func (m *Device) Reset() { *m = Device{} }
-func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-func (*DeviceRequest) ProtoMessage() {}
-func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{23}
-}
-func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequest.Merge(m, src)
-}
-func (m *DeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
-}
+func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
+func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (*DeviceRequestAllocationResult) ProtoMessage() {}
-func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{24}
-}
-func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
-}
-func (m *DeviceRequestAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
-}
+func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
+func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (*DeviceSelector) ProtoMessage() {}
-func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{25}
-}
-func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSelector.Merge(m, src)
-}
-func (m *DeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
-}
+func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (*DeviceSubRequest) ProtoMessage() {}
-func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{26}
-}
-func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSubRequest.Merge(m, src)
-}
-func (m *DeviceSubRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSubRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
-}
+func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
+func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (*DeviceTaint) ProtoMessage() {}
-func (*DeviceTaint) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{27}
-}
-func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaint.Merge(m, src)
-}
-func (m *DeviceTaint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
-
-func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (*DeviceToleration) ProtoMessage() {}
-func (*DeviceToleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{28}
-}
-func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceToleration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceToleration.Merge(m, src)
-}
-func (m *DeviceToleration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceToleration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
-
-func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (*NetworkDeviceData) ProtoMessage() {}
-func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{29}
-}
-func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkDeviceData.Merge(m, src)
-}
-func (m *NetworkDeviceData) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkDeviceData) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
-
-func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-func (*OpaqueDeviceConfiguration) ProtoMessage() {}
-func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{30}
-}
-func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
-}
+func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
+func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (*ResourceClaim) ProtoMessage() {}
-func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{31}
-}
-func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaim.Merge(m, src)
-}
-func (m *ResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
-}
+func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-func (*ResourceClaimConsumerReference) ProtoMessage() {}
-func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{32}
-}
-func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
-}
-func (m *ResourceClaimConsumerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
-}
+func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
+func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (*ResourceClaimList) ProtoMessage() {}
-func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{33}
-}
-func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimList.Merge(m, src)
-}
-func (m *ResourceClaimList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
-}
+func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-func (*ResourceClaimSpec) ProtoMessage() {}
-func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{34}
-}
-func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
-}
-func (m *ResourceClaimSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
-}
+func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (*ResourceClaimStatus) ProtoMessage() {}
-func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{35}
-}
-func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
-}
-func (m *ResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
-}
+func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
+func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-func (*ResourceClaimTemplate) ProtoMessage() {}
-func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{36}
-}
-func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
-}
-func (m *ResourceClaimTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
-}
+func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
+func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (*ResourceClaimTemplateList) ProtoMessage() {}
-func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{37}
-}
-func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
-}
-func (m *ResourceClaimTemplateList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
-}
+func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
+func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-func (*ResourceClaimTemplateSpec) ProtoMessage() {}
-func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{38}
-}
-func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
-}
+func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
+func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (*ResourcePool) ProtoMessage() {}
-func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{39}
-}
-func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePool) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePool.Merge(m, src)
-}
-func (m *ResourcePool) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePool) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePool.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
+func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-func (*ResourceSlice) ProtoMessage() {}
-func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{40}
-}
-func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSlice.Merge(m, src)
-}
-func (m *ResourceSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
+func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (*ResourceSliceList) ProtoMessage() {}
-func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{41}
-}
-func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceList.Merge(m, src)
-}
-func (m *ResourceSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
-}
+func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
+func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
-func (*ResourceSliceSpec) ProtoMessage() {}
-func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_ba331e3ec6484c27, []int{42}
-}
-func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
-}
-func (m *ResourceSliceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1beta1.AllocatedDeviceStatus")
- proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1beta1.AllocationResult")
- proto.RegisterType((*BasicDevice)(nil), "k8s.io.api.resource.v1beta1.BasicDevice")
- proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.AttributesEntry")
- proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.BasicDevice.CapacityEntry")
- proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1beta1.CELDeviceSelector")
- proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1beta1.CapacityRequestPolicy")
- proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1beta1.CapacityRequestPolicyRange")
- proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1beta1.CapacityRequirements")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1beta1.CapacityRequirements.RequestsEntry")
- proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1beta1.Counter")
- proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1beta1.CounterSet")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1beta1.CounterSet.CountersEntry")
- proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1beta1.Device")
- proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationConfiguration")
- proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceAllocationResult")
- proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1beta1.DeviceAttribute")
- proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1beta1.DeviceCapacity")
- proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1beta1.DeviceClaim")
- proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClaimConfiguration")
- proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1beta1.DeviceClass")
- proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceClassConfiguration")
- proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1beta1.DeviceClassList")
- proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1beta1.DeviceClassSpec")
- proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.DeviceConfiguration")
- proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1beta1.DeviceConstraint")
- proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1beta1.DeviceCounterConsumption")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1beta1.DeviceCounterConsumption.CountersEntry")
- proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1beta1.DeviceRequest")
- proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1beta1.DeviceRequestAllocationResult")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1beta1.DeviceRequestAllocationResult.ConsumedCapacityEntry")
- proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1beta1.DeviceSelector")
- proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1beta1.DeviceSubRequest")
- proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1beta1.DeviceTaint")
- proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1beta1.DeviceToleration")
- proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1beta1.NetworkDeviceData")
- proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1beta1.OpaqueDeviceConfiguration")
- proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1beta1.ResourceClaim")
- proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimConsumerReference")
- proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimList")
- proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimSpec")
- proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimStatus")
- proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplate")
- proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateList")
- proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceClaimTemplateSpec")
- proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1beta1.ResourcePool")
- proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1beta1.ResourceSlice")
- proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceList")
- proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1beta1.ResourceSliceSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/resource/v1beta1/generated.proto", fileDescriptor_ba331e3ec6484c27)
-}
-
-var fileDescriptor_ba331e3ec6484c27 = []byte{
- // 3039 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x24, 0x47,
- 0xf5, 0x77, 0xcf, 0x8c, 0x3d, 0xe3, 0x37, 0x6b, 0xaf, 0x5d, 0xfb, 0x91, 0x89, 0xf3, 0x8f, 0xc7,
- 0xe9, 0xfd, 0x43, 0x9c, 0xcd, 0x66, 0x9c, 0x1d, 0x20, 0x44, 0x9b, 0x03, 0xcc, 0xd8, 0xde, 0xc4,
- 0xc9, 0xae, 0xd7, 0xa9, 0x71, 0x36, 0x4b, 0xbe, 0x44, 0xbb, 0xbb, 0x6c, 0x37, 0xee, 0xe9, 0x9e,
- 0xed, 0xee, 0xf1, 0xae, 0x85, 0x80, 0x00, 0x57, 0x0e, 0x5c, 0x90, 0x90, 0x00, 0x09, 0x21, 0x04,
- 0x42, 0x42, 0x88, 0x03, 0xe7, 0x20, 0x40, 0x11, 0xe1, 0x44, 0x14, 0x2e, 0x39, 0xa0, 0x09, 0x99,
- 0x9c, 0x38, 0x72, 0xe1, 0xb0, 0x27, 0x54, 0xd5, 0x55, 0xdd, 0xd5, 0x3d, 0xd3, 0x93, 0x1e, 0x67,
- 0x77, 0x15, 0x24, 0x6e, 0x9e, 0x57, 0xef, 0xfd, 0xaa, 0xea, 0xd5, 0xfb, 0xaa, 0xd7, 0x65, 0x78,
- 0xfc, 0xe0, 0x69, 0xaf, 0x66, 0x3a, 0x2b, 0x5a, 0xc7, 0x5c, 0x71, 0x89, 0xe7, 0x74, 0x5d, 0x9d,
- 0xac, 0x1c, 0x5e, 0xdc, 0x21, 0xbe, 0x76, 0x71, 0x65, 0x8f, 0xd8, 0xc4, 0xd5, 0x7c, 0x62, 0xd4,
- 0x3a, 0xae, 0xe3, 0x3b, 0xe8, 0xa1, 0x80, 0xb9, 0xa6, 0x75, 0xcc, 0x9a, 0x60, 0xae, 0x71, 0xe6,
- 0x85, 0x27, 0xf6, 0x4c, 0x7f, 0xbf, 0xbb, 0x53, 0xd3, 0x9d, 0xf6, 0xca, 0x9e, 0xb3, 0xe7, 0xac,
- 0x30, 0x99, 0x9d, 0xee, 0x2e, 0xfb, 0xc5, 0x7e, 0xb0, 0xbf, 0x02, 0xac, 0x05, 0x55, 0x9a, 0x58,
- 0x77, 0x5c, 0x3a, 0x69, 0x72, 0xbe, 0x85, 0xcf, 0x47, 0x3c, 0x6d, 0x4d, 0xdf, 0x37, 0x6d, 0xe2,
- 0x1e, 0xad, 0x74, 0x0e, 0xf6, 0xe2, 0xab, 0x1d, 0x47, 0xca, 0x5b, 0x69, 0x13, 0x5f, 0x1b, 0x36,
- 0xd7, 0x4a, 0x9a, 0x94, 0xdb, 0xb5, 0x7d, 0xb3, 0x3d, 0x38, 0xcd, 0x53, 0x1f, 0x27, 0xe0, 0xe9,
- 0xfb, 0xa4, 0xad, 0x25, 0xe5, 0xd4, 0xb7, 0xf3, 0x70, 0xa6, 0x61, 0x59, 0x8e, 0x4e, 0x69, 0x6b,
- 0xe4, 0xd0, 0xd4, 0x49, 0xcb, 0xd7, 0xfc, 0xae, 0x87, 0x3e, 0x0b, 0x53, 0x86, 0x6b, 0x1e, 0x12,
- 0xb7, 0xa2, 0x2c, 0x29, 0xcb, 0xd3, 0xcd, 0xd9, 0x77, 0x7a, 0xd5, 0x89, 0x7e, 0xaf, 0x3a, 0xb5,
- 0xc6, 0xa8, 0x98, 0x8f, 0xa2, 0x25, 0x28, 0x74, 0x1c, 0xc7, 0xaa, 0xe4, 0x18, 0xd7, 0x09, 0xce,
- 0x55, 0xd8, 0x72, 0x1c, 0x0b, 0xb3, 0x11, 0x86, 0xc4, 0x90, 0x2b, 0xf9, 0x04, 0x12, 0xa3, 0x62,
- 0x3e, 0x8a, 0x3e, 0x03, 0x45, 0x6f, 0x5f, 0x73, 0xc9, 0xc6, 0x5a, 0xa5, 0xc8, 0x18, 0xcb, 0xfd,
- 0x5e, 0xb5, 0xd8, 0x0a, 0x48, 0x58, 0x8c, 0x21, 0x1d, 0x40, 0x77, 0x6c, 0xc3, 0xf4, 0x4d, 0xc7,
- 0xf6, 0x2a, 0x85, 0xa5, 0xfc, 0x72, 0xb9, 0xbe, 0x52, 0x8b, 0x8c, 0x21, 0xdc, 0x7f, 0xad, 0x73,
- 0xb0, 0x47, 0x09, 0x5e, 0x8d, 0xaa, 0xb9, 0x76, 0x78, 0xb1, 0xb6, 0x2a, 0xe4, 0x9a, 0x88, 0xaf,
- 0x01, 0x42, 0x92, 0x87, 0x25, 0x58, 0xf4, 0x02, 0x14, 0x0c, 0xcd, 0xd7, 0x2a, 0x93, 0x4b, 0xca,
- 0x72, 0xb9, 0xfe, 0x44, 0x2a, 0x3c, 0x57, 0x6f, 0x0d, 0x6b, 0xb7, 0xd6, 0x6f, 0xfb, 0xc4, 0xf6,
- 0x28, 0x78, 0x89, 0x2a, 0x60, 0x4d, 0xf3, 0x35, 0xcc, 0x40, 0x90, 0x06, 0x65, 0x9b, 0xf8, 0xb7,
- 0x1c, 0xf7, 0x80, 0x12, 0x2b, 0x53, 0x0c, 0xb3, 0x56, 0x1b, 0x61, 0xbf, 0xb5, 0x4d, 0xce, 0xcf,
- 0x34, 0x43, 0xa5, 0x9a, 0x27, 0xfb, 0xbd, 0x6a, 0x79, 0x33, 0x82, 0xc1, 0x32, 0xa6, 0xfa, 0x87,
- 0x1c, 0xcc, 0xf1, 0x73, 0x34, 0x1d, 0x1b, 0x13, 0xaf, 0x6b, 0xf9, 0xe8, 0x0d, 0x28, 0x06, 0xaa,
- 0xf5, 0xd8, 0x19, 0x96, 0xeb, 0x9f, 0x1b, 0x39, 0x67, 0x30, 0x59, 0x12, 0xa5, 0x79, 0x92, 0xab,
- 0xaa, 0x18, 0x8c, 0x7b, 0x58, 0x80, 0xa2, 0xeb, 0x70, 0xc2, 0x76, 0x0c, 0xd2, 0x22, 0x16, 0xd1,
- 0x7d, 0xc7, 0x65, 0xc7, 0x5b, 0xae, 0x2f, 0xc9, 0x93, 0x50, 0x67, 0xa2, 0x9a, 0xdf, 0x94, 0xf8,
- 0x9a, 0x73, 0xfd, 0x5e, 0xf5, 0x84, 0x4c, 0xc1, 0x31, 0x1c, 0xd4, 0x85, 0x53, 0x5a, 0xb8, 0x8a,
- 0x6d, 0xb3, 0x4d, 0x3c, 0x5f, 0x6b, 0x77, 0xf8, 0x59, 0x9c, 0xcf, 0x76, 0xd4, 0x54, 0xac, 0xf9,
- 0x40, 0xbf, 0x57, 0x3d, 0xd5, 0x18, 0x84, 0xc2, 0xc3, 0xf0, 0xd5, 0x6f, 0x4f, 0x43, 0xb9, 0xa9,
- 0x79, 0xa6, 0x1e, 0x6c, 0x14, 0x7d, 0x03, 0x40, 0xf3, 0x7d, 0xd7, 0xdc, 0xe9, 0xfa, 0x4c, 0x83,
- 0xd4, 0xd0, 0x9e, 0x1e, 0xa9, 0x41, 0x49, 0xba, 0xd6, 0x08, 0x45, 0xd7, 0x6d, 0xdf, 0x3d, 0x6a,
- 0x9e, 0x13, 0x16, 0x17, 0x0d, 0x7c, 0xe7, 0x83, 0xea, 0xcc, 0x8b, 0x5d, 0xcd, 0x32, 0x77, 0x4d,
- 0x62, 0x6c, 0x6a, 0x6d, 0x82, 0xa5, 0x09, 0x51, 0x17, 0x4a, 0xba, 0xd6, 0xd1, 0x74, 0xd3, 0x3f,
- 0xaa, 0xe4, 0xd8, 0xe4, 0x4f, 0x65, 0x9e, 0x7c, 0x95, 0x0b, 0x06, 0x53, 0x3f, 0xc2, 0xa7, 0x2e,
- 0x09, 0xf2, 0xe0, 0xc4, 0xe1, 0x54, 0xe8, 0xeb, 0x30, 0xa7, 0x3b, 0xb6, 0xd7, 0x6d, 0x13, 0x6f,
- 0xd5, 0xe9, 0xda, 0x3e, 0x71, 0xbd, 0x4a, 0x9e, 0x4d, 0xff, 0x85, 0x0c, 0xd6, 0xc3, 0x45, 0x56,
- 0x19, 0x42, 0x87, 0xb9, 0x5a, 0x85, 0xcf, 0x3e, 0xb7, 0x9a, 0x80, 0xc5, 0x03, 0x13, 0xa1, 0x65,
- 0x28, 0x51, 0x4b, 0xa0, 0x4b, 0xaa, 0x14, 0x82, 0x80, 0x42, 0xd7, 0xbd, 0xc9, 0x69, 0x38, 0x1c,
- 0x1d, 0xb0, 0xbd, 0xc9, 0xbb, 0x64, 0x7b, 0xcb, 0x50, 0xd2, 0x2c, 0x8b, 0x32, 0x78, 0xcc, 0x51,
- 0x4b, 0xc1, 0x0a, 0x1a, 0x9c, 0x86, 0xc3, 0x51, 0xb4, 0x05, 0x53, 0xbe, 0x66, 0xda, 0xbe, 0x57,
- 0x29, 0x32, 0xf5, 0x2c, 0x67, 0x50, 0xcf, 0x36, 0x15, 0x88, 0x02, 0x20, 0xfb, 0xe9, 0x61, 0x8e,
- 0x83, 0x2e, 0x42, 0x79, 0xc7, 0xb4, 0x0d, 0x6f, 0xdb, 0xa1, 0x33, 0x54, 0x4a, 0x6c, 0x7a, 0xe6,
- 0xf7, 0xcd, 0x88, 0x8c, 0x65, 0x1e, 0xb4, 0x0a, 0xf3, 0xf4, 0xa7, 0x69, 0xef, 0x45, 0x81, 0xac,
- 0x32, 0xbd, 0x94, 0x5f, 0x9e, 0x6e, 0x9e, 0xe9, 0xf7, 0xaa, 0xf3, 0xcd, 0xe4, 0x20, 0x1e, 0xe4,
- 0x47, 0x37, 0xa0, 0xc2, 0x89, 0x97, 0x35, 0xd3, 0xea, 0xba, 0x44, 0xc2, 0x02, 0x86, 0xf5, 0x7f,
- 0xfd, 0x5e, 0xb5, 0xd2, 0x4c, 0xe1, 0xc1, 0xa9, 0xd2, 0x14, 0x99, 0x7a, 0xda, 0xad, 0xab, 0x5d,
- 0xcb, 0x37, 0x3b, 0x96, 0x14, 0x5c, 0xbc, 0x4a, 0x99, 0x6d, 0x8f, 0x21, 0x37, 0x52, 0x78, 0x70,
- 0xaa, 0xf4, 0xc2, 0x01, 0x9c, 0x4c, 0x78, 0x18, 0x9a, 0x83, 0xfc, 0x01, 0x39, 0x0a, 0xd2, 0x15,
- 0xa6, 0x7f, 0xa2, 0x26, 0x4c, 0x1e, 0x6a, 0x56, 0x97, 0xb0, 0xe4, 0x54, 0xae, 0x5f, 0xc8, 0x12,
- 0xfe, 0x04, 0x28, 0x0e, 0x44, 0x2f, 0xe5, 0x9e, 0x56, 0x16, 0xf6, 0x61, 0x26, 0xe6, 0x51, 0x43,
- 0xa6, 0x6a, 0xc4, 0xa7, 0x7a, 0x3c, 0x8b, 0xaf, 0x70, 0x48, 0x69, 0x26, 0xf5, 0x59, 0x98, 0x5f,
- 0x5d, 0xbf, 0xc2, 0x13, 0xb1, 0xb0, 0xc9, 0x3a, 0x00, 0xb9, 0xdd, 0x71, 0x89, 0x47, 0xb3, 0x0b,
- 0x4f, 0xc7, 0x61, 0x02, 0x5b, 0x0f, 0x47, 0xb0, 0xc4, 0xa5, 0xbe, 0x95, 0x83, 0x33, 0xe1, 0x04,
- 0xe4, 0x66, 0x97, 0x78, 0xfe, 0x96, 0x63, 0x99, 0xfa, 0x11, 0x7a, 0x89, 0x66, 0x85, 0x5d, 0xad,
- 0x6b, 0xf9, 0x3c, 0x2b, 0xd4, 0x46, 0x45, 0xd4, 0x68, 0xf1, 0x2f, 0x76, 0x35, 0xdb, 0x37, 0xfd,
- 0xa3, 0x20, 0x2d, 0xaf, 0x05, 0x10, 0x58, 0x60, 0x21, 0x02, 0xe5, 0x43, 0xcd, 0x32, 0x8d, 0xeb,
- 0x74, 0x2f, 0x22, 0x64, 0x8c, 0x0b, 0x7d, 0x8a, 0xef, 0xaa, 0x7c, 0x3d, 0x82, 0xc2, 0x32, 0x2e,
- 0xda, 0x03, 0x60, 0x3f, 0xb1, 0x66, 0xef, 0x05, 0x31, 0xa2, 0x5c, 0xff, 0xe2, 0x48, 0x65, 0x0f,
- 0xd5, 0x02, 0x13, 0x6f, 0xce, 0x52, 0x05, 0x5e, 0x0f, 0xe1, 0xb0, 0x04, 0xad, 0xbe, 0x99, 0x83,
- 0x85, 0x74, 0x51, 0xb4, 0x01, 0xf9, 0xb6, 0x69, 0x1f, 0x53, 0x83, 0xc5, 0x7e, 0xaf, 0x9a, 0xbf,
- 0x6a, 0xda, 0x98, 0x62, 0x30, 0x28, 0xed, 0x36, 0x37, 0x9c, 0xe3, 0x41, 0x69, 0xb7, 0x31, 0xc5,
- 0x40, 0x57, 0xa0, 0xe0, 0xf9, 0xa4, 0xc3, 0x33, 0xf1, 0xb8, 0x58, 0xac, 0x6e, 0x69, 0xf9, 0xa4,
- 0x83, 0x19, 0x8a, 0xfa, 0xdd, 0x1c, 0x9c, 0x96, 0x55, 0x60, 0xba, 0xa4, 0x4d, 0x68, 0xa0, 0xfa,
- 0x26, 0x94, 0xdc, 0x40, 0x25, 0x22, 0x2f, 0x7e, 0x29, 0xf3, 0x11, 0x08, 0x90, 0x1a, 0x57, 0xaa,
- 0x97, 0xc8, 0x51, 0x82, 0x3c, 0x24, 0x47, 0x89, 0x39, 0x17, 0x0e, 0x60, 0x26, 0x26, 0x3d, 0xc4,
- 0x1f, 0xd7, 0xe2, 0xfe, 0x38, 0xa6, 0x2a, 0x64, 0x97, 0x7c, 0x03, 0x8a, 0x3c, 0x3f, 0xa1, 0x96,
- 0x00, 0x3d, 0xde, 0xb1, 0xcf, 0xf0, 0x3d, 0x4e, 0x32, 0x5b, 0xe6, 0x73, 0xa8, 0xff, 0x56, 0x00,
- 0xf8, 0x04, 0x2d, 0xe2, 0xd3, 0x7a, 0xda, 0xa6, 0xe9, 0x4f, 0x89, 0xd7, 0xd3, 0x4c, 0x03, 0x6c,
- 0x04, 0xe9, 0x50, 0xd2, 0x45, 0x66, 0xce, 0x65, 0xc8, 0xcc, 0x11, 0xb8, 0xf8, 0x93, 0xeb, 0x7c,
- 0x2e, 0xac, 0x0b, 0x44, 0x46, 0x0e, 0x81, 0x17, 0x34, 0x98, 0x89, 0x31, 0x0f, 0x51, 0xf1, 0xa5,
- 0xb8, 0x8a, 0xff, 0x3f, 0xcb, 0x22, 0x64, 0xc5, 0x76, 0x81, 0xdf, 0x00, 0x32, 0xec, 0x79, 0x03,
- 0x26, 0x77, 0x68, 0x7d, 0xc3, 0xe7, 0x5a, 0xce, 0x5a, 0x09, 0x35, 0xa7, 0xa9, 0xbe, 0x19, 0x01,
- 0x07, 0x08, 0xea, 0xf7, 0x72, 0xf0, 0x70, 0xb2, 0xd4, 0x5d, 0x75, 0xec, 0x5d, 0x73, 0xaf, 0xeb,
- 0xb2, 0x1f, 0xe8, 0xcb, 0x30, 0x15, 0x20, 0xf2, 0x05, 0x2d, 0x8b, 0x7c, 0xdd, 0x62, 0xd4, 0x3b,
- 0xbd, 0xea, 0xd9, 0xa4, 0x68, 0x30, 0x82, 0xb9, 0x1c, 0xad, 0x22, 0x42, 0x07, 0xc9, 0xb1, 0x0c,
- 0x7a, 0x42, 0xb6, 0xed, 0xc8, 0x94, 0xd1, 0xb7, 0xe0, 0x94, 0xc1, 0x2b, 0x27, 0x69, 0x09, 0xdc,
- 0x81, 0x9f, 0xcc, 0x54, 0x71, 0x49, 0x72, 0xcd, 0x87, 0xf8, 0x52, 0x4f, 0x0d, 0x19, 0xc4, 0xc3,
- 0x66, 0x52, 0x3f, 0x52, 0xe0, 0xec, 0xf0, 0xca, 0x1f, 0x11, 0x28, 0xba, 0xec, 0x2f, 0xe1, 0xe5,
- 0x97, 0x32, 0xac, 0x87, 0xef, 0x31, 0xfd, 0x1a, 0x11, 0xfc, 0xf6, 0xb0, 0xc0, 0x46, 0x3b, 0x30,
- 0xa5, 0xb3, 0x25, 0x71, 0x6b, 0xbe, 0x34, 0xd6, 0x2d, 0x25, 0xbe, 0xff, 0xb0, 0xb4, 0x0a, 0xc8,
- 0x98, 0x23, 0xab, 0xbf, 0x52, 0xe0, 0x64, 0x22, 0xc1, 0xa3, 0x45, 0xc8, 0x9b, 0xb6, 0xcf, 0x2c,
- 0x2a, 0x1f, 0x9c, 0xcf, 0x86, 0xed, 0x07, 0xae, 0x49, 0x07, 0xd0, 0x23, 0x50, 0xd8, 0xa1, 0x37,
- 0xdb, 0x3c, 0x2b, 0x54, 0x66, 0xfa, 0xbd, 0xea, 0x74, 0xd3, 0x71, 0xac, 0x80, 0x83, 0x0d, 0xa1,
- 0x47, 0x61, 0xca, 0xf3, 0x5d, 0xd3, 0xde, 0xe3, 0xd5, 0x2a, 0x2b, 0xd6, 0x5a, 0x8c, 0x12, 0xb0,
- 0xf1, 0x61, 0x74, 0x1e, 0x8a, 0x87, 0xc4, 0x65, 0xf9, 0x7b, 0x92, 0x71, 0xb2, 0x3a, 0xf4, 0x7a,
- 0x40, 0x0a, 0x58, 0x05, 0x83, 0xfa, 0x9e, 0x02, 0xb3, 0xf1, 0x0a, 0xe1, 0x9e, 0x04, 0x1e, 0x74,
- 0x00, 0x33, 0xae, 0x9c, 0xd8, 0xb8, 0x6f, 0xd5, 0xc7, 0xcf, 0xa6, 0xcd, 0xf9, 0x7e, 0xaf, 0x3a,
- 0x13, 0xcf, 0x92, 0x71, 0x6c, 0xf5, 0xd7, 0x39, 0x28, 0xf3, 0x4d, 0x59, 0x9a, 0xd9, 0x46, 0x37,
- 0x06, 0x52, 0xc8, 0xf9, 0xec, 0xc6, 0x15, 0x45, 0xae, 0x21, 0x1e, 0x65, 0x40, 0x99, 0xde, 0x2b,
- 0x7c, 0x37, 0x28, 0xce, 0x03, 0x9b, 0x7a, 0x22, 0x9b, 0x27, 0x71, 0xa9, 0xa8, 0x0e, 0x89, 0x68,
- 0x1e, 0x96, 0x61, 0xd1, 0xeb, 0xa1, 0xd1, 0x8e, 0x71, 0x39, 0xa2, 0x3b, 0xcf, 0x66, 0xaf, 0x6f,
- 0x2b, 0x50, 0x49, 0x13, 0x8a, 0x45, 0x17, 0xe5, 0x38, 0xd1, 0x25, 0x77, 0xdf, 0xa2, 0xcb, 0xef,
- 0x15, 0xe9, 0xd8, 0x3d, 0x0f, 0x7d, 0x15, 0x4a, 0xf4, 0x86, 0xce, 0x7a, 0x2b, 0xca, 0xc0, 0x2a,
- 0x46, 0xdc, 0xe7, 0xaf, 0xed, 0x7c, 0x8d, 0xe8, 0xfe, 0x55, 0xe2, 0x6b, 0x51, 0xe9, 0x1b, 0xd1,
- 0x70, 0x88, 0x8a, 0x36, 0xa1, 0xe0, 0x75, 0x88, 0x3e, 0x46, 0xc9, 0xcf, 0x56, 0xd6, 0xea, 0x10,
- 0x3d, 0xca, 0x3c, 0xf4, 0x17, 0x66, 0x38, 0xea, 0x8f, 0xe4, 0x93, 0xf0, 0xbc, 0xf8, 0x49, 0xa4,
- 0xe8, 0x57, 0xb9, 0x6f, 0xfa, 0x7d, 0x2b, 0x8c, 0x6b, 0x6c, 0x75, 0x57, 0x4c, 0xcf, 0x47, 0xaf,
- 0x0d, 0xe8, 0xb8, 0x96, 0x4d, 0xc7, 0x54, 0x9a, 0x69, 0x38, 0x74, 0x2f, 0x41, 0x91, 0xf4, 0x7b,
- 0x15, 0x26, 0x4d, 0x9f, 0xb4, 0x85, 0x63, 0x2d, 0x67, 0x55, 0x70, 0x14, 0x84, 0x36, 0xa8, 0x38,
- 0x0e, 0x50, 0xd4, 0x1f, 0xe7, 0x62, 0x1b, 0xa0, 0x8a, 0x47, 0xaf, 0xc1, 0xb4, 0xc7, 0xef, 0x3e,
- 0x22, 0x38, 0x64, 0xb9, 0x4f, 0x85, 0x77, 0xfc, 0x79, 0x3e, 0xd3, 0xb4, 0xa0, 0x78, 0x38, 0x02,
- 0x94, 0x3c, 0x37, 0x37, 0x8e, 0xe7, 0x26, 0x8e, 0x3e, 0xcd, 0x73, 0xd1, 0x15, 0x38, 0x4d, 0x6e,
- 0xfb, 0xc4, 0x36, 0x88, 0x81, 0x39, 0x98, 0xd4, 0xce, 0xa8, 0xf4, 0x7b, 0xd5, 0xd3, 0xeb, 0x43,
- 0xc6, 0xf1, 0x50, 0x29, 0xf5, 0x26, 0x0c, 0xb3, 0x05, 0xf4, 0x0a, 0x4c, 0x39, 0x1d, 0xed, 0x66,
- 0x98, 0x10, 0x46, 0x77, 0x86, 0xae, 0x31, 0xd6, 0x61, 0x06, 0x07, 0x74, 0x03, 0xc1, 0x30, 0xe6,
- 0x88, 0xea, 0x3f, 0x15, 0x98, 0x4b, 0x06, 0xc4, 0x31, 0x42, 0xce, 0x16, 0xcc, 0xb6, 0x35, 0x5f,
- 0xdf, 0x0f, 0xf3, 0x2c, 0xef, 0x0c, 0x2f, 0xf7, 0x7b, 0xd5, 0xd9, 0xab, 0xb1, 0x91, 0x3b, 0xbd,
- 0x2a, 0xba, 0xdc, 0xb5, 0xac, 0xa3, 0x78, 0x9d, 0x9f, 0x90, 0x47, 0x5f, 0x81, 0x79, 0xc3, 0xf4,
- 0x7c, 0xd3, 0xd6, 0xfd, 0x08, 0x34, 0x68, 0x25, 0x3f, 0xde, 0xef, 0x55, 0xe7, 0xd7, 0x92, 0x83,
- 0x29, 0xb8, 0x83, 0x28, 0xea, 0xcf, 0x73, 0xa1, 0x73, 0x0f, 0x34, 0xae, 0xe8, 0xb5, 0x5b, 0x0f,
- 0x4b, 0xe7, 0xe4, 0xb5, 0x3b, 0x2a, 0xaa, 0xb1, 0xc4, 0x85, 0x6e, 0x0e, 0xd4, 0xe6, 0xab, 0xc7,
- 0xea, 0x9a, 0x7d, 0xba, 0x2a, 0xf5, 0xdf, 0x4d, 0xc2, 0x4c, 0x2c, 0x01, 0x67, 0xa8, 0xd8, 0x1b,
- 0x70, 0xd2, 0x88, 0x7c, 0x87, 0xb9, 0x40, 0x60, 0x08, 0x0f, 0x70, 0x66, 0xd9, 0xed, 0x99, 0x5c,
- 0x92, 0x3f, 0x1e, 0x07, 0xf2, 0x77, 0x3b, 0x0e, 0x5c, 0x87, 0xd9, 0xa8, 0x0b, 0x7c, 0xd5, 0x31,
- 0x84, 0x8b, 0xd6, 0xb8, 0xd4, 0x6c, 0x23, 0x36, 0x7a, 0xa7, 0x57, 0x3d, 0x9d, 0xac, 0x3d, 0x29,
- 0x1d, 0x27, 0x50, 0xd0, 0x39, 0x98, 0x64, 0x67, 0xc3, 0x0a, 0xbd, 0x7c, 0x14, 0xf6, 0x98, 0x5e,
- 0x71, 0x30, 0x86, 0x2e, 0x42, 0x59, 0x33, 0xda, 0xa6, 0xdd, 0xd0, 0x75, 0xe2, 0x89, 0x4e, 0x23,
- 0xab, 0x1e, 0x1b, 0x11, 0x19, 0xcb, 0x3c, 0xa8, 0x0d, 0xb3, 0xbb, 0xa6, 0xeb, 0xf9, 0x8d, 0x43,
- 0xcd, 0xb4, 0xb4, 0x1d, 0x8b, 0xf0, 0xbe, 0x63, 0x96, 0xd2, 0xa6, 0xd5, 0xdd, 0x11, 0xa5, 0xd3,
- 0x59, 0xb1, 0xbd, 0xcb, 0x31, 0x30, 0x9c, 0x00, 0xa7, 0x65, 0x94, 0xef, 0x58, 0xc4, 0xe5, 0xdd,
- 0xba, 0x52, 0xe6, 0xb9, 0xb6, 0x43, 0xa9, 0xa8, 0x8c, 0x8a, 0x68, 0x1e, 0x96, 0x61, 0xd1, 0xab,
- 0x52, 0x93, 0x7b, 0x9a, 0x19, 0xe7, 0xc5, 0xb1, 0x3b, 0x09, 0x41, 0x28, 0x0a, 0x47, 0x42, 0x40,
- 0xf5, 0x83, 0x29, 0x71, 0xd3, 0x4b, 0xb9, 0x94, 0xa0, 0xc7, 0xe8, 0x0d, 0x87, 0x0d, 0x71, 0x4b,
- 0x96, 0x6e, 0x29, 0x8c, 0x8c, 0xc5, 0xb8, 0xf4, 0x3d, 0x2c, 0x97, 0xe9, 0x7b, 0x58, 0x3e, 0xc3,
- 0xf7, 0xb0, 0xc2, 0xc8, 0xef, 0x61, 0x09, 0x1b, 0x99, 0xcc, 0x60, 0x23, 0x89, 0x43, 0x9b, 0xba,
- 0x37, 0x87, 0x36, 0xb4, 0xe9, 0x5c, 0xbc, 0x8b, 0x4d, 0xe7, 0xd2, 0x27, 0x6a, 0x3a, 0x3f, 0x1f,
- 0x7d, 0x47, 0x9c, 0x66, 0x0a, 0x7e, 0x52, 0xfa, 0x8e, 0x78, 0xa7, 0x57, 0x7d, 0x24, 0xed, 0x5b,
- 0xa9, 0x7f, 0xd4, 0x21, 0x5e, 0xed, 0x25, 0xf9, 0x63, 0xe3, 0x2f, 0x95, 0xf0, 0x73, 0x88, 0x21,
- 0x2c, 0x8c, 0xf5, 0xc4, 0xcb, 0xf5, 0xad, 0xe3, 0x5f, 0x86, 0x6b, 0xab, 0x09, 0xc8, 0x20, 0xca,
- 0x3f, 0x96, 0xf8, 0x52, 0x62, 0xa4, 0x7f, 0xaf, 0x19, 0x58, 0xd4, 0x82, 0x07, 0x67, 0x86, 0xa2,
- 0xde, 0xd3, 0xde, 0xd8, 0xab, 0xe2, 0xa6, 0x1a, 0xf6, 0xaa, 0x37, 0x20, 0xaf, 0x13, 0x6b, 0x48,
- 0xdd, 0x39, 0xc4, 0x97, 0x93, 0x8d, 0xee, 0xa0, 0x99, 0xb9, 0xba, 0x7e, 0x05, 0x53, 0x0c, 0xf5,
- 0x07, 0x05, 0x51, 0x88, 0x44, 0xe1, 0xeb, 0x7f, 0x89, 0xe7, 0x13, 0x26, 0x9e, 0x44, 0x84, 0x28,
- 0xde, 0xfb, 0xb0, 0x5e, 0xba, 0xdb, 0x61, 0xfd, 0x5f, 0xe1, 0x9d, 0x92, 0x7d, 0x3f, 0x43, 0x0f,
- 0x4b, 0x06, 0xde, 0x2c, 0xf3, 0xb5, 0xe5, 0x5f, 0x20, 0x47, 0x81, 0xb5, 0x9f, 0x93, 0xad, 0x7d,
- 0x3a, 0xa5, 0x17, 0xf2, 0x0c, 0x4c, 0x91, 0xdd, 0x5d, 0xa2, 0xfb, 0x3c, 0x6e, 0x8b, 0xaf, 0xb5,
- 0x53, 0xeb, 0x8c, 0x7a, 0x87, 0x96, 0x99, 0xd1, 0x94, 0x01, 0x11, 0x73, 0x11, 0xf4, 0x32, 0x4c,
- 0xfb, 0x66, 0x9b, 0x34, 0x0c, 0x83, 0x18, 0xfc, 0x93, 0xc4, 0x38, 0x5f, 0xa9, 0x59, 0x67, 0x69,
- 0x5b, 0x00, 0xe0, 0x08, 0xeb, 0x52, 0xe9, 0x87, 0x3f, 0xad, 0x4e, 0xbc, 0xf9, 0xf7, 0xa5, 0x09,
- 0xf5, 0x67, 0x39, 0xe1, 0x0b, 0x91, 0xce, 0x3f, 0x6e, 0xe3, 0xcf, 0x41, 0xc9, 0xe9, 0x50, 0x5e,
- 0x47, 0xe4, 0xac, 0x0b, 0xa2, 0x8c, 0xbc, 0xc6, 0xe9, 0x77, 0x7a, 0xd5, 0x4a, 0x12, 0x56, 0x8c,
- 0xe1, 0x50, 0x3a, 0x52, 0x61, 0x3e, 0x93, 0x0a, 0x0b, 0xe3, 0xab, 0x70, 0x15, 0xe6, 0x23, 0xfb,
- 0x69, 0x11, 0xdd, 0xb1, 0x0d, 0x8f, 0xdb, 0x31, 0x4b, 0x29, 0xdb, 0xc9, 0x41, 0x3c, 0xc8, 0xaf,
- 0xfe, 0x46, 0x81, 0xf9, 0x81, 0x87, 0x13, 0xe8, 0x19, 0x98, 0x31, 0x69, 0x41, 0xbb, 0xab, 0xf1,
- 0x9b, 0x58, 0xa0, 0xaf, 0x33, 0x7c, 0x79, 0x33, 0x1b, 0xf2, 0x20, 0x8e, 0xf3, 0xa2, 0x07, 0x21,
- 0x6f, 0x76, 0x44, 0x0f, 0x97, 0x85, 0xa7, 0x8d, 0x2d, 0x0f, 0x53, 0x1a, 0x8d, 0x33, 0xfb, 0x9a,
- 0x6b, 0xdc, 0xd2, 0x5c, 0x7a, 0x5a, 0x2e, 0x4d, 0xd1, 0xf9, 0x78, 0x9c, 0x79, 0x2e, 0x3e, 0x8c,
- 0x93, 0xfc, 0xea, 0x2f, 0x14, 0x78, 0x30, 0xf5, 0x72, 0x96, 0xf9, 0x05, 0x8e, 0x06, 0xd0, 0xd1,
- 0x5c, 0xad, 0x4d, 0xf8, 0xad, 0xe3, 0x18, 0x2f, 0x56, 0xc2, 0x6b, 0xcd, 0x56, 0x08, 0x84, 0x25,
- 0x50, 0xf5, 0x27, 0x39, 0x98, 0x11, 0xf7, 0xd2, 0xa0, 0x7f, 0x77, 0xef, 0x1b, 0x39, 0x5b, 0xb1,
- 0x46, 0xce, 0xe8, 0x54, 0x12, 0x5b, 0x5b, 0x5a, 0x2b, 0x07, 0xdd, 0x80, 0x29, 0x8f, 0x3d, 0x6e,
- 0xca, 0xd4, 0x5e, 0x8f, 0x63, 0x32, 0xb9, 0xe8, 0x08, 0x82, 0xdf, 0x98, 0xe3, 0xa9, 0x7d, 0x05,
- 0x16, 0x63, 0xfc, 0x3c, 0x15, 0xbb, 0x98, 0xec, 0x12, 0x97, 0xd8, 0x3a, 0x41, 0x17, 0xa0, 0xa4,
- 0x75, 0xcc, 0x67, 0x5d, 0xa7, 0xdb, 0xe1, 0xe7, 0x19, 0x5e, 0xea, 0x1a, 0x5b, 0x1b, 0x8c, 0x8e,
- 0x43, 0x0e, 0xca, 0x2d, 0x16, 0xc4, 0xad, 0x4a, 0x6a, 0x79, 0x06, 0x74, 0x1c, 0x72, 0x84, 0x49,
- 0xb1, 0x90, 0x9a, 0x14, 0x9b, 0x90, 0xef, 0x9a, 0x06, 0xef, 0x3d, 0x3f, 0x29, 0x42, 0xc5, 0x4b,
- 0x59, 0xeb, 0x21, 0x2a, 0xac, 0xfe, 0x51, 0x81, 0xf9, 0xd8, 0x26, 0xef, 0x43, 0xb7, 0xe9, 0x5a,
- 0xbc, 0xdb, 0x74, 0x3e, 0xfb, 0x89, 0xa5, 0xf4, 0x9b, 0xf6, 0x13, 0x7b, 0x60, 0x0d, 0xa7, 0x56,
- 0xf2, 0xa1, 0xd4, 0x72, 0xd6, 0x6e, 0x6e, 0xfa, 0xeb, 0x28, 0xf5, 0xcf, 0x39, 0x38, 0x35, 0xc4,
- 0x86, 0xd0, 0xeb, 0x00, 0x51, 0xe2, 0xe6, 0xf3, 0x8d, 0x4e, 0xc0, 0x03, 0xdf, 0x52, 0xd8, 0x77,
- 0x6b, 0x89, 0x2a, 0x01, 0x22, 0x17, 0xca, 0x2e, 0xf1, 0x88, 0x7b, 0x48, 0x8c, 0xcb, 0x2c, 0xf0,
- 0x53, 0xbd, 0x3d, 0x93, 0x5d, 0x6f, 0x03, 0x96, 0x1b, 0xa5, 0x7b, 0x1c, 0xe1, 0x62, 0x79, 0x12,
- 0xf4, 0x7a, 0xa4, 0xbf, 0xe0, 0x3d, 0x5e, 0x3d, 0xcb, 0x7e, 0xe2, 0x0f, 0x0e, 0x47, 0x68, 0xf2,
- 0x6f, 0x0a, 0x9c, 0x89, 0xad, 0x71, 0x9b, 0xb4, 0x3b, 0x96, 0xe6, 0x93, 0xfb, 0x10, 0x85, 0x6e,
- 0xc4, 0xa2, 0xd0, 0x53, 0xd9, 0xf5, 0x28, 0xd6, 0x98, 0xda, 0x58, 0x7e, 0x4f, 0x81, 0x07, 0x87,
- 0x4a, 0xdc, 0x07, 0xb7, 0x7a, 0x39, 0xee, 0x56, 0xf5, 0xf1, 0xb7, 0x95, 0xe2, 0x5e, 0x7f, 0x4d,
- 0xdb, 0x14, 0xf3, 0xb3, 0xff, 0xc2, 0xa4, 0xa1, 0xfe, 0x56, 0x81, 0x13, 0x82, 0x93, 0x5e, 0xe2,
- 0x33, 0xdc, 0x40, 0xea, 0x00, 0xfc, 0x9d, 0xad, 0xf8, 0xd8, 0x92, 0x8f, 0x96, 0xfd, 0x6c, 0x38,
- 0x82, 0x25, 0x2e, 0xf4, 0x3c, 0x20, 0xb1, 0xc0, 0x96, 0x25, 0x5a, 0x82, 0x2c, 0xf4, 0xe7, 0x9b,
- 0x0b, 0x5c, 0x16, 0xe1, 0x01, 0x0e, 0x3c, 0x44, 0x4a, 0xfd, 0x93, 0x12, 0x65, 0x6b, 0x46, 0xfe,
- 0x94, 0x2a, 0x9e, 0xad, 0x2d, 0x55, 0xf1, 0x72, 0xba, 0x61, 0x9c, 0x9f, 0xd6, 0x74, 0xc3, 0x16,
- 0x97, 0xe2, 0x0f, 0x7f, 0x29, 0x24, 0x36, 0xc1, 0xfc, 0x20, 0x6b, 0x65, 0xf7, 0x82, 0xf4, 0xb6,
- 0xba, 0x5c, 0x7f, 0x2c, 0xd3, 0x6a, 0xa8, 0x8d, 0x0e, 0x6d, 0x3b, 0x5d, 0x90, 0xde, 0x56, 0x26,
- 0x4a, 0x8a, 0x0c, 0xef, 0x2b, 0x0b, 0x77, 0xe9, 0x7d, 0xe5, 0x05, 0xe9, 0x7d, 0x65, 0xd0, 0xd1,
- 0x8a, 0xca, 0xa0, 0xc1, 0x37, 0x96, 0x9b, 0x51, 0x62, 0x09, 0x7a, 0x59, 0xe7, 0x32, 0x24, 0xe6,
- 0x11, 0x2f, 0x96, 0x31, 0x9c, 0xed, 0x10, 0x37, 0x20, 0x47, 0x8b, 0xa4, 0x5e, 0x5a, 0x64, 0x6b,
- 0x59, 0xe8, 0xf7, 0xaa, 0x67, 0xb7, 0x86, 0x72, 0xe0, 0x14, 0x49, 0xb4, 0x07, 0xb3, 0xac, 0x5b,
- 0x64, 0x84, 0xcf, 0x65, 0x83, 0x5e, 0xe9, 0xa3, 0x19, 0x1f, 0xe5, 0x44, 0x1d, 0xd9, 0x56, 0x0c,
- 0x06, 0x27, 0x60, 0x9b, 0x8d, 0x77, 0x3e, 0x5c, 0x9c, 0x78, 0xf7, 0xc3, 0xc5, 0x89, 0xf7, 0x3f,
- 0x5c, 0x9c, 0x78, 0xb3, 0xbf, 0xa8, 0xbc, 0xd3, 0x5f, 0x54, 0xde, 0xed, 0x2f, 0x2a, 0xef, 0xf7,
- 0x17, 0x95, 0x7f, 0xf4, 0x17, 0x95, 0xef, 0x7f, 0xb4, 0x38, 0xf1, 0xca, 0x43, 0x23, 0xfe, 0x87,
- 0xe2, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x31, 0x43, 0xa8, 0x61, 0x31, 0x00, 0x00,
-}
+func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1764,7 +383,7 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
baseI := i
@@ -1793,7 +412,7 @@ func (m *BasicDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
baseI := i
@@ -1993,7 +612,7 @@ func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
v := m.Requests[QualifiedName(keysForRequests[iNdEx])]
baseI := i
@@ -2078,7 +697,7 @@ func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2739,7 +1358,7 @@ func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2901,7 +1520,7 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int,
for k := range m.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])]
baseI := i
@@ -4853,7 +3472,7 @@ func (this *BasicDevice) String() string {
for k := range this.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
for _, k := range keysForAttributes {
mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
@@ -4863,7 +3482,7 @@ func (this *BasicDevice) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
@@ -4932,7 +3551,7 @@ func (this *CapacityRequirements) String() string {
for k := range this.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
mapStringForRequests := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForRequests {
mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)])
@@ -4962,7 +3581,7 @@ func (this *CounterSet) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -5170,7 +3789,7 @@ func (this *DeviceCounterConsumption) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -5229,7 +3848,7 @@ func (this *DeviceRequestAllocationResult) String() string {
for k := range this.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForConsumedCapacity {
mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)])
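The marshaling hunks above swap the gogo sortkeys helper for the standard library's sort.Strings; both serve the same purpose of giving the map-backed fields (capacity, attributes, counters, consumed capacity) a deterministic wire and string form despite Go's randomized map iteration. The following is a minimal, self-contained sketch of that pattern only; the map contents and names are stand-ins, not the generated API types.

package main

import (
	"fmt"
	"sort"
)

// sortedKeys mirrors the generated marshalers' approach: collect the map's
// keys, sort them with sort.Strings, then iterate in that fixed order so the
// serialized output does not depend on Go's randomized map iteration.
func sortedKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	capacity := map[string]string{"memory": "8Gi", "cpu": "4"}
	for _, k := range sortedKeys(capacity) {
		fmt.Printf("%s=%s\n", k, capacity[k]) // always cpu first, then memory
	}
}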
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/generated.proto b/operator/vendor/k8s.io/api/resource/v1beta1/generated.proto
index 6ce65b4d..fe2397a8 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/generated.proto
@@ -41,7 +41,7 @@ message AllocatedDeviceStatus {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
optional string driver = 1;
@@ -65,6 +65,8 @@ message AllocatedDeviceStatus {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 7;
// Conditions contains the latest observation of the device's state.
@@ -88,6 +90,7 @@ message AllocatedDeviceStatus {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
optional NetworkDeviceData networkData = 6;
}
@@ -139,14 +142,17 @@ message BasicDevice {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
repeated DeviceCounterConsumption consumesCounters = 3;
// NodeName identifies the node where the device is available.
@@ -182,7 +188,9 @@ message BasicDevice {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -219,6 +227,8 @@ message BasicDevice {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 9;
// BindingFailureConditions defines the conditions for binding failure.
@@ -235,6 +245,8 @@ message BasicDevice {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 10;
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -425,7 +437,7 @@ message Counter {
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -436,6 +448,8 @@ message CounterSet {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string name = 1;
// Counters defines the set of counters for this CounterSet
@@ -470,6 +484,7 @@ message DeviceAllocationConfiguration {
// or from a claim.
//
// +required
+ // +k8s:required
optional string source = 1;
// Requests lists the names of requests where the configuration applies.
@@ -481,6 +496,10 @@ message DeviceAllocationConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 2;
optional DeviceConfiguration deviceConfiguration = 3;
@@ -492,6 +511,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceRequestAllocationResult results = 1;
// This field is a combination of all the claim and class configuration parameters.
@@ -504,6 +525,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
repeated DeviceAllocationConfiguration config = 2;
}
@@ -512,26 +535,30 @@ message DeviceAttribute {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional int64 int = 2;
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional bool bool = 3;
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string string = 4;
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string version = 5;
}
@@ -568,6 +595,11 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
repeated DeviceRequest requests = 1;
// These constraints must be satisfied by the set of devices that get
@@ -575,6 +607,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceConstraint constraints = 2;
// This field holds configuration for multiple potential drivers which
@@ -583,6 +617,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClaimConfiguration config = 3;
}
@@ -597,6 +633,10 @@ message DeviceClaimConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
optional DeviceConfiguration deviceConfiguration = 2;
@@ -612,6 +652,8 @@ message DeviceClaimConfiguration {
message DeviceClass {
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines what can be allocated and how to configure it.
@@ -647,6 +689,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 1;
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -657,6 +701,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClassConfiguration config = 2;
// ExtendedResourceName is the extended resource name for the devices of this class.
@@ -671,6 +717,8 @@ message DeviceClassSpec {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
optional string extendedResourceName = 4;
}
@@ -682,6 +730,7 @@ message DeviceConfiguration {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
optional OpaqueDeviceConfiguration opaque = 1;
}
@@ -699,6 +748,10 @@ message DeviceConstraint {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
// MatchAttribute requires that all devices in question have this
@@ -716,6 +769,8 @@ message DeviceConstraint {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
optional string matchAttribute = 2;
// DistinctAttribute requires that all devices in question have this
@@ -742,14 +797,13 @@ message DeviceCounterConsumption {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string counterSet = 1;
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
  map<string, Counter> counters = 2;
@@ -797,6 +851,8 @@ message DeviceRequest {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 3;
// AllocationMode and its related fields define how devices are allocated
@@ -822,6 +878,7 @@ message DeviceRequest {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
optional string allocationMode = 4;
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -871,6 +928,11 @@ message DeviceRequest {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
repeated DeviceSubRequest firstAvailable = 7;
// If specified, the request's tolerations.
@@ -935,9 +997,11 @@ message DeviceRequestAllocationResult {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
optional string driver = 2;
// This name together with the driver name and the device name field
@@ -947,6 +1011,8 @@ message DeviceRequestAllocationResult {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
optional string pool = 3;
// Device references one device instance via its name in the driver's
@@ -978,6 +1044,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceTaints
+ // +k8s:optional
+ // +k8s:maxItems=16
repeated DeviceToleration tolerations = 6;
// BindingConditions contains a copy of the BindingConditions
@@ -989,6 +1057,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 7;
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -1000,6 +1070,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 8;
// ShareID uniquely identifies an individual allocation share of the device,
@@ -1009,6 +1081,8 @@ message DeviceRequestAllocationResult {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 9;
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -1066,6 +1140,8 @@ message DeviceSubRequest {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
optional string deviceClassName = 2;
// Selectors define criteria which must be satisfied by a specific
@@ -1075,6 +1151,7 @@ message DeviceSubRequest {
//
// +optional
// +listType=atomic
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 3;
// AllocationMode and its related fields define how devices are allocated
@@ -1166,10 +1243,13 @@ message DeviceTaint {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
optional string effect = 3;
// TimeAdded represents the time at which the taint was added.
@@ -1187,6 +1267,8 @@ message DeviceToleration {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
optional string key = 1;
// Operator represents a key's relationship to the value.
@@ -1233,6 +1315,8 @@ message NetworkDeviceData {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
optional string interfaceName = 1;
// IPs lists the network addresses assigned to the device's network interface.
@@ -1245,6 +1329,10 @@ message NetworkDeviceData {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
repeated string ips = 2;
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1252,6 +1340,8 @@ message NetworkDeviceData {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
optional string hardwareAddress = 3;
}
@@ -1265,9 +1355,11 @@ message OpaqueDeviceConfiguration {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
optional string driver = 1;
// Parameters can contain arbitrary data. It is the responsibility of
@@ -1296,6 +1388,7 @@ message ResourceClaim {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
optional ResourceClaimSpec spec = 2;
// Status describes whether the claim is ready to use and what has been allocated.
@@ -1350,6 +1443,8 @@ message ResourceClaimStatus {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
optional AllocationResult allocation = 1;
// ReservedFor indicates which entities are currently allowed to use
@@ -1377,6 +1472,10 @@ message ResourceClaimStatus {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
repeated ResourceClaimConsumerReference reservedFor = 2;
// Devices contains the status of each device allocated for this
@@ -1384,12 +1483,18 @@ message ResourceClaimStatus {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
repeated AllocatedDeviceStatus devices = 4;
}
@@ -1523,7 +1628,8 @@ message ResourceSliceSpec {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
optional string driver = 1;
@@ -1570,10 +1676,14 @@ message ResourceSliceSpec {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
repeated Device devices = 6;
// PerDeviceNodeSelection defines whether the access from nodes to
@@ -1591,13 +1701,21 @@ message ResourceSliceSpec {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
- // The maximum number of SharedCounters is 32.
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
repeated CounterSet sharedCounters = 8;
}
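The comments above tighten several ResourceSlice limits: Devices and SharedCounters become mutually exclusive within one slice, a slice may carry at most 8 counter sets, a device may consume counters from at most 2 counter sets (32 counters each), and the usual 128-device cap drops to 64 once any device declares taints or consumes counters. The sketch below merely restates those documented rules as a plain Go check; it is not the apiserver's validation code, and the type, field, and function names are invented for the example.

package main

import (
	"errors"
	"fmt"
)

// sliceShape is a made-up summary of a ResourceSlice's shape, used only to
// illustrate the limits described in the generated.proto comments above.
type sliceShape struct {
	numDevices                   int // len(spec.devices)
	numCounterSets               int // len(spec.sharedCounters)
	maxCountersInAnyCounterSet   int
	maxConsumptionsOnAnyDevice   int // len(device.consumesCounters)
	anyDeviceHasTaintsOrCounters bool
}

func checkSliceShape(s sliceShape) error {
	// Only one of Devices and SharedCounters can be set in a ResourceSlice.
	if s.numDevices > 0 && s.numCounterSets > 0 {
		return errors.New("devices and sharedCounters are mutually exclusive")
	}
	// At most 8 counter sets per slice, each with at most 32 counters.
	if s.numCounterSets > 8 {
		return fmt.Errorf("too many counter sets: %d > 8", s.numCounterSets)
	}
	if s.maxCountersInAnyCounterSet > 32 {
		return fmt.Errorf("too many counters in a counter set: %d > 32", s.maxCountersInAnyCounterSet)
	}
	// At most 2 device counter consumptions per device.
	if s.maxConsumptionsOnAnyDevice > 2 {
		return fmt.Errorf("too many consumesCounters entries on a device: %d > 2", s.maxConsumptionsOnAnyDevice)
	}
	// 128 devices normally; 64 if any device uses taints or consumes counters.
	limit := 128
	if s.anyDeviceHasTaintsOrCounters {
		limit = 64
	}
	if s.numDevices > limit {
		return fmt.Errorf("too many devices: %d > %d", s.numDevices, limit)
	}
	return nil
}

func main() {
	err := checkSliceShape(sliceShape{numDevices: 70, anyDeviceHasTaintsOrCounters: true})
	fmt.Println(err) // too many devices: 70 > 64
}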
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/resource/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..e47160fb
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,108 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+
+func (*AllocationResult) ProtoMessage() {}
+
+func (*BasicDevice) ProtoMessage() {}
+
+func (*CELDeviceSelector) ProtoMessage() {}
+
+func (*CapacityRequestPolicy) ProtoMessage() {}
+
+func (*CapacityRequestPolicyRange) ProtoMessage() {}
+
+func (*CapacityRequirements) ProtoMessage() {}
+
+func (*Counter) ProtoMessage() {}
+
+func (*CounterSet) ProtoMessage() {}
+
+func (*Device) ProtoMessage() {}
+
+func (*DeviceAllocationConfiguration) ProtoMessage() {}
+
+func (*DeviceAllocationResult) ProtoMessage() {}
+
+func (*DeviceAttribute) ProtoMessage() {}
+
+func (*DeviceCapacity) ProtoMessage() {}
+
+func (*DeviceClaim) ProtoMessage() {}
+
+func (*DeviceClaimConfiguration) ProtoMessage() {}
+
+func (*DeviceClass) ProtoMessage() {}
+
+func (*DeviceClassConfiguration) ProtoMessage() {}
+
+func (*DeviceClassList) ProtoMessage() {}
+
+func (*DeviceClassSpec) ProtoMessage() {}
+
+func (*DeviceConfiguration) ProtoMessage() {}
+
+func (*DeviceConstraint) ProtoMessage() {}
+
+func (*DeviceCounterConsumption) ProtoMessage() {}
+
+func (*DeviceRequest) ProtoMessage() {}
+
+func (*DeviceRequestAllocationResult) ProtoMessage() {}
+
+func (*DeviceSelector) ProtoMessage() {}
+
+func (*DeviceSubRequest) ProtoMessage() {}
+
+func (*DeviceTaint) ProtoMessage() {}
+
+func (*DeviceToleration) ProtoMessage() {}
+
+func (*NetworkDeviceData) ProtoMessage() {}
+
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+
+func (*ResourceClaim) ProtoMessage() {}
+
+func (*ResourceClaimConsumerReference) ProtoMessage() {}
+
+func (*ResourceClaimList) ProtoMessage() {}
+
+func (*ResourceClaimSpec) ProtoMessage() {}
+
+func (*ResourceClaimStatus) ProtoMessage() {}
+
+func (*ResourceClaimTemplate) ProtoMessage() {}
+
+func (*ResourceClaimTemplateList) ProtoMessage() {}
+
+func (*ResourceClaimTemplateSpec) ProtoMessage() {}
+
+func (*ResourcePool) ProtoMessage() {}
+
+func (*ResourceSlice) ProtoMessage() {}
+
+func (*ResourceSliceList) ProtoMessage() {}
+
+func (*ResourceSliceSpec) ProtoMessage() {}
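The new file above sits behind the kubernetes_protomessage_one_more_release build tag, so its ProtoMessage() markers are compiled only when that tag is supplied (for example via go build -tags kubernetes_protomessage_one_more_release ./...). Below is a minimal sketch of the same build-tag pattern; the tag and package names are placeholders, not part of the vendored code.

//go:build exampletag

package example

// EnabledByTag is compiled only when the build runs with -tags exampletag;
// without the tag the whole file is dropped from the package, which is the
// same mechanism that keeps the ProtoMessage() stubs out of default builds.
func EnabledByTag() bool { return true }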
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/types.go b/operator/vendor/k8s.io/api/resource/v1beta1/types.go
index 27967f38..c55e2e92 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/types.go
@@ -101,7 +101,8 @@ type ResourceSliceSpec struct {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
@@ -148,11 +149,15 @@ type ResourceSliceSpec struct {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
- Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
+ Devices []Device `json:"devices,omitempty" protobuf:"bytes,6,name=devices"`
// PerDeviceNodeSelection defines whether the access from nodes to
// resources in the pool is set on the ResourceSlice level or on each
@@ -169,19 +174,27 @@ type ResourceSliceSpec struct {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
//
- // The maximum number of SharedCounters is 32.
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
SharedCounters []CounterSet `json:"sharedCounters,omitempty" protobuf:"bytes,8,name=sharedCounters"`
}
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -192,6 +205,8 @@ type CounterSet struct {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
Name string `json:"name" protobuf:"bytes,1,name=name"`
// Counters defines the set of counters for this CounterSet
@@ -254,10 +269,28 @@ type ResourcePool struct {
const ResourceSliceMaxSharedCapacity = 128
const ResourceSliceMaxDevices = 128
+const ResourceSliceMaxDevicesWithTaintsOrConsumesCounters = 64
const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name.
const BindingConditionsMaxSize = 4
const BindingFailureConditionsMaxSize = 4
+// Defines the maximum number of counter sets (through the
+// SharedCounters field) that can be defined in a ResourceSlice.
+const ResourceSliceMaxCounterSets = 8
+
+// Defines the maximum number of counters that can be defined
+// in a counter set.
+const ResourceSliceMaxCountersPerCounterSet = 32
+
+// Defines the maximum number of device counter consumptions
+// (through the ConsumesCounters field) that can be defined per
+// device.
+const ResourceSliceMaxDeviceCounterConsumptionsPerDevice = 2
+
+// Defines the maximum number of counters that can be defined
+// per device counter consumption.
+const ResourceSliceMaxCountersPerDeviceCounterConsumption = 32
+
// Device represents one individual hardware instance that can be selected based
// on its attributes. Besides the name, exactly one field must be set.
type Device struct {
@@ -298,14 +331,17 @@ type BasicDevice struct {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
ConsumesCounters []DeviceCounterConsumption `json:"consumesCounters,omitempty" protobuf:"bytes,3,rep,name=consumesCounters"`
// NodeName identifies the node where the device is available.
@@ -341,7 +377,9 @@ type BasicDevice struct {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -378,6 +416,8 @@ type BasicDevice struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,9,rep,name=bindingConditions"`
// BindingFailureConditions defines the conditions for binding failure.
@@ -394,6 +434,8 @@ type BasicDevice struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,10,rep,name=bindingFailureConditions"`
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -413,14 +455,13 @@ type DeviceCounterConsumption struct {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
CounterSet string `json:"counterSet" protobuf:"bytes,1,opt,name=counterSet"`
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
Counters map[string]Counter `json:"counters,omitempty" protobuf:"bytes,2,opt,name=counters"`
@@ -535,14 +576,6 @@ type CapacityRequestPolicyRange struct {
// Limit for the sum of the number of entries in both attributes and capacity.
const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
-// Limit for the total number of counters in each device.
-const ResourceSliceMaxCountersPerDevice = 32
-
-// Limit for the total number of counters defined in devices in
-// a ResourceSlice. We want to allow up to 64 devices to specify
-// up to 16 counters, so the limit for the ResourceSlice will be 1024.
-const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
-
// QualifiedName is the name of a device attribute or capacity.
//
// Attributes and capacities are defined either by the owner of the specific
@@ -562,6 +595,9 @@ const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
type QualifiedName string
// FullyQualifiedName is a QualifiedName where the domain is set.
+// Format validation cannot be added to this type because one of its usages,
+// DistinctAttribute, is validated conditionally. This conditional validation
+// cannot be expressed declaratively.
type FullyQualifiedName string
// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name.
@@ -579,34 +615,38 @@ type DeviceAttribute struct {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"`
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"`
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"`
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"`
}
// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value.
const DeviceAttributeMaxValueLength = 64
-// DeviceTaintsMaxLength is the maximum number of taints per device.
-const DeviceTaintsMaxLength = 4
+// DeviceTaintsMaxLength is the maximum number of taints per Device.
+const DeviceTaintsMaxLength = 16
// The device this taint is attached to has the "effect" on
// any claim which does not tolerate the taint and, through the claim,
@@ -628,16 +668,27 @@ type DeviceTaint struct {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
Effect DeviceTaintEffect `json:"effect" protobuf:"bytes,3,name=effect,casttype=DeviceTaintEffect"`
// ^^^^
//
// Implementing PreferNoSchedule would depend on a scoring solution for DRA.
// It might get added as part of that.
+ //
+ // A possible future new effect is NoExecuteWithPodDisruptionBudget:
+ // honor the pod disruption budget instead of simply deleting pods.
+ // This is currently undecided, it could also be a separate field.
+ //
+ // Validation must be prepared to allow unknown enums in stored objects,
+ // which will enable adding new enums within a single release without
+ // ratcheting.
// TimeAdded represents the time at which the taint was added.
// Added automatically during create or update if not set.
@@ -653,9 +704,13 @@ type DeviceTaint struct {
}
// +enum
+// +k8s:enum
type DeviceTaintEffect string
const (
+ // No effect, the taint is purely informational.
+ DeviceTaintEffectNone DeviceTaintEffect = "None"
+
// Do not allow new pods to schedule which use a tainted device unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
@@ -682,6 +737,7 @@ type ResourceSliceList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32
+// +k8s:supportsSubresource=/status
// ResourceClaim describes a request for access to resources in the cluster,
// for use by workloads. For example, if a workload needs an accelerator device
@@ -699,6 +755,7 @@ type ResourceClaim struct {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
// Status describes whether the claim is ready to use and what has been allocated.
@@ -726,6 +783,11 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"`
// These constraints must be satisfied by the set of devices that get
@@ -733,6 +795,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"`
// This field holds configuration for multiple potential drivers which
@@ -741,6 +805,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
// Potential future extension, ignored by older schedulers. This is
@@ -807,6 +873,8 @@ type DeviceRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -832,6 +900,7 @@ type DeviceRequest struct {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,4,opt,name=allocationMode"`
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -881,6 +950,11 @@ type DeviceRequest struct {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
FirstAvailable []DeviceSubRequest `json:"firstAvailable,omitempty" protobuf:"bytes,7,name=firstAvailable"`
// If specified, the request's tolerations.
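FirstAvailable keeps its prioritized-list semantics behind the DRAPrioritizedList feature gate and now also carries the declarative list tags (unique by name, at most 8 entries). A sketch of building such a request with the vendored v1beta1 types; the device class names are placeholders:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

// prioritizedRequest builds a request that tries alternatives in order of
// preference: the scheduler uses the first subrequest it can satisfy.
func prioritizedRequest() resourceapi.DeviceRequest {
	return resourceapi.DeviceRequest{
		Name: "accelerator",
		FirstAvailable: []resourceapi.DeviceSubRequest{
			{Name: "big", DeviceClassName: "gpu-80gb.example.com"},
			{Name: "small", DeviceClassName: "gpu-40gb.example.com"},
		},
	}
}

func main() {
	req := prioritizedRequest()
	fmt.Printf("request %q has %d alternatives\n", req.Name, len(req.FirstAvailable))
}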
@@ -959,6 +1033,8 @@ type DeviceSubRequest struct {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"`
// Selectors define criteria which must be satisfied by a specific
@@ -968,6 +1044,7 @@ type DeviceSubRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -1074,6 +1151,8 @@ const (
DeviceTolerationsMaxLength = 16
)
+// +enum
+// +k8s:enum
type DeviceAllocationMode string
// Valid [DeviceRequest.CountMode] values.
@@ -1192,6 +1271,10 @@ type DeviceConstraint struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
// MatchAttribute requires that all devices in question have this
@@ -1209,6 +1292,8 @@ type DeviceConstraint struct {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
// Potential future extension, not part of the current design:
@@ -1249,6 +1334,10 @@ type DeviceClaimConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"`
@@ -1262,6 +1351,7 @@ type DeviceConfiguration struct {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"`
}
@@ -1275,9 +1365,11 @@ type OpaqueDeviceConfiguration struct {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
// Parameters can contain arbitrary data. It is the responsibility of
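The driver field's documentation now recommends lower case, and the k8s-long-name-caseless format presumably validates the DNS-subdomain shape case-insensitively while lower case stays a recommendation. A hedged client-side sketch using IsDNS1123Subdomain from k8s.io/apimachinery; checkDriverName is an illustrative helper, not part of the API:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/validation"
)

// checkDriverName mirrors the documented rules: the value must be a DNS
// subdomain, and it should use only lower case characters. Mixed case is
// reported as a warning rather than an error, matching the "should" wording.
func checkDriverName(driver string) (errs []string, warnings []string) {
	if msgs := validation.IsDNS1123Subdomain(strings.ToLower(driver)); len(msgs) > 0 {
		errs = append(errs, msgs...)
	}
	if driver != strings.ToLower(driver) {
		warnings = append(warnings, "driver name should use only lower case characters")
	}
	return errs, warnings
}

func main() {
	errs, warnings := checkDriverName("GPU.Example.Com")
	fmt.Println(errs, warnings)
}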
@@ -1303,6 +1395,8 @@ type DeviceToleration struct {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
@@ -1341,6 +1435,7 @@ type DeviceToleration struct {
// A toleration operator is the set of operators that can be used in a toleration.
//
// +enum
+// +k8s:enum
type DeviceTolerationOperator string
const (
@@ -1354,6 +1449,8 @@ type ResourceClaimStatus struct {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"`
// ReservedFor indicates which entities are currently allowed to use
@@ -1381,6 +1478,10 @@ type ResourceClaimStatus struct {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
// DeallocationRequested is tombstoned since Kubernetes 1.32 where
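ReservedFor is a list-map keyed by uid with a documented limit of 256 entries. A sketch of idempotent reservation handling against those semantics, assuming the vendored v1beta1 types; reserveForPod and reservedForMaxSize are illustrative names:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

const reservedForMaxSize = 256 // mirrors +k8s:maxItems=256 on ReservedFor

// reserveForPod adds a consumer entry keyed by UID, matching the
// +k8s:listMapKey=uid semantics: re-adding the same UID is a no-op.
func reserveForPod(status *resourceapi.ResourceClaimStatus, podName string, podUID types.UID) error {
	for _, ref := range status.ReservedFor {
		if ref.UID == podUID {
			return nil // already reserved for this consumer
		}
	}
	if len(status.ReservedFor) >= reservedForMaxSize {
		return fmt.Errorf("reservedFor is full (%d entries)", reservedForMaxSize)
	}
	status.ReservedFor = append(status.ReservedFor, resourceapi.ResourceClaimConsumerReference{
		Resource: "pods",
		Name:     podName,
		UID:      podUID,
	})
	return nil
}

func main() {
	var status resourceapi.ResourceClaimStatus
	_ = reserveForPod(&status, "my-pod", types.UID("1234"))
	_ = reserveForPod(&status, "my-pod", types.UID("1234"))
	fmt.Println(len(status.ReservedFor)) // 1
}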
@@ -1393,12 +1494,18 @@ type ResourceClaimStatus struct {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
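With driver, pool, device and shareID declared as list-map keys, at most one Devices entry can exist per allocated device (or per share of it). A lookup sketch under that assumption, using the vendored v1beta1 types:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

// sameShareID compares the optional shareID component of the list-map key.
func sameShareID(a, b *string) bool {
	if a == nil || b == nil {
		return a == b
	}
	return *a == *b
}

// findDeviceStatus returns the status entry for one allocated device. The
// tuple (driver, pool, device, shareID) is the declared map key, so at most
// one entry can match.
func findDeviceStatus(status *resourceapi.ResourceClaimStatus, driver, pool, device string, shareID *string) *resourceapi.AllocatedDeviceStatus {
	for i := range status.Devices {
		d := &status.Devices[i]
		if d.Driver == driver && d.Pool == pool && d.Device == device && sameShareID(d.ShareID, shareID) {
			return d
		}
	}
	return nil
}

func main() {
	status := resourceapi.ResourceClaimStatus{
		Devices: []resourceapi.AllocatedDeviceStatus{
			{Driver: "gpu.example.com", Pool: "pool-a", Device: "gpu-0"},
		},
	}
	fmt.Println(findDeviceStatus(&status, "gpu.example.com", "pool-a", "gpu-0", nil) != nil) // true
}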
@@ -1461,6 +1568,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
// This field is a combination of all the claim and class configuration parameters.
@@ -1473,6 +1582,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
}
@@ -1498,9 +1609,11 @@ type DeviceRequestAllocationResult struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
// This name together with the driver name and the device name field
@@ -1510,6 +1623,8 @@ type DeviceRequestAllocationResult struct {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
// Device references one device instance via its name in the driver's
@@ -1541,6 +1656,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceTaints
+ // +k8s:optional
+ // +k8s:maxItems=16
Tolerations []DeviceToleration `json:"tolerations,omitempty" protobuf:"bytes,6,opt,name=tolerations"`
// BindingConditions contains a copy of the BindingConditions
@@ -1552,6 +1669,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,7,rep,name=bindingConditions"`
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -1563,6 +1682,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,8,rep,name=bindingFailureConditions"`
// ShareID uniquely identifies an individual allocation share of the device,
@@ -1572,6 +1693,8 @@ type DeviceRequestAllocationResult struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *types.UID `json:"shareID,omitempty" protobuf:"bytes,9,opt,name=shareID"`
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -1595,6 +1718,7 @@ type DeviceAllocationConfiguration struct {
// or from a claim.
//
// +required
+ // +k8s:required
Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
// Requests lists the names of requests where the configuration applies.
@@ -1606,17 +1730,23 @@ type DeviceAllocationConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
}
+// +enum
+// +k8s:enum
type AllocationConfigSource string
// Valid [DeviceAllocationConfiguration.Source] values.
const (
- AllocationConfigSourceClass = "FromClass"
- AllocationConfigSourceClaim = "FromClaim"
+ AllocationConfigSourceClass AllocationConfigSource = "FromClass"
+ AllocationConfigSourceClaim AllocationConfigSource = "FromClaim"
)
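Typing the constants as AllocationConfigSource (instead of leaving them untyped) matches the new +k8s:enum marker, so tooling can discover the valid values, and switches over DeviceAllocationConfiguration.Source read naturally. A small sketch with the vendored package:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

// describeConfigSource shows the typed constants in use.
func describeConfigSource(cfg resourceapi.DeviceAllocationConfiguration) string {
	switch cfg.Source {
	case resourceapi.AllocationConfigSourceClass:
		return "configuration inherited from the DeviceClass"
	case resourceapi.AllocationConfigSourceClaim:
		return "configuration provided by the ResourceClaim"
	default:
		return "unknown source"
	}
}

func main() {
	cfg := resourceapi.DeviceAllocationConfiguration{Source: resourceapi.AllocationConfigSourceClaim}
	fmt.Println(describeConfigSource(cfg))
}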
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -1649,6 +1779,8 @@ type DeviceClass struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines what can be allocated and how to configure it.
@@ -1669,6 +1801,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"`
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -1679,6 +1813,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
// SuitableNodes is tombstoned since Kubernetes 1.32 where
@@ -1698,6 +1834,8 @@ type DeviceClassSpec struct {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
ExtendedResourceName *string `json:"extendedResourceName,omitempty" protobuf:"bytes,4,opt,name=extendedResourceName"`
}
@@ -1799,7 +1937,7 @@ type AllocatedDeviceStatus struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
@@ -1823,6 +1961,8 @@ type AllocatedDeviceStatus struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *string `json:"shareID,omitempty" protobuf:"bytes,7,opt,name=shareID"`
// Conditions contains the latest observation of the device's state.
@@ -1846,6 +1986,7 @@ type AllocatedDeviceStatus struct {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"`
}
@@ -1860,6 +2001,8 @@ type NetworkDeviceData struct {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"`
// IPs lists the network addresses assigned to the device's network interface.
@@ -1872,6 +2015,10 @@ type NetworkDeviceData struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"`
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1879,5 +2026,7 @@ type NetworkDeviceData struct {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"`
}
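The NetworkDeviceData limits are now stated both in prose and as +k8s: tags. A hand-written approximation of those limits, for illustration only; the authoritative validation is generated from the tags:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

// Limits taken from the field documentation and the new +k8s: tags above.
const (
	maxInterfaceNameLen   = 256
	maxHardwareAddressLen = 128
	maxIPs                = 16
)

// checkNetworkDeviceData approximates the declared limits for NetworkDeviceData.
func checkNetworkDeviceData(d resourceapi.NetworkDeviceData) []error {
	var errs []error
	if len(d.InterfaceName) > maxInterfaceNameLen {
		errs = append(errs, fmt.Errorf("interfaceName: must be %d characters or fewer", maxInterfaceNameLen))
	}
	if len(d.HardwareAddress) > maxHardwareAddressLen {
		errs = append(errs, fmt.Errorf("hardwareAddress: must be %d characters or fewer", maxHardwareAddressLen))
	}
	if len(d.IPs) > maxIPs {
		errs = append(errs, fmt.Errorf("ips: must have at most %d entries", maxIPs))
	}
	return errs
}

func main() {
	d := resourceapi.NetworkDeviceData{InterfaceName: "eth0", IPs: []string{"10.0.0.2/24"}}
	fmt.Println(len(checkNetworkDeviceData(d))) // 0
}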
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
index 473fbb95..070536e6 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AllocatedDeviceStatus = map[string]string{
"": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.\n\nThe combination of Driver, Pool, Device, and ShareID must match the corresponding key in Status.Allocation.Devices.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"shareID": "ShareID uniquely identifies an individual allocation share of the device.",
@@ -57,11 +57,11 @@ var map_BasicDevice = map[string]string{
"": "BasicDevice defines one device instance.",
"attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
"capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
- "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe total number of device counter consumption entries must be <= 32. In addition, the total number in the entire ResourceSlice must be <= 1024 (for example, 64 devices with 16 counters each).",
+ "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe maximum number of device counter consumptions per device is 2.",
"nodeName": "NodeName identifies the node where the device is available.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"nodeSelector": "NodeSelector defines the nodes where the device is available.\n\nMust use exactly one term.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"allNodes": "AllNodes indicates that all nodes have access to the device.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
- "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 4.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
+ "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 16. If taints are set for any device in a ResourceSlice, then the maximum number of allowed devices per ResourceSlice is 64 instead of 128.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
"bindsToNode": "BindsToNode indicates if the usage of an allocation involving this device has to be limited to exactly the node that was chosen when allocating the claim. If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector to match the node where the allocation was made.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingConditions": "BindingConditions defines the conditions for proceeding with binding. All of these conditions must be set in the per-device status conditions with a value of True to proceed with binding the pod to the node while scheduling the pod.\n\nThe maximum number of binding conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingFailureConditions": "BindingFailureConditions defines the conditions for binding failure. They may be set in the per-device status conditions. If any is true, a binding failure occurred.\n\nThe maximum number of binding failure conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
@@ -122,7 +122,7 @@ func (Counter) SwaggerDoc() map[string]string {
}
var map_CounterSet = map[string]string{
- "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourceSlice.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
+ "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourcePool.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
"name": "Name defines the name of the counter set. It must be a DNS label.",
"counters": "Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label.\n\nThe maximum number of counters is 32.",
}
@@ -265,7 +265,7 @@ func (DeviceConstraint) SwaggerDoc() map[string]string {
var map_DeviceCounterConsumption = map[string]string{
"": "DeviceCounterConsumption defines a set of counters that a device will consume from a CounterSet.",
"counterSet": "CounterSet is the name of the set from which the counters defined will be consumed.",
- "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each).",
+ "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number of counters is 32.",
}
func (DeviceCounterConsumption) SwaggerDoc() map[string]string {
@@ -292,7 +292,7 @@ func (DeviceRequest) SwaggerDoc() map[string]string {
var map_DeviceRequestAllocationResult = map[string]string{
"": "DeviceRequestAllocationResult contains the allocation result for one request.",
"request": "Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format /.\n\nMultiple devices may have been allocated per request.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
@@ -335,7 +335,7 @@ var map_DeviceTaint = map[string]string{
"": "The device this taint is attached to has the \"effect\" on any claim which does not tolerate the taint and, through the claim, to pods using the claim.",
"key": "The taint key to be applied to a device. Must be a label name.",
"value": "The taint value corresponding to the taint key. Must be a label value.",
- "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here.",
+ "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them.\n\nValid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. More effects may get added in the future. Consumers must treat unknown effects like None.",
"timeAdded": "TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set.",
}
@@ -369,7 +369,7 @@ func (NetworkDeviceData) SwaggerDoc() map[string]string {
var map_OpaqueDeviceConfiguration = map[string]string{
"": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
- "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
}
@@ -493,14 +493,14 @@ func (ResourceSliceList) SwaggerDoc() map[string]string {
var map_ResourceSliceSpec = map[string]string{
"": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.",
- "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.",
+ "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters. This field is immutable.",
"pool": "Pool describes the pool that this ResourceSlice belongs to.",
"nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable.",
"nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
"allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.",
+ "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.",
"perDeviceNodeSelection": "PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the SharedCounters must be unique in the ResourceSlice.\n\nThe maximum number of SharedCounters is 32.",
+ "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the counter sets must be unique in the ResourcePool.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.\n\nThe maximum number of counter sets is 8.",
}
func (ResourceSliceSpec) SwaggerDoc() map[string]string {
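The wording and limit changes propagate into the generated SwaggerDoc maps, so tooling that surfaces field documentation picks up the new text without extra work. A quick sketch using the vendored package:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

func main() {
	// SwaggerDoc returns the generated per-field documentation strings,
	// including the updated sharedCounters and devices limits shown above.
	doc := resourceapi.ResourceSliceSpec{}.SwaggerDoc()
	fmt.Println(doc["sharedCounters"])
	fmt.Println(doc["devices"])
}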
diff --git a/operator/vendor/k8s.io/api/resource/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/resource/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..0d4983d3
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,237 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocatedDeviceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.AllocatedDeviceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.AllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in BasicDevice) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.BasicDevice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CELDeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.CELDeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.CapacityRequestPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicyRange) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.CapacityRequestPolicyRange"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequirements) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.CapacityRequirements"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Counter) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.Counter"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CounterSet) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.CounterSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Device) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.Device"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceAllocationConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAttribute) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceAttribute"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaimConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClaimConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClass) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClassConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConstraint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceConstraint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCounterConsumption) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceCounterConsumption"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequestAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceRequestAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSubRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceSubRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceTaint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceToleration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.DeviceToleration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkDeviceData) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.NetworkDeviceData"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in OpaqueDeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.OpaqueDeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimConsumerReference) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimConsumerReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplate) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimTemplate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimTemplateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceClaimTemplateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePool) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourcePool"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSlice) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceSlice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceSliceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta1.ResourceSliceSpec"
+}
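The new generated helpers expose the canonical OpenAPI model name directly on each Go type, which is useful when correlating types with entries in the published OpenAPI schema. For example:

package main

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

func main() {
	// Prints the fully qualified model names, e.g.
	// io.k8s.api.resource.v1beta1.ResourceClaim
	fmt.Println(resourceapi.ResourceClaim{}.OpenAPIModelName())
	fmt.Println(resourceapi.DeviceTaint{}.OpenAPIModelName())
}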
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/doc.go b/operator/vendor/k8s.io/api/resource/v1beta2/doc.go
index 365113ae..4570270a 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta2/doc.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.resource.v1beta2
+
// +groupName=resource.k8s.io
// Package v1beta2 is the v1beta2 version of the resource API.
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/generated.pb.go b/operator/vendor/k8s.io/api/resource/v1beta2/generated.pb.go
index f512f41d..5a36e6d2 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta2/generated.pb.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/generated.pb.go
@@ -23,15 +23,13 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
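The regenerated marshaling code drops the gogo sortkeys dependency in favor of the standard library's sort package for ordering map keys deterministically. The equivalent pattern, shown in isolation:

package main

import (
	"fmt"
	"sort"
)

// sortedKeys returns the map keys in a stable order, the same job previously
// done with github.com/gogo/protobuf/sortkeys.Strings.
func sortedKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	attrs := map[string]string{"memory": "80Gi", "arch": "sm90"}
	for _, k := range sortedKeys(attrs) {
		fmt.Printf("%s=%s\n", k, attrs[k])
	}
}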
@@ -39,1470 +37,91 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (m *AllocatedDeviceStatus) Reset() { *m = AllocatedDeviceStatus{} }
-func (*AllocatedDeviceStatus) ProtoMessage() {}
-func (*AllocatedDeviceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{0}
-}
-func (m *AllocatedDeviceStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocatedDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocatedDeviceStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocatedDeviceStatus.Merge(m, src)
-}
-func (m *AllocatedDeviceStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocatedDeviceStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocatedDeviceStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocatedDeviceStatus proto.InternalMessageInfo
-
-func (m *AllocationResult) Reset() { *m = AllocationResult{} }
-func (*AllocationResult) ProtoMessage() {}
-func (*AllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{1}
-}
-func (m *AllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *AllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocationResult.Merge(m, src)
-}
-func (m *AllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *AllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocationResult proto.InternalMessageInfo
-
-func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-func (*CELDeviceSelector) ProtoMessage() {}
-func (*CELDeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{2}
-}
-func (m *CELDeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CELDeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CELDeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CELDeviceSelector.Merge(m, src)
-}
-func (m *CELDeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *CELDeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_CELDeviceSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CELDeviceSelector proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-func (*CapacityRequestPolicy) ProtoMessage() {}
-func (*CapacityRequestPolicy) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{3}
-}
-func (m *CapacityRequestPolicy) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicy) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicy.Merge(m, src)
-}
-func (m *CapacityRequestPolicy) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicy) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicy proto.InternalMessageInfo
-
-func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-func (*CapacityRequestPolicyRange) ProtoMessage() {}
-func (*CapacityRequestPolicyRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{4}
-}
-func (m *CapacityRequestPolicyRange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequestPolicyRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequestPolicyRange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequestPolicyRange.Merge(m, src)
-}
-func (m *CapacityRequestPolicyRange) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequestPolicyRange) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequestPolicyRange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequestPolicyRange proto.InternalMessageInfo
-
-func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-func (*CapacityRequirements) ProtoMessage() {}
-func (*CapacityRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{5}
-}
-func (m *CapacityRequirements) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CapacityRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CapacityRequirements) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CapacityRequirements.Merge(m, src)
-}
-func (m *CapacityRequirements) XXX_Size() int {
- return m.Size()
-}
-func (m *CapacityRequirements) XXX_DiscardUnknown() {
- xxx_messageInfo_CapacityRequirements.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CapacityRequirements proto.InternalMessageInfo
-
-func (m *Counter) Reset() { *m = Counter{} }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{6}
-}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
- return m.Size()
-}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (*CounterSet) ProtoMessage() {}
-func (*CounterSet) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{7}
-}
-func (m *CounterSet) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CounterSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CounterSet) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CounterSet.Merge(m, src)
-}
-func (m *CounterSet) XXX_Size() int {
- return m.Size()
-}
-func (m *CounterSet) XXX_DiscardUnknown() {
- xxx_messageInfo_CounterSet.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CounterSet proto.InternalMessageInfo
-
-func (m *Device) Reset() { *m = Device{} }
-func (*Device) ProtoMessage() {}
-func (*Device) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{8}
-}
-func (m *Device) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Device) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Device.Merge(m, src)
-}
-func (m *Device) XXX_Size() int {
- return m.Size()
-}
-func (m *Device) XXX_DiscardUnknown() {
- xxx_messageInfo_Device.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Device proto.InternalMessageInfo
-
-func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (*DeviceAllocationConfiguration) ProtoMessage() {}
-func (*DeviceAllocationConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{9}
-}
-func (m *DeviceAllocationConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationConfiguration.Merge(m, src)
-}
-func (m *DeviceAllocationConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationConfiguration proto.InternalMessageInfo
-
-func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-func (*DeviceAllocationResult) ProtoMessage() {}
-func (*DeviceAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{10}
-}
-func (m *DeviceAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAllocationResult.Merge(m, src)
-}
-func (m *DeviceAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAllocationResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAllocationResult proto.InternalMessageInfo
-
-func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (*DeviceAttribute) ProtoMessage() {}
-func (*DeviceAttribute) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{11}
-}
-func (m *DeviceAttribute) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceAttribute) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceAttribute.Merge(m, src)
-}
-func (m *DeviceAttribute) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceAttribute) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceAttribute.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceAttribute proto.InternalMessageInfo
-
-func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-func (*DeviceCapacity) ProtoMessage() {}
-func (*DeviceCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{12}
-}
-func (m *DeviceCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCapacity.Merge(m, src)
-}
-func (m *DeviceCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCapacity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceCapacity proto.InternalMessageInfo
-
-func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (*DeviceClaim) ProtoMessage() {}
-func (*DeviceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{13}
-}
-func (m *DeviceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaim.Merge(m, src)
-}
-func (m *DeviceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaim.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaim proto.InternalMessageInfo
-
-func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-func (*DeviceClaimConfiguration) ProtoMessage() {}
-func (*DeviceClaimConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{14}
-}
-func (m *DeviceClaimConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClaimConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClaimConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClaimConfiguration.Merge(m, src)
-}
-func (m *DeviceClaimConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClaimConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClaimConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClaimConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (*DeviceClass) ProtoMessage() {}
-func (*DeviceClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{15}
-}
-func (m *DeviceClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClass.Merge(m, src)
-}
-func (m *DeviceClass) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClass) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClass proto.InternalMessageInfo
-
-func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-func (*DeviceClassConfiguration) ProtoMessage() {}
-func (*DeviceClassConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{16}
-}
-func (m *DeviceClassConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassConfiguration.Merge(m, src)
-}
-func (m *DeviceClassConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassConfiguration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassConfiguration proto.InternalMessageInfo
-
-func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (*DeviceClassList) ProtoMessage() {}
-func (*DeviceClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{17}
-}
-func (m *DeviceClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassList.Merge(m, src)
-}
-func (m *DeviceClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassList proto.InternalMessageInfo
-
-func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-func (*DeviceClassSpec) ProtoMessage() {}
-func (*DeviceClassSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{18}
-}
-func (m *DeviceClassSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceClassSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceClassSpec.Merge(m, src)
-}
-func (m *DeviceClassSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceClassSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceClassSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceClassSpec proto.InternalMessageInfo
-
-func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (*DeviceConfiguration) ProtoMessage() {}
-func (*DeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{19}
-}
-func (m *DeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConfiguration.Merge(m, src)
-}
-func (m *DeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConfiguration.DiscardUnknown(m)
-}
+func (m *CELDeviceSelector) Reset() { *m = CELDeviceSelector{} }
-var xxx_messageInfo_DeviceConfiguration proto.InternalMessageInfo
+func (m *CapacityRequestPolicy) Reset() { *m = CapacityRequestPolicy{} }
-func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-func (*DeviceConstraint) ProtoMessage() {}
-func (*DeviceConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{20}
-}
-func (m *DeviceConstraint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceConstraint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceConstraint.Merge(m, src)
-}
-func (m *DeviceConstraint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceConstraint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceConstraint.DiscardUnknown(m)
-}
+func (m *CapacityRequestPolicyRange) Reset() { *m = CapacityRequestPolicyRange{} }
-var xxx_messageInfo_DeviceConstraint proto.InternalMessageInfo
+func (m *CapacityRequirements) Reset() { *m = CapacityRequirements{} }
-func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (*DeviceCounterConsumption) ProtoMessage() {}
-func (*DeviceCounterConsumption) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{21}
-}
-func (m *DeviceCounterConsumption) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceCounterConsumption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceCounterConsumption) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceCounterConsumption.Merge(m, src)
-}
-func (m *DeviceCounterConsumption) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceCounterConsumption) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceCounterConsumption.DiscardUnknown(m)
-}
+func (m *Counter) Reset() { *m = Counter{} }
-var xxx_messageInfo_DeviceCounterConsumption proto.InternalMessageInfo
+func (m *CounterSet) Reset() { *m = CounterSet{} }
-func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-func (*DeviceRequest) ProtoMessage() {}
-func (*DeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{22}
-}
-func (m *DeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequest.Merge(m, src)
-}
-func (m *DeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequest.DiscardUnknown(m)
-}
+func (m *Device) Reset() { *m = Device{} }
-var xxx_messageInfo_DeviceRequest proto.InternalMessageInfo
+func (m *DeviceAllocationConfiguration) Reset() { *m = DeviceAllocationConfiguration{} }
-func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (*DeviceRequestAllocationResult) ProtoMessage() {}
-func (*DeviceRequestAllocationResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{23}
-}
-func (m *DeviceRequestAllocationResult) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceRequestAllocationResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceRequestAllocationResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceRequestAllocationResult.Merge(m, src)
-}
-func (m *DeviceRequestAllocationResult) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceRequestAllocationResult) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceRequestAllocationResult.DiscardUnknown(m)
-}
+func (m *DeviceAllocationResult) Reset() { *m = DeviceAllocationResult{} }
-var xxx_messageInfo_DeviceRequestAllocationResult proto.InternalMessageInfo
+func (m *DeviceAttribute) Reset() { *m = DeviceAttribute{} }
-func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-func (*DeviceSelector) ProtoMessage() {}
-func (*DeviceSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{24}
-}
-func (m *DeviceSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSelector.Merge(m, src)
-}
-func (m *DeviceSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSelector.DiscardUnknown(m)
-}
+func (m *DeviceCapacity) Reset() { *m = DeviceCapacity{} }
-var xxx_messageInfo_DeviceSelector proto.InternalMessageInfo
+func (m *DeviceClaim) Reset() { *m = DeviceClaim{} }
-func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (*DeviceSubRequest) ProtoMessage() {}
-func (*DeviceSubRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{25}
-}
-func (m *DeviceSubRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceSubRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceSubRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceSubRequest.Merge(m, src)
-}
-func (m *DeviceSubRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceSubRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceSubRequest.DiscardUnknown(m)
-}
+func (m *DeviceClaimConfiguration) Reset() { *m = DeviceClaimConfiguration{} }
-var xxx_messageInfo_DeviceSubRequest proto.InternalMessageInfo
+func (m *DeviceClass) Reset() { *m = DeviceClass{} }
-func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-func (*DeviceTaint) ProtoMessage() {}
-func (*DeviceTaint) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{26}
-}
-func (m *DeviceTaint) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceTaint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceTaint) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceTaint.Merge(m, src)
-}
-func (m *DeviceTaint) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceTaint) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceTaint.DiscardUnknown(m)
-}
+func (m *DeviceClassConfiguration) Reset() { *m = DeviceClassConfiguration{} }
-var xxx_messageInfo_DeviceTaint proto.InternalMessageInfo
+func (m *DeviceClassList) Reset() { *m = DeviceClassList{} }
-func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (*DeviceToleration) ProtoMessage() {}
-func (*DeviceToleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{27}
-}
-func (m *DeviceToleration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeviceToleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeviceToleration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeviceToleration.Merge(m, src)
-}
-func (m *DeviceToleration) XXX_Size() int {
- return m.Size()
-}
-func (m *DeviceToleration) XXX_DiscardUnknown() {
- xxx_messageInfo_DeviceToleration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeviceToleration proto.InternalMessageInfo
-
-func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} }
-func (*ExactDeviceRequest) ProtoMessage() {}
-func (*ExactDeviceRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{28}
-}
-func (m *ExactDeviceRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ExactDeviceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ExactDeviceRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExactDeviceRequest.Merge(m, src)
-}
-func (m *ExactDeviceRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ExactDeviceRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExactDeviceRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExactDeviceRequest proto.InternalMessageInfo
-
-func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (*NetworkDeviceData) ProtoMessage() {}
-func (*NetworkDeviceData) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{29}
-}
-func (m *NetworkDeviceData) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *NetworkDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *NetworkDeviceData) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NetworkDeviceData.Merge(m, src)
-}
-func (m *NetworkDeviceData) XXX_Size() int {
- return m.Size()
-}
-func (m *NetworkDeviceData) XXX_DiscardUnknown() {
- xxx_messageInfo_NetworkDeviceData.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NetworkDeviceData proto.InternalMessageInfo
-
-func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-func (*OpaqueDeviceConfiguration) ProtoMessage() {}
-func (*OpaqueDeviceConfiguration) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{30}
-}
-func (m *OpaqueDeviceConfiguration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OpaqueDeviceConfiguration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OpaqueDeviceConfiguration.Merge(m, src)
-}
-func (m *OpaqueDeviceConfiguration) XXX_Size() int {
- return m.Size()
-}
-func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() {
- xxx_messageInfo_OpaqueDeviceConfiguration.DiscardUnknown(m)
-}
+func (m *DeviceClassSpec) Reset() { *m = DeviceClassSpec{} }
-var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo
+func (m *DeviceConfiguration) Reset() { *m = DeviceConfiguration{} }
-func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (*ResourceClaim) ProtoMessage() {}
-func (*ResourceClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{31}
-}
-func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaim) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaim.Merge(m, src)
-}
-func (m *ResourceClaim) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaim) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
-}
+func (m *DeviceConstraint) Reset() { *m = DeviceConstraint{} }
-var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+func (m *DeviceCounterConsumption) Reset() { *m = DeviceCounterConsumption{} }
-func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-func (*ResourceClaimConsumerReference) ProtoMessage() {}
-func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{32}
-}
-func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimConsumerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimConsumerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimConsumerReference.Merge(m, src)
-}
-func (m *ResourceClaimConsumerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimConsumerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimConsumerReference.DiscardUnknown(m)
-}
+func (m *DeviceRequest) Reset() { *m = DeviceRequest{} }
-var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo
+func (m *DeviceRequestAllocationResult) Reset() { *m = DeviceRequestAllocationResult{} }
-func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (*ResourceClaimList) ProtoMessage() {}
-func (*ResourceClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{33}
-}
-func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimList.Merge(m, src)
-}
-func (m *ResourceClaimList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimList.DiscardUnknown(m)
-}
+func (m *DeviceSelector) Reset() { *m = DeviceSelector{} }
-var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo
+func (m *DeviceSubRequest) Reset() { *m = DeviceSubRequest{} }
-func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-func (*ResourceClaimSpec) ProtoMessage() {}
-func (*ResourceClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{34}
-}
-func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimSpec.Merge(m, src)
-}
-func (m *ResourceClaimSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimSpec.DiscardUnknown(m)
-}
+func (m *DeviceTaint) Reset() { *m = DeviceTaint{} }
-var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo
+func (m *DeviceToleration) Reset() { *m = DeviceToleration{} }
-func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (*ResourceClaimStatus) ProtoMessage() {}
-func (*ResourceClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{35}
-}
-func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimStatus.Merge(m, src)
-}
-func (m *ResourceClaimStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimStatus.DiscardUnknown(m)
-}
+func (m *ExactDeviceRequest) Reset() { *m = ExactDeviceRequest{} }
-var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo
+func (m *NetworkDeviceData) Reset() { *m = NetworkDeviceData{} }
-func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-func (*ResourceClaimTemplate) ProtoMessage() {}
-func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{36}
-}
-func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplate) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplate.Merge(m, src)
-}
-func (m *ResourceClaimTemplate) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplate) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplate.DiscardUnknown(m)
-}
+func (m *OpaqueDeviceConfiguration) Reset() { *m = OpaqueDeviceConfiguration{} }
-var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo
+func (m *ResourceClaim) Reset() { *m = ResourceClaim{} }
-func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (*ResourceClaimTemplateList) ProtoMessage() {}
-func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{37}
-}
-func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateList.Merge(m, src)
-}
-func (m *ResourceClaimTemplateList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateList.DiscardUnknown(m)
-}
+func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} }
-var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo
+func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} }
-func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-func (*ResourceClaimTemplateSpec) ProtoMessage() {}
-func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{38}
-}
-func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceClaimTemplateSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceClaimTemplateSpec.Merge(m, src)
-}
-func (m *ResourceClaimTemplateSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceClaimTemplateSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceClaimTemplateSpec.DiscardUnknown(m)
-}
+func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} }
-var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo
+func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} }
-func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (*ResourcePool) ProtoMessage() {}
-func (*ResourcePool) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{39}
-}
-func (m *ResourcePool) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourcePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourcePool) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourcePool.Merge(m, src)
-}
-func (m *ResourcePool) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourcePool) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourcePool.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} }
-var xxx_messageInfo_ResourcePool proto.InternalMessageInfo
+func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} }
-func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-func (*ResourceSlice) ProtoMessage() {}
-func (*ResourceSlice) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{40}
-}
-func (m *ResourceSlice) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSlice) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSlice.Merge(m, src)
-}
-func (m *ResourceSlice) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSlice) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSlice.DiscardUnknown(m)
-}
+func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} }
-var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo
+func (m *ResourcePool) Reset() { *m = ResourcePool{} }
-func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (*ResourceSliceList) ProtoMessage() {}
-func (*ResourceSliceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{41}
-}
-func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceList.Merge(m, src)
-}
-func (m *ResourceSliceList) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceList) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceList.DiscardUnknown(m)
-}
+func (m *ResourceSlice) Reset() { *m = ResourceSlice{} }
-var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo
+func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} }
-func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
-func (*ResourceSliceSpec) ProtoMessage() {}
-func (*ResourceSliceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_57f2e1d27c072d6e, []int{42}
-}
-func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResourceSliceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ResourceSliceSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResourceSliceSpec.Merge(m, src)
-}
-func (m *ResourceSliceSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *ResourceSliceSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_ResourceSliceSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResourceSliceSpec proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*AllocatedDeviceStatus)(nil), "k8s.io.api.resource.v1beta2.AllocatedDeviceStatus")
- proto.RegisterType((*AllocationResult)(nil), "k8s.io.api.resource.v1beta2.AllocationResult")
- proto.RegisterType((*CELDeviceSelector)(nil), "k8s.io.api.resource.v1beta2.CELDeviceSelector")
- proto.RegisterType((*CapacityRequestPolicy)(nil), "k8s.io.api.resource.v1beta2.CapacityRequestPolicy")
- proto.RegisterType((*CapacityRequestPolicyRange)(nil), "k8s.io.api.resource.v1beta2.CapacityRequestPolicyRange")
- proto.RegisterType((*CapacityRequirements)(nil), "k8s.io.api.resource.v1beta2.CapacityRequirements")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1beta2.CapacityRequirements.RequestsEntry")
- proto.RegisterType((*Counter)(nil), "k8s.io.api.resource.v1beta2.Counter")
- proto.RegisterType((*CounterSet)(nil), "k8s.io.api.resource.v1beta2.CounterSet")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1beta2.CounterSet.CountersEntry")
- proto.RegisterType((*Device)(nil), "k8s.io.api.resource.v1beta2.Device")
- proto.RegisterMapType((map[QualifiedName]DeviceAttribute)(nil), "k8s.io.api.resource.v1beta2.Device.AttributesEntry")
- proto.RegisterMapType((map[QualifiedName]DeviceCapacity)(nil), "k8s.io.api.resource.v1beta2.Device.CapacityEntry")
- proto.RegisterType((*DeviceAllocationConfiguration)(nil), "k8s.io.api.resource.v1beta2.DeviceAllocationConfiguration")
- proto.RegisterType((*DeviceAllocationResult)(nil), "k8s.io.api.resource.v1beta2.DeviceAllocationResult")
- proto.RegisterType((*DeviceAttribute)(nil), "k8s.io.api.resource.v1beta2.DeviceAttribute")
- proto.RegisterType((*DeviceCapacity)(nil), "k8s.io.api.resource.v1beta2.DeviceCapacity")
- proto.RegisterType((*DeviceClaim)(nil), "k8s.io.api.resource.v1beta2.DeviceClaim")
- proto.RegisterType((*DeviceClaimConfiguration)(nil), "k8s.io.api.resource.v1beta2.DeviceClaimConfiguration")
- proto.RegisterType((*DeviceClass)(nil), "k8s.io.api.resource.v1beta2.DeviceClass")
- proto.RegisterType((*DeviceClassConfiguration)(nil), "k8s.io.api.resource.v1beta2.DeviceClassConfiguration")
- proto.RegisterType((*DeviceClassList)(nil), "k8s.io.api.resource.v1beta2.DeviceClassList")
- proto.RegisterType((*DeviceClassSpec)(nil), "k8s.io.api.resource.v1beta2.DeviceClassSpec")
- proto.RegisterType((*DeviceConfiguration)(nil), "k8s.io.api.resource.v1beta2.DeviceConfiguration")
- proto.RegisterType((*DeviceConstraint)(nil), "k8s.io.api.resource.v1beta2.DeviceConstraint")
- proto.RegisterType((*DeviceCounterConsumption)(nil), "k8s.io.api.resource.v1beta2.DeviceCounterConsumption")
- proto.RegisterMapType((map[string]Counter)(nil), "k8s.io.api.resource.v1beta2.DeviceCounterConsumption.CountersEntry")
- proto.RegisterType((*DeviceRequest)(nil), "k8s.io.api.resource.v1beta2.DeviceRequest")
- proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1beta2.DeviceRequestAllocationResult")
- proto.RegisterMapType((map[QualifiedName]resource.Quantity)(nil), "k8s.io.api.resource.v1beta2.DeviceRequestAllocationResult.ConsumedCapacityEntry")
- proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1beta2.DeviceSelector")
- proto.RegisterType((*DeviceSubRequest)(nil), "k8s.io.api.resource.v1beta2.DeviceSubRequest")
- proto.RegisterType((*DeviceTaint)(nil), "k8s.io.api.resource.v1beta2.DeviceTaint")
- proto.RegisterType((*DeviceToleration)(nil), "k8s.io.api.resource.v1beta2.DeviceToleration")
- proto.RegisterType((*ExactDeviceRequest)(nil), "k8s.io.api.resource.v1beta2.ExactDeviceRequest")
- proto.RegisterType((*NetworkDeviceData)(nil), "k8s.io.api.resource.v1beta2.NetworkDeviceData")
- proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1beta2.OpaqueDeviceConfiguration")
- proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1beta2.ResourceClaim")
- proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimConsumerReference")
- proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimList")
- proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimSpec")
- proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimStatus")
- proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimTemplate")
- proto.RegisterType((*ResourceClaimTemplateList)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimTemplateList")
- proto.RegisterType((*ResourceClaimTemplateSpec)(nil), "k8s.io.api.resource.v1beta2.ResourceClaimTemplateSpec")
- proto.RegisterType((*ResourcePool)(nil), "k8s.io.api.resource.v1beta2.ResourcePool")
- proto.RegisterType((*ResourceSlice)(nil), "k8s.io.api.resource.v1beta2.ResourceSlice")
- proto.RegisterType((*ResourceSliceList)(nil), "k8s.io.api.resource.v1beta2.ResourceSliceList")
- proto.RegisterType((*ResourceSliceSpec)(nil), "k8s.io.api.resource.v1beta2.ResourceSliceSpec")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/resource/v1beta2/generated.proto", fileDescriptor_57f2e1d27c072d6e)
-}
-
-var fileDescriptor_57f2e1d27c072d6e = []byte{
- // 3037 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0xcf, 0x6f, 0x24, 0x47,
- 0xf5, 0x77, 0xcf, 0xcc, 0x8e, 0xc7, 0x6f, 0x6c, 0xaf, 0x5d, 0xfb, 0x23, 0x13, 0xe7, 0x1b, 0x8f,
- 0xd3, 0xfb, 0x85, 0x38, 0x9b, 0x64, 0x9c, 0x35, 0x24, 0x44, 0x9b, 0x03, 0xcc, 0xd8, 0xde, 0xc4,
- 0xc9, 0xae, 0xd7, 0xa9, 0x71, 0x9c, 0x25, 0xbf, 0x44, 0xbb, 0xbb, 0x6c, 0x37, 0xee, 0xe9, 0x9e,
- 0xed, 0xae, 0xf1, 0xda, 0x42, 0x82, 0x08, 0xae, 0x1c, 0x38, 0x80, 0x84, 0x04, 0x48, 0x08, 0x21,
- 0x7e, 0x48, 0x08, 0xf1, 0x17, 0x04, 0x05, 0x14, 0x91, 0x1b, 0x51, 0xb8, 0xe4, 0x80, 0x26, 0x64,
- 0x72, 0xe2, 0xc8, 0x05, 0xa1, 0x3d, 0xa1, 0xaa, 0xae, 0xea, 0x5f, 0x33, 0x3d, 0xe9, 0x71, 0x76,
- 0x57, 0xcb, 0xcd, 0xf3, 0xea, 0xbd, 0x4f, 0x55, 0xbd, 0x7a, 0xbf, 0xea, 0x75, 0x19, 0x1e, 0x3f,
- 0x78, 0xd6, 0xab, 0x99, 0xce, 0x92, 0xd6, 0x36, 0x97, 0x5c, 0xe2, 0x39, 0x1d, 0x57, 0x27, 0x4b,
- 0x87, 0x97, 0x76, 0x08, 0xd5, 0x96, 0x97, 0xf6, 0x88, 0x4d, 0x5c, 0x8d, 0x12, 0xa3, 0xd6, 0x76,
- 0x1d, 0xea, 0xa0, 0x87, 0x7c, 0xe6, 0x9a, 0xd6, 0x36, 0x6b, 0x92, 0xb9, 0x26, 0x98, 0xe7, 0x9e,
- 0xdc, 0x33, 0xe9, 0x7e, 0x67, 0xa7, 0xa6, 0x3b, 0xad, 0xa5, 0x3d, 0x67, 0xcf, 0x59, 0xe2, 0x32,
- 0x3b, 0x9d, 0x5d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0xf9, 0x58, 0x73, 0x6a, 0x64, 0x62, 0xdd, 0x71,
- 0xd9, 0xa4, 0xc9, 0xf9, 0xe6, 0xbe, 0x1c, 0xf2, 0xb4, 0x34, 0x7d, 0xdf, 0xb4, 0x89, 0x7b, 0xbc,
- 0xd4, 0x3e, 0xd8, 0x8b, 0xaf, 0x76, 0x14, 0x29, 0x6f, 0xa9, 0x45, 0xa8, 0x36, 0x68, 0xae, 0xa5,
- 0x34, 0x29, 0xb7, 0x63, 0x53, 0xb3, 0xd5, 0x3f, 0xcd, 0x33, 0x9f, 0x25, 0xe0, 0xe9, 0xfb, 0xa4,
- 0xa5, 0x25, 0xe5, 0xd4, 0xf7, 0xf2, 0x70, 0xae, 0x6e, 0x59, 0x8e, 0xce, 0x68, 0xab, 0xe4, 0xd0,
- 0xd4, 0x49, 0x93, 0x6a, 0xb4, 0xe3, 0xa1, 0x2f, 0x42, 0xd1, 0x70, 0xcd, 0x43, 0xe2, 0x56, 0x94,
- 0x05, 0x65, 0x71, 0xa2, 0x31, 0xfd, 0x7e, 0xb7, 0x3a, 0xd6, 0xeb, 0x56, 0x8b, 0xab, 0x9c, 0x8a,
- 0xc5, 0x28, 0x5a, 0x80, 0x42, 0xdb, 0x71, 0xac, 0x4a, 0x8e, 0x73, 0x4d, 0x0a, 0xae, 0xc2, 0xa6,
- 0xe3, 0x58, 0x98, 0x8f, 0x70, 0x24, 0x8e, 0x5c, 0xc9, 0x27, 0x90, 0x38, 0x15, 0x8b, 0x51, 0xf4,
- 0x05, 0x18, 0xf7, 0xf6, 0x35, 0x97, 0xac, 0xaf, 0x56, 0xc6, 0x39, 0x63, 0xb9, 0xd7, 0xad, 0x8e,
- 0x37, 0x7d, 0x12, 0x96, 0x63, 0x48, 0x07, 0xd0, 0x1d, 0xdb, 0x30, 0xa9, 0xe9, 0xd8, 0x5e, 0xa5,
- 0xb0, 0x90, 0x5f, 0x2c, 0x2f, 0x2f, 0xd5, 0x42, 0x63, 0x08, 0xf6, 0x5f, 0x6b, 0x1f, 0xec, 0x31,
- 0x82, 0x57, 0x63, 0x6a, 0xae, 0x1d, 0x5e, 0xaa, 0xad, 0x48, 0xb9, 0x06, 0x12, 0x6b, 0x80, 0x80,
- 0xe4, 0xe1, 0x08, 0x2c, 0x7a, 0x09, 0x0a, 0x86, 0x46, 0xb5, 0xca, 0xa9, 0x05, 0x65, 0xb1, 0xbc,
- 0xfc, 0x64, 0x2a, 0xbc, 0x50, 0x6f, 0x0d, 0x6b, 0xb7, 0xd6, 0x8e, 0x28, 0xb1, 0x3d, 0x06, 0x5e,
- 0x62, 0x0a, 0x58, 0xd5, 0xa8, 0x86, 0x39, 0x08, 0xd2, 0xa0, 0x6c, 0x13, 0x7a, 0xcb, 0x71, 0x0f,
- 0x18, 0xb1, 0x52, 0xe4, 0x98, 0xb5, 0xda, 0x10, 0xfb, 0xad, 0x6d, 0x08, 0x7e, 0xae, 0x19, 0x26,
- 0xd5, 0x38, 0xdd, 0xeb, 0x56, 0xcb, 0x1b, 0x21, 0x0c, 0x8e, 0x62, 0xaa, 0xef, 0xe6, 0x60, 0x46,
- 0x9c, 0xa3, 0xe9, 0xd8, 0x98, 0x78, 0x1d, 0x8b, 0xa2, 0xb7, 0x60, 0xdc, 0x57, 0xad, 0xc7, 0xcf,
- 0xb0, 0xbc, 0xfc, 0xa5, 0xa1, 0x73, 0xfa, 0x93, 0x25, 0x51, 0x1a, 0xa7, 0x85, 0xaa, 0xc6, 0xfd,
- 0x71, 0x0f, 0x4b, 0x50, 0xb4, 0x0d, 0x93, 0xb6, 0x63, 0x90, 0x26, 0xb1, 0x88, 0x4e, 0x1d, 0x97,
- 0x1f, 0x6f, 0x79, 0x79, 0x21, 0x3a, 0x09, 0x73, 0x26, 0xa6, 0xf9, 0x8d, 0x08, 0x5f, 0x63, 0xa6,
- 0xd7, 0xad, 0x4e, 0x46, 0x29, 0x38, 0x86, 0x83, 0x3a, 0x70, 0x46, 0x0b, 0x56, 0xb1, 0x65, 0xb6,
- 0x88, 0x47, 0xb5, 0x56, 0x5b, 0x9c, 0xc5, 0xc5, 0x6c, 0x47, 0xcd, 0xc4, 0x1a, 0x0f, 0xf4, 0xba,
- 0xd5, 0x33, 0xf5, 0x7e, 0x28, 0x3c, 0x08, 0x5f, 0x7d, 0x1e, 0x66, 0x57, 0xd6, 0xae, 0x0a, 0x27,
- 0x90, 0x6b, 0x59, 0x06, 0x20, 0x47, 0x6d, 0x97, 0x78, 0xec, 0x64, 0x85, 0x2b, 0x04, 0xc6, 0xb3,
- 0x16, 0x8c, 0xe0, 0x08, 0x97, 0xfa, 0x4e, 0x0e, 0xce, 0xad, 0x68, 0x6d, 0x4d, 0x37, 0xe9, 0x31,
- 0x26, 0x37, 0x3b, 0xc4, 0xa3, 0x9b, 0x8e, 0x65, 0xea, 0xc7, 0xe8, 0x15, 0x76, 0x22, 0xbb, 0x5a,
- 0xc7, 0xa2, 0xe2, 0x44, 0x6a, 0xc3, 0x76, 0x13, 0x1e, 0xd1, 0xcb, 0x1d, 0xcd, 0xa6, 0x26, 0x3d,
- 0xf6, 0x5d, 0x62, 0xd5, 0x87, 0xc0, 0x12, 0x0b, 0x11, 0x28, 0x1f, 0x6a, 0x96, 0x69, 0x6c, 0x6b,
- 0x56, 0x87, 0x78, 0x95, 0x3c, 0xf7, 0x89, 0x51, 0xa1, 0xcf, 0x88, 0x5d, 0x95, 0xb7, 0x43, 0x28,
- 0x1c, 0xc5, 0x45, 0x7b, 0x00, 0xfc, 0x27, 0xd6, 0xec, 0x3d, 0x52, 0x29, 0xf0, 0x0d, 0x7c, 0x65,
- 0xa8, 0x49, 0x0d, 0xd4, 0x02, 0x17, 0x6f, 0x4c, 0x33, 0x05, 0x6e, 0x07, 0x70, 0x38, 0x02, 0xad,
- 0xbe, 0x9d, 0x83, 0xb9, 0x74, 0x51, 0xb4, 0x0e, 0xf9, 0x96, 0x69, 0x9f, 0x50, 0x83, 0xe3, 0xbd,
- 0x6e, 0x35, 0x7f, 0xcd, 0xb4, 0x31, 0xc3, 0xe0, 0x50, 0xda, 0x11, 0x0f, 0x5e, 0x27, 0x85, 0xd2,
- 0x8e, 0x30, 0xc3, 0x40, 0x57, 0xa1, 0xe0, 0x51, 0xd2, 0x16, 0x5e, 0x30, 0x2a, 0x16, 0x8f, 0x19,
- 0x4d, 0x4a, 0xda, 0x98, 0xa3, 0xa8, 0xdf, 0xcb, 0xc1, 0xd9, 0xa8, 0x0a, 0x4c, 0x97, 0xb4, 0x88,
- 0x4d, 0x3d, 0xf4, 0x6d, 0x28, 0xb9, 0xbe, 0x4a, 0x98, 0x57, 0xb3, 0x83, 0xfe, 0x6a, 0xe6, 0x23,
- 0x90, 0x20, 0x35, 0xa1, 0x54, 0x6f, 0xcd, 0xa6, 0xee, 0x71, 0xe3, 0x11, 0x71, 0xf2, 0x25, 0x49,
- 0xfe, 0xee, 0xc7, 0xd5, 0xa9, 0x97, 0x3b, 0x9a, 0x65, 0xee, 0x9a, 0xc4, 0xd8, 0xd0, 0x5a, 0x04,
- 0x07, 0x73, 0xce, 0x1d, 0xc0, 0x54, 0x4c, 0x1a, 0xcd, 0x40, 0xfe, 0x80, 0x1c, 0xfb, 0xae, 0x81,
- 0xd9, 0x9f, 0x68, 0x15, 0x4e, 0x1d, 0x32, 0x8b, 0x39, 0x99, 0x5a, 0xb1, 0x2f, 0x7c, 0x39, 0xf7,
- 0xac, 0xa2, 0xbe, 0x05, 0xe3, 0x2b, 0x4e, 0xc7, 0xa6, 0xc4, 0x45, 0x4d, 0x09, 0x7a, 0xb2, 0x63,
- 0x9f, 0x12, 0x7b, 0x3c, 0xc5, 0x6d, 0x59, 0xcc, 0xa1, 0xfe, 0x5b, 0x01, 0x10, 0x13, 0x34, 0x09,
- 0x65, 0xb9, 0xcc, 0xd6, 0x5a, 0x44, 0xb8, 0x79, 0x90, 0xcb, 0xb8, 0x06, 0xf8, 0x08, 0xd2, 0xa1,
- 0xa4, 0xfb, 0xfc, 0x5e, 0x25, 0xc7, 0xb5, 0xff, 0xf4, 0x70, 0xed, 0x07, 0xe0, 0xf2, 0x4f, 0xa1,
- 0xf3, 0x19, 0xa9, 0x73, 0x49, 0xc6, 0x01, 0xf0, 0x9c, 0x06, 0x53, 0x31, 0xe6, 0x01, 0x2a, 0xbe,
- 0x1c, 0x57, 0xf1, 0xff, 0x67, 0x59, 0x44, 0x54, 0xb1, 0x3f, 0x9c, 0x00, 0x91, 0x7e, 0x33, 0x6c,
- 0xfa, 0x08, 0x40, 0xa3, 0xd4, 0x35, 0x77, 0x3a, 0x94, 0xc8, 0x6d, 0x67, 0x49, 0x25, 0xb5, 0x7a,
- 0x20, 0xe5, 0x6f, 0xfa, 0x82, 0x0c, 0x9c, 0xe1, 0x40, 0xbf, 0xa9, 0x45, 0xe6, 0x42, 0x6d, 0x28,
- 0xe9, 0xc2, 0x7e, 0x45, 0x54, 0xbb, 0x94, 0x65, 0x5e, 0x69, 0xf3, 0x09, 0xf3, 0x96, 0xe4, 0x01,
- 0xe6, 0x2d, 0x67, 0x41, 0xdf, 0x82, 0x19, 0xdd, 0xb1, 0xbd, 0x4e, 0x8b, 0x78, 0xf2, 0x0c, 0x44,
- 0x8d, 0xf1, 0x74, 0x86, 0x99, 0x85, 0xc8, 0x0a, 0x47, 0x68, 0xf3, 0x4a, 0xa3, 0x22, 0x66, 0x9f,
- 0x59, 0x49, 0xc0, 0xe2, 0xbe, 0x89, 0xd0, 0x22, 0x94, 0x58, 0x22, 0x64, 0x4b, 0xe2, 0xd9, 0x6e,
- 0xa2, 0x31, 0xc9, 0xd6, 0xbd, 0x21, 0x68, 0x38, 0x18, 0xed, 0x4b, 0xbd, 0xc5, 0x3b, 0x94, 0x7a,
- 0x17, 0xa1, 0xa4, 0x59, 0x16, 0x63, 0xf0, 0x78, 0x11, 0x56, 0xf2, 0x57, 0x50, 0x17, 0x34, 0x1c,
- 0x8c, 0xa2, 0x4d, 0x28, 0x52, 0xcd, 0xb4, 0xa9, 0x57, 0x29, 0x71, 0xf5, 0x2c, 0x66, 0x50, 0xcf,
- 0x16, 0x13, 0x08, 0xeb, 0x3f, 0xfe, 0xd3, 0xc3, 0x02, 0x07, 0x5d, 0x82, 0xf2, 0x8e, 0x69, 0x1b,
- 0xde, 0x96, 0xc3, 0x66, 0xa8, 0x4c, 0xf0, 0xe9, 0x79, 0xd9, 0xd3, 0x08, 0xc9, 0x38, 0xca, 0x83,
- 0x56, 0x60, 0x96, 0xfd, 0x34, 0xed, 0xbd, 0xb0, 0x8e, 0xab, 0xc0, 0x42, 0x7e, 0x71, 0xa2, 0x71,
- 0xae, 0xd7, 0xad, 0xce, 0x36, 0x92, 0x83, 0xb8, 0x9f, 0x1f, 0xdd, 0x80, 0x8a, 0x20, 0x5e, 0xd1,
- 0x4c, 0xab, 0xe3, 0x92, 0x08, 0x56, 0x99, 0x63, 0xfd, 0x5f, 0xaf, 0x5b, 0xad, 0x34, 0x52, 0x78,
- 0x70, 0xaa, 0x34, 0x43, 0x66, 0x85, 0xc6, 0xad, 0x6b, 0x1d, 0x8b, 0x9a, 0x6d, 0x2b, 0x52, 0x5b,
- 0x79, 0x95, 0x49, 0xbe, 0x3d, 0x8e, 0x5c, 0x4f, 0xe1, 0xc1, 0xa9, 0xd2, 0x73, 0x07, 0x70, 0x3a,
- 0xe1, 0x5c, 0x03, 0x82, 0x44, 0x23, 0x1e, 0x24, 0x9e, 0xc8, 0x52, 0xfd, 0x49, 0xd0, 0x48, 0xb0,
- 0x98, 0xdb, 0x87, 0xa9, 0x98, 0x47, 0x0d, 0x98, 0xaa, 0x1e, 0x9f, 0xea, 0xf1, 0x2c, 0xbe, 0x22,
- 0x13, 0x53, 0x24, 0x2c, 0x7d, 0x3f, 0x07, 0x0f, 0x27, 0xcb, 0xd0, 0x15, 0xc7, 0xde, 0x35, 0xf7,
- 0x3a, 0x2e, 0xff, 0x81, 0xbe, 0x06, 0x45, 0x1f, 0x4d, 0xc4, 0xab, 0x45, 0x69, 0x4c, 0x4d, 0x4e,
- 0xbd, 0xdd, 0xad, 0x9e, 0x4f, 0x8a, 0xfa, 0x23, 0x58, 0xc8, 0x31, 0x13, 0x0f, 0x12, 0x68, 0x8e,
- 0x1f, 0xef, 0x64, 0x34, 0xf7, 0x85, 0xa9, 0x0e, 0x7d, 0x07, 0xce, 0x18, 0xc2, 0xad, 0x23, 0x4b,
- 0x10, 0x09, 0xfe, 0xa9, 0x4c, 0xe1, 0x20, 0x22, 0xd7, 0x78, 0x48, 0x2c, 0xf5, 0xcc, 0x80, 0x41,
- 0x3c, 0x68, 0x26, 0xf5, 0x53, 0x05, 0xce, 0x0f, 0xae, 0xca, 0x11, 0x81, 0x71, 0x97, 0xff, 0x25,
- 0xab, 0x80, 0xcb, 0x19, 0xd6, 0x23, 0xf6, 0x98, 0x5e, 0xe2, 0xfb, 0xbf, 0x3d, 0x2c, 0xb1, 0xd1,
- 0x0e, 0x14, 0x75, 0xbe, 0x24, 0x11, 0xf6, 0x2f, 0x8f, 0x74, 0x83, 0x88, 0xef, 0x3f, 0xf0, 0x7b,
- 0x9f, 0x8c, 0x05, 0xb2, 0xfa, 0x5b, 0x05, 0x4e, 0x27, 0xac, 0x0f, 0xcd, 0x43, 0xde, 0xb4, 0x29,
- 0xb7, 0xa6, 0xbc, 0x7f, 0x3e, 0xeb, 0x36, 0xf5, 0x53, 0x37, 0x1b, 0x40, 0x8f, 0x40, 0x61, 0x87,
- 0xdd, 0x3a, 0xf3, 0xdc, 0x8b, 0xa6, 0x7a, 0xdd, 0xea, 0x44, 0xc3, 0x71, 0x2c, 0x9f, 0x83, 0x0f,
- 0xa1, 0x47, 0xa1, 0xe8, 0x51, 0xd7, 0xb4, 0xf7, 0x78, 0xa5, 0x3a, 0xe1, 0x47, 0x92, 0x26, 0xa7,
- 0xf8, 0x6c, 0x62, 0x18, 0x5d, 0x84, 0xf1, 0x43, 0xe2, 0xf2, 0xfa, 0xde, 0x0f, 0xba, 0x3c, 0x48,
- 0x6e, 0xfb, 0x24, 0x9f, 0x55, 0x32, 0xa8, 0x1f, 0x2a, 0x30, 0x1d, 0x37, 0xdf, 0xbb, 0x52, 0x98,
- 0xa0, 0x03, 0x98, 0x72, 0xa3, 0x85, 0xaf, 0xf0, 0xab, 0xe5, 0xd1, 0xab, 0xed, 0xc6, 0x6c, 0xaf,
- 0x5b, 0x9d, 0x8a, 0x57, 0xd1, 0x71, 0x6c, 0xf5, 0x77, 0x39, 0x28, 0x8b, 0x4d, 0x59, 0x9a, 0xd9,
- 0x42, 0x37, 0xfa, 0x4a, 0xcc, 0x8b, 0xd9, 0x8d, 0x2b, 0xac, 0x6c, 0x06, 0x78, 0x94, 0x01, 0x65,
- 0x96, 0xf4, 0xa8, 0xeb, 0x67, 0x0e, 0xdf, 0xa6, 0x9e, 0xcc, 0xe6, 0x49, 0x42, 0x2a, 0xbc, 0xa7,
- 0x84, 0x34, 0x0f, 0x47, 0x61, 0xd1, 0x9b, 0x81, 0xd1, 0xe6, 0xb3, 0x67, 0x6e, 0xb6, 0xf3, 0x6c,
- 0xf6, 0xfa, 0x9e, 0x02, 0x95, 0x34, 0xa1, 0x58, 0x74, 0x51, 0x4e, 0x12, 0x5d, 0x72, 0xf7, 0x2c,
- 0xba, 0xfc, 0x51, 0x89, 0x1c, 0xbb, 0xe7, 0xa1, 0x6f, 0x40, 0x89, 0xdd, 0x9e, 0x79, 0xdf, 0x43,
- 0xe9, 0x5b, 0xc5, 0x90, 0xbb, 0xf6, 0xf5, 0x9d, 0x6f, 0x12, 0x9d, 0x5e, 0x23, 0x54, 0x0b, 0xaf,
- 0xc6, 0x21, 0x0d, 0x07, 0xa8, 0x68, 0x03, 0x0a, 0x5e, 0x9b, 0xe8, 0x23, 0xe4, 0x23, 0xbe, 0xb2,
- 0x66, 0x9b, 0xe8, 0x61, 0x61, 0xca, 0x7e, 0x61, 0x8e, 0xa3, 0xfe, 0x24, 0x7a, 0x12, 0x9e, 0x17,
- 0x3f, 0x89, 0x14, 0xfd, 0x2a, 0xf7, 0x4c, 0xbf, 0xef, 0x04, 0x71, 0x8d, 0xaf, 0xee, 0xaa, 0xe9,
- 0x51, 0xf4, 0x46, 0x9f, 0x8e, 0x6b, 0xd9, 0x74, 0xcc, 0xa4, 0xb9, 0x86, 0x03, 0xf7, 0x92, 0x94,
- 0x88, 0x7e, 0xaf, 0xc1, 0x29, 0x93, 0x92, 0x96, 0x74, 0xac, 0xc5, 0xac, 0x0a, 0x0e, 0x83, 0xd0,
- 0x3a, 0x13, 0xc7, 0x3e, 0x8a, 0xfa, 0xd3, 0x5c, 0x6c, 0x03, 0x4c, 0xf1, 0xe8, 0x0d, 0x98, 0xf0,
- 0x44, 0xb1, 0x28, 0x83, 0x43, 0x96, 0x64, 0x1f, 0x14, 0xa0, 0xb3, 0x62, 0xa6, 0x09, 0x49, 0xf1,
- 0x70, 0x08, 0x18, 0xf1, 0xdc, 0xdc, 0x28, 0x9e, 0x9b, 0x38, 0xfa, 0x34, 0xcf, 0x45, 0x57, 0xe1,
- 0x2c, 0x39, 0xa2, 0xc4, 0x36, 0x88, 0x81, 0x05, 0x18, 0xaf, 0xb5, 0xfd, 0x04, 0x51, 0xe9, 0x75,
- 0xab, 0x67, 0xd7, 0x06, 0x8c, 0xe3, 0x81, 0x52, 0xea, 0x4d, 0x18, 0x64, 0x0b, 0xe8, 0x35, 0x28,
- 0x3a, 0x6d, 0xed, 0x66, 0x90, 0x10, 0x9e, 0x19, 0xba, 0x87, 0xeb, 0x9c, 0x75, 0x90, 0xc1, 0x01,
- 0xdb, 0x80, 0x3f, 0x8c, 0x05, 0xa2, 0xfa, 0x4f, 0x05, 0x66, 0x92, 0x01, 0x71, 0x84, 0x90, 0xb3,
- 0x09, 0xd3, 0x2d, 0x8d, 0xea, 0xfb, 0x41, 0x9e, 0x15, 0x5d, 0xdb, 0xc5, 0x5e, 0xb7, 0x3a, 0x7d,
- 0x2d, 0x36, 0x72, 0xbb, 0x5b, 0x45, 0x57, 0x3a, 0x96, 0x75, 0x1c, 0xbf, 0x28, 0x25, 0xe4, 0xd1,
- 0xd7, 0x61, 0xd6, 0x30, 0x3d, 0x6a, 0xda, 0x3a, 0x0d, 0x41, 0xfd, 0x36, 0xef, 0xe3, 0xac, 0x00,
- 0x5f, 0x4d, 0x0e, 0xa6, 0xe0, 0xf6, 0xa3, 0xa8, 0xbf, 0xcc, 0x05, 0xce, 0xdd, 0x77, 0xab, 0x42,
- 0xcb, 0x00, 0x7a, 0x70, 0xb5, 0x4e, 0xb6, 0xe5, 0xc2, 0x4b, 0x37, 0x8e, 0x70, 0xa1, 0x9b, 0x7d,
- 0x77, 0xf7, 0x95, 0x13, 0x5d, 0xe9, 0xee, 0xaf, 0x9b, 0xfc, 0x7f, 0x14, 0x98, 0x8a, 0x25, 0xe0,
- 0x0c, 0x17, 0xfa, 0x6d, 0x18, 0x27, 0x47, 0x9a, 0x4e, 0x2d, 0x59, 0x57, 0x2c, 0x0d, 0x9d, 0x75,
- 0x8d, 0xf1, 0xc6, 0x93, 0x3c, 0xef, 0x43, 0xae, 0xf9, 0x18, 0x58, 0x82, 0xa1, 0x16, 0x4c, 0xef,
- 0x9a, 0xae, 0x47, 0xeb, 0x87, 0x9a, 0x69, 0x69, 0x3b, 0x16, 0x11, 0x09, 0x38, 0x4b, 0x86, 0x6f,
- 0x76, 0x76, 0x24, 0xf8, 0x79, 0xb1, 0xe4, 0xe9, 0x2b, 0x31, 0x30, 0x9c, 0x00, 0x57, 0x3f, 0x2e,
- 0xca, 0xdb, 0x42, 0x4a, 0x61, 0x8b, 0x1e, 0x63, 0x55, 0x32, 0x1f, 0x12, 0xda, 0x88, 0x54, 0xba,
- 0x9c, 0x8c, 0xe5, 0x78, 0xe4, 0x7b, 0x47, 0x2e, 0xd3, 0xf7, 0x8e, 0x7c, 0x86, 0xef, 0x1d, 0x85,
- 0xa1, 0xdf, 0x3b, 0x2e, 0x41, 0x59, 0x33, 0x5a, 0xa6, 0x5d, 0xd7, 0x75, 0xe2, 0x79, 0xbc, 0xf6,
- 0x14, 0xf7, 0xdd, 0x7a, 0x48, 0xc6, 0x51, 0x1e, 0x56, 0x3f, 0x51, 0xc7, 0x22, 0xae, 0xb8, 0x43,
- 0x16, 0x33, 0x6b, 0x77, 0x2b, 0x90, 0x0a, 0xeb, 0xa7, 0x90, 0xe6, 0xe1, 0x28, 0xec, 0xe0, 0x5b,
- 0xf5, 0xf8, 0x1d, 0xbc, 0x55, 0x97, 0x3e, 0xd7, 0xad, 0xfa, 0xc5, 0xf0, 0x3b, 0xd1, 0x04, 0x57,
- 0xf0, 0x53, 0x91, 0xef, 0x44, 0xb7, 0xbb, 0xd5, 0x47, 0xd2, 0xbe, 0x85, 0xd1, 0xe3, 0x36, 0xf1,
- 0x6a, 0xaf, 0x44, 0x3f, 0x26, 0xfd, 0x5a, 0x09, 0xfa, 0x3d, 0x86, 0x2c, 0x9f, 0x79, 0x03, 0xa1,
- 0xbc, 0xbc, 0x79, 0xf2, 0x0b, 0x55, 0x6d, 0x25, 0x01, 0xe9, 0x47, 0x8a, 0xc7, 0x12, 0xad, 0x20,
- 0x23, 0xbd, 0x21, 0xd5, 0xb7, 0xa8, 0x39, 0x0f, 0xce, 0x0d, 0x44, 0xbd, 0xab, 0xfd, 0xd7, 0xd7,
- 0xe5, 0x6d, 0x27, 0x68, 0x10, 0xad, 0x43, 0x5e, 0x27, 0xd6, 0x80, 0xda, 0x65, 0x40, 0xb0, 0x4a,
- 0x7e, 0x4c, 0xf1, 0x1b, 0xe6, 0x2b, 0x6b, 0x57, 0x31, 0xc3, 0x50, 0x7f, 0x54, 0x90, 0xc9, 0x2c,
- 0xf4, 0xfd, 0x0c, 0xc1, 0xab, 0x0e, 0xa7, 0x8d, 0x30, 0xf1, 0xf3, 0xfc, 0xed, 0x7b, 0xec, 0x03,
- 0x82, 0x39, 0x5a, 0xb3, 0x70, 0xb9, 0x24, 0x7f, 0xbc, 0x88, 0xc9, 0xdf, 0xe9, 0x22, 0x66, 0x1b,
- 0xa6, 0xc3, 0xcf, 0x4b, 0xd7, 0x1c, 0x43, 0xc6, 0x81, 0x9a, 0x0c, 0x6b, 0xf5, 0xd8, 0xe8, 0xed,
- 0x6e, 0xf5, 0x6c, 0xf2, 0xe2, 0xcc, 0xe8, 0x38, 0x81, 0x82, 0x2e, 0xc0, 0x29, 0x9e, 0x58, 0x78,
- 0xa4, 0xc8, 0x87, 0x35, 0x1b, 0x4f, 0x0a, 0xd8, 0x1f, 0xbb, 0x47, 0x11, 0xe2, 0xf5, 0x48, 0x5f,
- 0x76, 0x9c, 0x9b, 0xc2, 0xa5, 0x91, 0x3f, 0x42, 0xf8, 0x55, 0x4a, 0x30, 0x12, 0x00, 0xaa, 0xff,
- 0x0a, 0xee, 0x25, 0xbc, 0x41, 0x88, 0x1e, 0x8e, 0x18, 0x78, 0xa3, 0x2c, 0xd6, 0x96, 0x7f, 0x89,
- 0x1c, 0xfb, 0xd6, 0x7e, 0x21, 0x6a, 0xed, 0x13, 0x29, 0xf7, 0xe9, 0xe7, 0xa0, 0x48, 0x76, 0x77,
- 0x89, 0x4e, 0x45, 0xdc, 0x96, 0x9d, 0xe8, 0xe2, 0x1a, 0xa7, 0xde, 0x66, 0xa5, 0x4a, 0x38, 0xa5,
- 0x4f, 0xc4, 0x42, 0x04, 0xbd, 0x0a, 0x13, 0xd4, 0x6c, 0x91, 0xba, 0x61, 0x10, 0x43, 0x7c, 0xf6,
- 0x1a, 0xe5, 0x2b, 0x24, 0xef, 0x4e, 0x6c, 0x49, 0x00, 0x1c, 0x62, 0x5d, 0x2e, 0xfd, 0xf8, 0xe7,
- 0xd5, 0xb1, 0xb7, 0xff, 0xbe, 0x30, 0xa6, 0xfe, 0x22, 0x27, 0x7d, 0x21, 0xd4, 0xf9, 0x67, 0x6d,
- 0xfc, 0x05, 0x28, 0x39, 0x6d, 0xc6, 0xeb, 0xc8, 0x9c, 0xf5, 0x84, 0x2c, 0x45, 0xae, 0x0b, 0xfa,
- 0xed, 0x6e, 0xb5, 0x92, 0x84, 0x95, 0x63, 0x38, 0x90, 0x0e, 0x55, 0x98, 0xcf, 0xa4, 0xc2, 0xc2,
- 0xe8, 0x2a, 0x5c, 0x81, 0xd9, 0xd0, 0x7e, 0x9a, 0x44, 0x77, 0x6c, 0xc3, 0x13, 0x76, 0xcc, 0x53,
- 0xca, 0x56, 0x72, 0x10, 0xf7, 0xf3, 0xab, 0xbf, 0x29, 0x00, 0xea, 0xaf, 0x45, 0x06, 0x05, 0x04,
- 0xe5, 0xf3, 0x04, 0x84, 0xdc, 0xdd, 0x0f, 0x08, 0xf9, 0x3b, 0x1b, 0x10, 0x0a, 0x43, 0x02, 0xc2,
- 0x7d, 0x5b, 0x65, 0xdc, 0xd5, 0x18, 0xf2, 0x7b, 0x05, 0x66, 0xfb, 0xde, 0x50, 0xa0, 0xe7, 0x60,
- 0xca, 0x64, 0xf5, 0xf3, 0xae, 0x26, 0x2e, 0x7e, 0xbe, 0x9d, 0x9c, 0x13, 0x6b, 0x9d, 0x5a, 0x8f,
- 0x0e, 0xe2, 0x38, 0x2f, 0x7a, 0x10, 0xf2, 0x66, 0x5b, 0xb6, 0x8c, 0x79, 0x26, 0x5b, 0xdf, 0xf4,
- 0x30, 0xa3, 0x31, 0x0b, 0xdc, 0xd7, 0x5c, 0xe3, 0x96, 0xe6, 0x32, 0xc7, 0x76, 0x99, 0x9e, 0xf3,
- 0x71, 0x0b, 0x7c, 0x21, 0x3e, 0x8c, 0x93, 0xfc, 0xea, 0xaf, 0x14, 0x78, 0x30, 0xf5, 0x2e, 0x98,
- 0xf9, 0x31, 0x8e, 0x06, 0xd0, 0xd6, 0x5c, 0xad, 0x45, 0xc4, 0x25, 0xe7, 0x04, 0x8f, 0x57, 0x82,
- 0x5b, 0xd4, 0x66, 0x00, 0x84, 0x23, 0xa0, 0xea, 0xcf, 0x72, 0x30, 0x25, 0xaf, 0xc1, 0x7e, 0xbb,
- 0xf0, 0xee, 0xf7, 0x8d, 0x36, 0x63, 0x7d, 0xa3, 0xe1, 0x55, 0x47, 0x6c, 0x6d, 0x69, 0x9d, 0x23,
- 0x74, 0x03, 0x8a, 0x1e, 0x7f, 0xe7, 0x94, 0xa9, 0x9b, 0x1f, 0xc7, 0xe4, 0x72, 0xe1, 0x11, 0xf8,
- 0xbf, 0xb1, 0xc0, 0x53, 0x7b, 0x0a, 0xcc, 0xc7, 0xf8, 0x45, 0xd5, 0xe6, 0x62, 0xb2, 0x4b, 0x5c,
- 0x62, 0xeb, 0x04, 0x3d, 0x01, 0x25, 0xad, 0x6d, 0x3e, 0xef, 0x3a, 0x9d, 0xb6, 0x38, 0xcf, 0xe0,
- 0x0e, 0x59, 0xdf, 0x5c, 0xe7, 0x74, 0x1c, 0x70, 0x30, 0x6e, 0xb9, 0x20, 0x61, 0x55, 0x91, 0x0e,
- 0xab, 0x4f, 0xc7, 0x01, 0x47, 0x50, 0x3f, 0x15, 0x52, 0xeb, 0xa7, 0x06, 0xe4, 0x3b, 0xa6, 0x21,
- 0x5a, 0xdd, 0x4f, 0xc9, 0xac, 0xf2, 0x4a, 0xd6, 0xd2, 0x99, 0x09, 0xab, 0x7f, 0x52, 0x60, 0x36,
- 0xb6, 0xc9, 0x7b, 0xd0, 0xdc, 0xba, 0x1e, 0x6f, 0x6e, 0x5d, 0xcc, 0x7e, 0x62, 0x29, 0xed, 0xad,
- 0xfd, 0xc4, 0x1e, 0x78, 0x7f, 0xab, 0x99, 0x7c, 0x33, 0xb5, 0x98, 0xb5, 0x79, 0x9c, 0xfe, 0x50,
- 0x4a, 0xfd, 0x4b, 0x0e, 0xce, 0x0c, 0xb0, 0x21, 0xf4, 0x26, 0x40, 0x18, 0xd2, 0xc5, 0x7c, 0xc3,
- 0xe3, 0x6c, 0xdf, 0xa7, 0x1b, 0xfe, 0x8c, 0x26, 0x42, 0x8d, 0x00, 0x22, 0x17, 0xca, 0x2e, 0xf1,
- 0x88, 0x7b, 0x48, 0x8c, 0x2b, 0xbc, 0x46, 0x60, 0x7a, 0x7b, 0x2e, 0xbb, 0xde, 0xfa, 0x2c, 0x37,
- 0x8c, 0xea, 0x38, 0xc4, 0xc5, 0xd1, 0x49, 0xd0, 0x9b, 0xa1, 0xfe, 0xfc, 0xcf, 0xe6, 0xcb, 0x59,
- 0xf6, 0x13, 0x7f, 0x7b, 0x38, 0x44, 0x93, 0x7f, 0x53, 0xe0, 0x5c, 0x6c, 0x8d, 0x5b, 0xa4, 0xd5,
- 0xb6, 0x34, 0x4a, 0xee, 0x41, 0x14, 0xba, 0x11, 0x8b, 0x42, 0xcf, 0x64, 0xd7, 0xa3, 0x5c, 0x63,
- 0x6a, 0x1f, 0xfb, 0x43, 0x05, 0x1e, 0x1c, 0x28, 0x71, 0x0f, 0xdc, 0xea, 0xd5, 0xb8, 0x5b, 0x2d,
- 0x8f, 0xbe, 0xad, 0x14, 0xf7, 0xfa, 0x6b, 0xda, 0xa6, 0xb8, 0x9f, 0xfd, 0x0f, 0x26, 0x0d, 0xf5,
- 0x0f, 0x0a, 0x4c, 0x4a, 0xce, 0x4d, 0xc7, 0xb1, 0x32, 0x5c, 0x56, 0x97, 0x01, 0xc4, 0x93, 0x5b,
- 0xf9, 0x6d, 0x27, 0x1f, 0x2e, 0xfb, 0xf9, 0x60, 0x04, 0x47, 0xb8, 0xd0, 0x8b, 0x80, 0xe4, 0x02,
- 0x9b, 0x96, 0xec, 0x40, 0xf2, 0xd0, 0x9f, 0x6f, 0xcc, 0x09, 0x59, 0x84, 0xfb, 0x38, 0xf0, 0x00,
- 0x29, 0xf5, 0xcf, 0x4a, 0x98, 0xad, 0x39, 0xf9, 0x3e, 0x55, 0x3c, 0x5f, 0x5b, 0xaa, 0xe2, 0xa3,
- 0xe9, 0x86, 0x73, 0xde, 0xaf, 0xe9, 0x86, 0x2f, 0x2e, 0xc5, 0x1f, 0xde, 0x2d, 0x24, 0x36, 0xc1,
- 0xfd, 0x20, 0x6b, 0x65, 0xf7, 0x52, 0xe4, 0x99, 0x75, 0x79, 0xf9, 0xb1, 0x4c, 0xab, 0x61, 0x36,
- 0x3a, 0xb0, 0x43, 0x19, 0x7d, 0x67, 0x94, 0x1f, 0xe9, 0x9d, 0x51, 0xe1, 0x2e, 0xbc, 0x33, 0x3a,
- 0x35, 0xf4, 0x9d, 0xd1, 0x46, 0x98, 0x50, 0xfc, 0x8b, 0xc8, 0x85, 0x0c, 0x09, 0x79, 0xc8, 0xa3,
- 0x65, 0x0c, 0xe7, 0xdb, 0xc4, 0xf5, 0xc9, 0xe1, 0x02, 0x99, 0x77, 0xfa, 0xef, 0x9d, 0xe6, 0x7a,
- 0xdd, 0xea, 0xf9, 0xcd, 0x81, 0x1c, 0x38, 0x45, 0x12, 0xed, 0xc1, 0x34, 0x6f, 0x28, 0x1a, 0xc1,
- 0x93, 0x31, 0xff, 0x4d, 0xd4, 0xa3, 0x19, 0xdf, 0x06, 0x86, 0x1d, 0xef, 0x66, 0x0c, 0x06, 0x27,
- 0x60, 0x1b, 0xf5, 0xf7, 0x3f, 0x99, 0x1f, 0xfb, 0xe0, 0x93, 0xf9, 0xb1, 0x8f, 0x3e, 0x99, 0x1f,
- 0x7b, 0xbb, 0x37, 0xaf, 0xbc, 0xdf, 0x9b, 0x57, 0x3e, 0xe8, 0xcd, 0x2b, 0x1f, 0xf5, 0xe6, 0x95,
- 0x7f, 0xf4, 0xe6, 0x95, 0x1f, 0x7c, 0x3a, 0x3f, 0xf6, 0xda, 0x43, 0x43, 0xfe, 0x8d, 0xe2, 0xbf,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0x60, 0x24, 0x5a, 0x50, 0x64, 0x31, 0x00, 0x00,
-}
+func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} }
func (m *AllocatedDeviceStatus) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -1817,7 +436,7 @@ func (m *CapacityRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
v := m.Requests[QualifiedName(keysForRequests[iNdEx])]
baseI := i
@@ -1902,7 +521,7 @@ func (m *CounterSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2054,7 +673,7 @@ func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.Capacity[QualifiedName(keysForCapacity[iNdEx])]
baseI := i
@@ -2083,7 +702,7 @@ func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
for iNdEx := len(keysForAttributes) - 1; iNdEx >= 0; iNdEx-- {
v := m.Attributes[QualifiedName(keysForAttributes[iNdEx])]
baseI := i
@@ -2704,7 +1323,7 @@ func (m *DeviceCounterConsumption) MarshalToSizedBuffer(dAtA []byte) (int, error
for k := range m.Counters {
keysForCounters = append(keysForCounters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
for iNdEx := len(keysForCounters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Counters[string(keysForCounters[iNdEx])]
baseI := i
@@ -2815,7 +1434,7 @@ func (m *DeviceRequestAllocationResult) MarshalToSizedBuffer(dAtA []byte) (int,
for k := range m.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
for iNdEx := len(keysForConsumedCapacity) - 1; iNdEx >= 0; iNdEx-- {
v := m.ConsumedCapacity[QualifiedName(keysForConsumedCapacity[iNdEx])]
baseI := i
@@ -4890,7 +3509,7 @@ func (this *CapacityRequirements) String() string {
for k := range this.Requests {
keysForRequests = append(keysForRequests, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+ sort.Strings(keysForRequests)
mapStringForRequests := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForRequests {
mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[QualifiedName(k)])
@@ -4920,7 +3539,7 @@ func (this *CounterSet) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -4951,7 +3570,7 @@ func (this *Device) String() string {
for k := range this.Attributes {
keysForAttributes = append(keysForAttributes, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttributes)
+ sort.Strings(keysForAttributes)
mapStringForAttributes := "map[QualifiedName]DeviceAttribute{"
for _, k := range keysForAttributes {
mapStringForAttributes += fmt.Sprintf("%v: %v,", k, this.Attributes[QualifiedName(k)])
@@ -4961,7 +3580,7 @@ func (this *Device) String() string {
for k := range this.Capacity {
keysForCapacity = append(keysForCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+ sort.Strings(keysForCapacity)
mapStringForCapacity := "map[QualifiedName]DeviceCapacity{"
for _, k := range keysForCapacity {
mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[QualifiedName(k)])
@@ -5168,7 +3787,7 @@ func (this *DeviceCounterConsumption) String() string {
for k := range this.Counters {
keysForCounters = append(keysForCounters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForCounters)
+ sort.Strings(keysForCounters)
mapStringForCounters := "map[string]Counter{"
for _, k := range keysForCounters {
mapStringForCounters += fmt.Sprintf("%v: %v,", k, this.Counters[k])
@@ -5211,7 +3830,7 @@ func (this *DeviceRequestAllocationResult) String() string {
for k := range this.ConsumedCapacity {
keysForConsumedCapacity = append(keysForConsumedCapacity, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForConsumedCapacity)
+ sort.Strings(keysForConsumedCapacity)
mapStringForConsumedCapacity := "map[QualifiedName]resource.Quantity{"
for _, k := range keysForConsumedCapacity {
mapStringForConsumedCapacity += fmt.Sprintf("%v: %v,", k, this.ConsumedCapacity[QualifiedName(k)])
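
The hunks above swap the gogo helper `github_com_gogo_protobuf_sortkeys.Strings` for the standard library's `sort.Strings` wherever the regenerated marshal and String methods need deterministic map-key order. Below is a minimal, self-contained sketch of that pattern; the type and field names are illustrative stand-ins, not the vendored `QualifiedName`/`resource.Quantity` types.

```go
package main

import (
	"fmt"
	"sort"
)

// quantity is an illustrative stand-in for resource.Quantity.
type quantity string

func main() {
	requests := map[string]quantity{
		"example.com/gpu-memory": "16Gi",
		"example.com/gpu-cores":  "4",
	}

	// Collect and sort the keys so the emitted output is deterministic,
	// mirroring the sort.Strings call in the generated MarshalToSizedBuffer
	// and String methods.
	keys := make([]string, 0, len(requests))
	for k := range requests {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	// The generated code walks the sorted keys in reverse while filling the
	// buffer back to front; forward order is enough for this illustration.
	for _, k := range keys {
		fmt.Printf("%s: %s\n", k, requests[k])
	}
}
```

Both helpers sort a `[]string` in place, so the change is behavior-preserving; it only drops the gogo dependency in favor of the standard library.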
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/generated.proto b/operator/vendor/k8s.io/api/resource/v1beta2/generated.proto
index 213a5615..76af5aa4 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta2/generated.proto
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/generated.proto
@@ -41,7 +41,7 @@ message AllocatedDeviceStatus {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
optional string driver = 1;
@@ -65,6 +65,8 @@ message AllocatedDeviceStatus {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 7;
// Conditions contains the latest observation of the device's state.
@@ -88,6 +90,7 @@ message AllocatedDeviceStatus {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
optional NetworkDeviceData networkData = 6;
}
@@ -293,7 +296,7 @@ message Counter {
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -304,12 +307,14 @@ message CounterSet {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string name = 1;
// Counters defines the set of counters for this CounterSet
// The name of each counter must be unique in that set and must be a DNS label.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counters is 32.
//
// +required
map<string, Counter> counters = 2;
@@ -346,14 +351,17 @@ message Device {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
repeated DeviceCounterConsumption consumesCounters = 4;
// NodeName identifies the node where the device is available.
@@ -390,7 +398,9 @@ message Device {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -427,6 +437,8 @@ message Device {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 10;
// BindingFailureConditions defines the conditions for binding failure.
@@ -443,6 +455,8 @@ message Device {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 11;
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -462,6 +476,7 @@ message DeviceAllocationConfiguration {
// or from a claim.
//
// +required
+ // +k8s:required
optional string source = 1;
// Requests lists the names of requests where the configuration applies.
@@ -473,6 +488,10 @@ message DeviceAllocationConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 2;
optional DeviceConfiguration deviceConfiguration = 3;
@@ -484,6 +503,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceRequestAllocationResult results = 1;
// This field is a combination of all the claim and class configuration parameters.
@@ -496,6 +517,8 @@ message DeviceAllocationResult {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
repeated DeviceAllocationConfiguration config = 2;
}
@@ -504,26 +527,30 @@ message DeviceAttribute {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional int64 int = 2;
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional bool bool = 3;
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string string = 4;
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
optional string version = 5;
}
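
This hunk re-annotates the four `DeviceAttribute` value fields as members of a single union (`+k8s:unionMember`), i.e. at most one of them should be set. A minimal sketch of how a consumer might dispatch on whichever member is present follows; it uses a simplified local struct whose field names mirror the vendored Go API but should be treated as assumptions here.

```go
package main

import "fmt"

// deviceAttribute is a simplified stand-in for the vendored DeviceAttribute
// type: exactly one of the pointer fields is expected to be set (the
// "ValueType" union described in the comments above).
type deviceAttribute struct {
	IntValue     *int64
	BoolValue    *bool
	StringValue  *string
	VersionValue *string
}

// describe returns a human-readable form of whichever union member is set.
func describe(a deviceAttribute) string {
	switch {
	case a.IntValue != nil:
		return fmt.Sprintf("int: %d", *a.IntValue)
	case a.BoolValue != nil:
		return fmt.Sprintf("bool: %t", *a.BoolValue)
	case a.StringValue != nil:
		return fmt.Sprintf("string: %q", *a.StringValue)
	case a.VersionValue != nil:
		return fmt.Sprintf("version: %s", *a.VersionValue)
	default:
		return "unset"
	}
}

func main() {
	v := "8.0.2"
	fmt.Println(describe(deviceAttribute{VersionValue: &v}))
}
```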
@@ -560,6 +587,11 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
repeated DeviceRequest requests = 1;
// These constraints must be satisfied by the set of devices that get
@@ -567,6 +599,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceConstraint constraints = 2;
// This field holds configuration for multiple potential drivers which
@@ -575,6 +609,8 @@ message DeviceClaim {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClaimConfiguration config = 3;
}
@@ -589,6 +625,10 @@ message DeviceClaimConfiguration {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
optional DeviceConfiguration deviceConfiguration = 2;
@@ -604,6 +644,8 @@ message DeviceClaimConfiguration {
message DeviceClass {
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec defines what can be allocated and how to configure it.
@@ -639,6 +681,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 1;
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -649,6 +693,8 @@ message DeviceClassSpec {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceClassConfiguration config = 2;
// ExtendedResourceName is the extended resource name for the devices of this class.
@@ -663,6 +709,8 @@ message DeviceClassSpec {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
optional string extendedResourceName = 4;
}
@@ -674,6 +722,7 @@ message DeviceConfiguration {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
optional OpaqueDeviceConfiguration opaque = 1;
}
@@ -691,6 +740,10 @@ message DeviceConstraint {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
repeated string requests = 1;
// MatchAttribute requires that all devices in question have this
@@ -708,6 +761,8 @@ message DeviceConstraint {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
optional string matchAttribute = 2;
// DistinctAttribute requires that all devices in question have this
@@ -734,14 +789,13 @@ message DeviceCounterConsumption {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
optional string counterSet = 1;
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
map<string, Counter> counters = 2;
@@ -773,6 +827,7 @@ message DeviceRequest {
//
// +optional
// +oneOf=deviceRequestType
+ // +k8s:optional
optional ExactDeviceRequest exactly = 2;
// FirstAvailable contains subrequests, of which exactly one will be
@@ -793,6 +848,11 @@ message DeviceRequest {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
repeated DeviceSubRequest firstAvailable = 3;
}
@@ -814,9 +874,11 @@ message DeviceRequestAllocationResult {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
optional string driver = 2;
// This name together with the driver name and the device name field
@@ -826,6 +888,8 @@ message DeviceRequestAllocationResult {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
optional string pool = 3;
// Device references one device instance via its name in the driver's
@@ -868,6 +932,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingConditions = 7;
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -879,6 +945,8 @@ message DeviceRequestAllocationResult {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
repeated string bindingFailureConditions = 8;
// ShareID uniquely identifies an individual allocation share of the device,
@@ -888,6 +956,8 @@ message DeviceRequestAllocationResult {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
optional string shareID = 9;
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -944,6 +1014,8 @@ message DeviceSubRequest {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
optional string deviceClassName = 2;
// Selectors define criteria which must be satisfied by a specific
@@ -953,6 +1025,8 @@ message DeviceSubRequest {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 3;
// AllocationMode and its related fields define how devices are allocated
@@ -1044,10 +1118,13 @@ message DeviceTaint {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
optional string effect = 3;
// TimeAdded represents the time at which the taint was added.
@@ -1065,6 +1142,8 @@ message DeviceToleration {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
optional string key = 1;
// Operator represents a key's relationship to the value.
@@ -1124,6 +1203,8 @@ message ExactDeviceRequest {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
repeated DeviceSelector selectors = 2;
// AllocationMode and its related fields define how devices are allocated
@@ -1146,6 +1227,7 @@ message ExactDeviceRequest {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
optional string allocationMode = 3;
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -1221,6 +1303,8 @@ message NetworkDeviceData {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
optional string interfaceName = 1;
// IPs lists the network addresses assigned to the device's network interface.
@@ -1231,6 +1315,10 @@ message NetworkDeviceData {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
repeated string ips = 2;
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1238,6 +1326,8 @@ message NetworkDeviceData {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
optional string hardwareAddress = 3;
}
@@ -1251,9 +1341,11 @@ message OpaqueDeviceConfiguration {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
optional string driver = 1;
// Parameters can contain arbitrary data. It is the responsibility of
@@ -1282,6 +1374,7 @@ message ResourceClaim {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
optional ResourceClaimSpec spec = 2;
// Status describes whether the claim is ready to use and what has been allocated.
@@ -1336,6 +1429,8 @@ message ResourceClaimStatus {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
optional AllocationResult allocation = 1;
// ReservedFor indicates which entities are currently allowed to use
@@ -1363,6 +1458,10 @@ message ResourceClaimStatus {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
repeated ResourceClaimConsumerReference reservedFor = 2;
// Devices contains the status of each device allocated for this
@@ -1370,12 +1469,18 @@ message ResourceClaimStatus {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
repeated AllocatedDeviceStatus devices = 4;
}
@@ -1509,7 +1614,8 @@ message ResourceSliceSpec {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
optional string driver = 1;
@@ -1556,10 +1662,14 @@ message ResourceSliceSpec {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
repeated Device devices = 6;
// PerDeviceNodeSelection defines whether the access from nodes to
@@ -1577,13 +1687,21 @@ message ResourceSliceSpec {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
repeated CounterSet sharedCounters = 8;
}
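
The constraints above (at most 8 counter sets, and Devices and SharedCounters being mutually exclusive within one ResourceSlice) are easier to see in a concrete spec. Below is a minimal Go sketch, assuming the resource.k8s.io/v1beta2 Go types and an illustrative driver/pool naming; node-scoping fields are omitted for brevity, and the slice publishes counters only, leaving Devices unset.

package example

import (
	resourceapi "k8s.io/api/resource/v1beta2"
	"k8s.io/apimachinery/pkg/api/resource"
)

// CounterOnlySliceSpec builds a ResourceSliceSpec that publishes shared
// counters without any devices, matching the zeroOrOneOf=ResourceSliceType
// rule: a single slice carries either Devices or SharedCounters, not both.
func CounterOnlySliceSpec() resourceapi.ResourceSliceSpec {
	return resourceapi.ResourceSliceSpec{
		Driver: "gpu.example.com", // lower-case DNS subdomain, per the field docs
		Pool: resourceapi.ResourcePool{
			Name:               "node-a-gpus",
			Generation:         1,
			ResourceSliceCount: 2, // e.g. this counter slice plus one device slice
		},
		// At most 8 counter sets per slice; counter-set names must be
		// unique across the whole ResourcePool.
		SharedCounters: []resourceapi.CounterSet{{
			Name: "numa0",
			Counters: map[string]resourceapi.Counter{
				"memory": {Value: resource.MustParse("40Gi")},
			},
		}},
	}
}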
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/resource/v1beta2/generated.protomessage.pb.go
new file mode 100644
index 00000000..b6417d18
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/generated.protomessage.pb.go
@@ -0,0 +1,108 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta2
+
+func (*AllocatedDeviceStatus) ProtoMessage() {}
+
+func (*AllocationResult) ProtoMessage() {}
+
+func (*CELDeviceSelector) ProtoMessage() {}
+
+func (*CapacityRequestPolicy) ProtoMessage() {}
+
+func (*CapacityRequestPolicyRange) ProtoMessage() {}
+
+func (*CapacityRequirements) ProtoMessage() {}
+
+func (*Counter) ProtoMessage() {}
+
+func (*CounterSet) ProtoMessage() {}
+
+func (*Device) ProtoMessage() {}
+
+func (*DeviceAllocationConfiguration) ProtoMessage() {}
+
+func (*DeviceAllocationResult) ProtoMessage() {}
+
+func (*DeviceAttribute) ProtoMessage() {}
+
+func (*DeviceCapacity) ProtoMessage() {}
+
+func (*DeviceClaim) ProtoMessage() {}
+
+func (*DeviceClaimConfiguration) ProtoMessage() {}
+
+func (*DeviceClass) ProtoMessage() {}
+
+func (*DeviceClassConfiguration) ProtoMessage() {}
+
+func (*DeviceClassList) ProtoMessage() {}
+
+func (*DeviceClassSpec) ProtoMessage() {}
+
+func (*DeviceConfiguration) ProtoMessage() {}
+
+func (*DeviceConstraint) ProtoMessage() {}
+
+func (*DeviceCounterConsumption) ProtoMessage() {}
+
+func (*DeviceRequest) ProtoMessage() {}
+
+func (*DeviceRequestAllocationResult) ProtoMessage() {}
+
+func (*DeviceSelector) ProtoMessage() {}
+
+func (*DeviceSubRequest) ProtoMessage() {}
+
+func (*DeviceTaint) ProtoMessage() {}
+
+func (*DeviceToleration) ProtoMessage() {}
+
+func (*ExactDeviceRequest) ProtoMessage() {}
+
+func (*NetworkDeviceData) ProtoMessage() {}
+
+func (*OpaqueDeviceConfiguration) ProtoMessage() {}
+
+func (*ResourceClaim) ProtoMessage() {}
+
+func (*ResourceClaimConsumerReference) ProtoMessage() {}
+
+func (*ResourceClaimList) ProtoMessage() {}
+
+func (*ResourceClaimSpec) ProtoMessage() {}
+
+func (*ResourceClaimStatus) ProtoMessage() {}
+
+func (*ResourceClaimTemplate) ProtoMessage() {}
+
+func (*ResourceClaimTemplateList) ProtoMessage() {}
+
+func (*ResourceClaimTemplateSpec) ProtoMessage() {}
+
+func (*ResourcePool) ProtoMessage() {}
+
+func (*ResourceSlice) ProtoMessage() {}
+
+func (*ResourceSliceList) ProtoMessage() {}
+
+func (*ResourceSliceSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/types.go b/operator/vendor/k8s.io/api/resource/v1beta2/types.go
index 9fa98abd..49534348 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta2/types.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/types.go
@@ -101,7 +101,8 @@ type ResourceSliceSpec struct {
// objects with a certain driver name.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver. This field is immutable.
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
@@ -148,11 +149,15 @@ type ResourceSliceSpec struct {
// Devices lists some or all of the devices in this pool.
//
- // Must not have more than 128 entries.
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
//
// +optional
// +listType=atomic
- Devices []Device `json:"devices" protobuf:"bytes,6,name=devices"`
+ // +k8s:optional
+ // +zeroOrOneOf=ResourceSliceType
+ Devices []Device `json:"devices,omitempty" protobuf:"bytes,6,name=devices"`
// PerDeviceNodeSelection defines whether the access from nodes to
// resources in the pool is set on the ResourceSlice level or on each
@@ -169,19 +174,27 @@ type ResourceSliceSpec struct {
// SharedCounters defines a list of counter sets, each of which
// has a name and a list of counters available.
//
- // The names of the SharedCounters must be unique in the ResourceSlice.
+ // The names of the counter sets must be unique in the ResourcePool.
//
- // The maximum number of counters in all sets is 32.
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
// +featureGate=DRAPartitionableDevices
+ // +zeroOrOneOf=ResourceSliceType
+ // +k8s:maxItems=8
SharedCounters []CounterSet `json:"sharedCounters,omitempty" protobuf:"bytes,8,name=sharedCounters"`
}
// CounterSet defines a named set of counters
// that are available to be used by devices defined in the
-// ResourceSlice.
+// ResourcePool.
//
// The counters are not allocatable by themselves, but
// can be referenced by devices. When a device is allocated,
@@ -192,12 +205,14 @@ type CounterSet struct {
// It must be a DNS label.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
Name string `json:"name" protobuf:"bytes,1,name=name"`
// Counters defines the set of counters for this CounterSet
// The name of each counter must be unique in that set and must be a DNS label.
//
- // The maximum number of counters in all sets is 32.
+ // The maximum number of counters is 32.
//
// +required
Counters map[string]Counter `json:"counters,omitempty" protobuf:"bytes,2,name=counters"`
@@ -246,13 +261,27 @@ type ResourcePool struct {
const ResourceSliceMaxSharedCapacity = 128
const ResourceSliceMaxDevices = 128
+const ResourceSliceMaxDevicesWithTaintsOrConsumesCounters = 64
const PoolNameMaxLength = validation.DNS1123SubdomainMaxLength // Same as for a single node name.
const BindingConditionsMaxSize = 4
const BindingFailureConditionsMaxSize = 4
-// Defines the max number of shared counters that can be specified
-// in a ResourceSlice. The number is summed up across all sets.
-const ResourceSliceMaxSharedCounters = 32
+// Defines the maximum number of counter sets (through the
+// SharedCounters field) that can be defined in a ResourceSlice.
+const ResourceSliceMaxCounterSets = 8
+
+// Defines the maximum number of counters that can be defined
+// in a counter set.
+const ResourceSliceMaxCountersPerCounterSet = 32
+
+// Defines the maximum number of device counter consumptions
+// (through the ConsumesCounters field) that can be defined per
+// device.
+const ResourceSliceMaxDeviceCounterConsumptionsPerDevice = 2
+
+// Defines the maximum number of counters that can be defined
+// per device counter consumption.
+const ResourceSliceMaxCountersPerDeviceCounterConsumption = 32
// Device represents one individual hardware instance that can be selected based
// on its attributes. Besides the name, exactly one field must be set.
@@ -285,14 +314,17 @@ type Device struct {
//
// There can only be a single entry per counterSet.
//
- // The total number of device counter consumption entries
- // must be <= 32. In addition, the total number in the
- // entire ResourceSlice must be <= 1024 (for example,
- // 64 devices with 16 counters each).
+ // The maximum number of device counter consumptions per
+ // device is 2.
//
// +optional
+ // +k8s:optional
// +listType=atomic
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=counterSet
// +featureGate=DRAPartitionableDevices
+ // +k8s:maxItems=2
ConsumesCounters []DeviceCounterConsumption `json:"consumesCounters,omitempty" protobuf:"bytes,4,rep,name=consumesCounters"`
// NodeName identifies the node where the device is available.
@@ -329,7 +361,9 @@ type Device struct {
// If specified, these are the driver-defined taints.
//
- // The maximum number of taints is 4.
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
//
// This is an alpha field and requires enabling the DRADeviceTaints
// feature gate.
@@ -366,6 +400,8 @@ type Device struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,10,rep,name=bindingConditions"`
// BindingFailureConditions defines the conditions for binding failure.
@@ -382,6 +418,8 @@ type Device struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,11,rep,name=bindingFailureConditions"`
// AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
@@ -401,14 +439,13 @@ type DeviceCounterConsumption struct {
// counters defined will be consumed.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-short-name
CounterSet string `json:"counterSet" protobuf:"bytes,1,opt,name=counterSet"`
// Counters defines the counters that will be consumed by the device.
//
- // The maximum number counters in a device is 32.
- // In addition, the maximum number of all counters
- // in all devices is 1024 (for example, 64 devices with
- // 16 counters each).
+ // The maximum number of counters is 32.
//
// +required
Counters map[string]Counter `json:"counters,omitempty" protobuf:"bytes,2,opt,name=counters"`
@@ -531,14 +568,6 @@ type CapacityRequestPolicyRange struct {
// Limit for the sum of the number of entries in both attributes and capacity.
const ResourceSliceMaxAttributesAndCapacitiesPerDevice = 32
-// Limit for the total number of counters in each device.
-const ResourceSliceMaxCountersPerDevice = 32
-
-// Limit for the total number of counters defined in devices in
-// a ResourceSlice. We want to allow up to 64 devices to specify
-// up to 16 counters, so the limit for the ResourceSlice will be 1024.
-const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
-
// QualifiedName is the name of a device attribute or capacity.
//
// Attributes and capacities are defined either by the owner of the specific
@@ -558,6 +587,9 @@ const ResourceSliceMaxDeviceCountersPerSlice = 1024 // 64 * 16
type QualifiedName string
// FullyQualifiedName is a QualifiedName where the domain is set.
+// Format validation cannot be added to this type because one of its usages,
+// DistinctAttribute, is validated conditionally. This conditional validation
+// cannot be expressed declaratively.
type FullyQualifiedName string
// DeviceMaxDomainLength is the maximum length of the domain prefix in a fully-qualified name.
@@ -575,34 +607,38 @@ type DeviceAttribute struct {
// IntValue is a number.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
IntValue *int64 `json:"int,omitempty" protobuf:"varint,2,opt,name=int"`
// BoolValue is a true/false value.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
BoolValue *bool `json:"bool,omitempty" protobuf:"varint,3,opt,name=bool"`
// StringValue is a string. Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
StringValue *string `json:"string,omitempty" protobuf:"bytes,4,opt,name=string"`
// VersionValue is a semantic version according to semver.org spec 2.0.0.
// Must not be longer than 64 characters.
//
// +optional
- // +oneOf=ValueType
+ // +k8s:optional
+ // +k8s:unionMember
VersionValue *string `json:"version,omitempty" protobuf:"bytes,5,opt,name=version"`
}
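
The union markers above only change how validation is declared, not the shape of the type: a caller still sets exactly one of the value fields. A minimal sketch, assuming the v1beta2 Go types and k8s.io/utils/ptr; the attribute names are illustrative.

package example

import (
	resourceapi "k8s.io/api/resource/v1beta2"
	"k8s.io/utils/ptr"
)

// GPUAttributes returns attributes for one device. Each DeviceAttribute is a
// union: exactly one of IntValue, BoolValue, StringValue or VersionValue may
// be set, which is what the +k8s:unionMember tags express declaratively.
func GPUAttributes() map[resourceapi.QualifiedName]resourceapi.DeviceAttribute {
	return map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
		"model":         {StringValue: ptr.To("a100")},   // string member only
		"driverVersion": {VersionValue: ptr.To("1.2.3")}, // semver, <= 64 chars
		"memoryGiB":     {IntValue: ptr.To[int64](80)},   // int member only
	}
}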
// DeviceAttributeMaxValueLength is the maximum length of a string or version attribute value.
const DeviceAttributeMaxValueLength = 64
-// DeviceTaintsMaxLength is the maximum number of taints per device.
-const DeviceTaintsMaxLength = 4
+// DeviceTaintsMaxLength is the maximum number of taints per Device.
+const DeviceTaintsMaxLength = 16
// The device this taint is attached to has the "effect" on
// any claim which does not tolerate the taint and, through the claim,
@@ -624,16 +660,27 @@ type DeviceTaint struct {
// The effect of the taint on claims that do not tolerate the taint
// and through such claims on the pods using them.
- // Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for
- // nodes is not valid here.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
//
// +required
+ // +k8s:required
Effect DeviceTaintEffect `json:"effect" protobuf:"bytes,3,name=effect,casttype=DeviceTaintEffect"`
// ^^^^
//
// Implementing PreferNoSchedule would depend on a scoring solution for DRA.
// It might get added as part of that.
+ //
+ // A possible future new effect is NoExecuteWithPodDisruptionBudget:
+ // honor the pod disruption budget instead of simply deleting pods.
+ // This is currently undecided, it could also be a separate field.
+ //
+ // Validation must be prepared to allow unknown enums in stored objects,
+ // which will enable adding new enums within a single release without
+ // ratcheting.
// TimeAdded represents the time at which the taint was added.
// Added automatically during create or update if not set.
@@ -649,9 +696,13 @@ type DeviceTaint struct {
}
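
The forward-compatibility rule above ("Consumers must treat unknown effects like None") translates into a default branch on the consumer side. A minimal sketch, assuming the v1beta2 Go types; the helper name is illustrative, not part of the API.

package example

import resourceapi "k8s.io/api/resource/v1beta2"

// effectBlocksNewPods reports whether a taint effect should keep untolerating
// claims (and their pods) from being scheduled. Unknown effects, including
// ones added in future releases, fall through to the None behaviour.
func effectBlocksNewPods(effect resourceapi.DeviceTaintEffect) bool {
	switch effect {
	case resourceapi.DeviceTaintEffectNoSchedule, resourceapi.DeviceTaintEffectNoExecute:
		return true
	default:
		// DeviceTaintEffectNone and any unrecognized effect are treated as
		// purely informational.
		return false
	}
}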
// +enum
+// +k8s:enum
type DeviceTaintEffect string
const (
+ // No effect, the taint is purely informational.
+ DeviceTaintEffectNone DeviceTaintEffect = "None"
+
// Do not allow new pods to schedule which use a tainted device unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
@@ -678,6 +729,7 @@ type ResourceSliceList struct {
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.33
+// +k8s:supportsSubresource=/status
// ResourceClaim describes a request for access to resources in the cluster,
// for use by workloads. For example, if a workload needs an accelerator device
@@ -695,6 +747,7 @@ type ResourceClaim struct {
// Spec describes what is being requested and how to configure it.
// The spec is immutable.
+ // +k8s:immutable
Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
// Status describes whether the claim is ready to use and what has been allocated.
@@ -722,6 +775,11 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=32
Requests []DeviceRequest `json:"requests" protobuf:"bytes,1,name=requests"`
// These constraints must be satisfied by the set of devices that get
@@ -729,6 +787,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Constraints []DeviceConstraint `json:"constraints,omitempty" protobuf:"bytes,2,opt,name=constraints"`
// This field holds configuration for multiple potential drivers which
@@ -737,6 +797,8 @@ type DeviceClaim struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClaimConfiguration `json:"config,omitempty" protobuf:"bytes,3,opt,name=config"`
// Potential future extension, ignored by older schedulers. This is
@@ -787,6 +849,7 @@ type DeviceRequest struct {
//
// +optional
// +oneOf=deviceRequestType
+ // +k8s:optional
Exactly *ExactDeviceRequest `json:"exactly,omitempty" protobuf:"bytes,2,name=exactly"`
// FirstAvailable contains subrequests, of which exactly one will be
@@ -807,6 +870,11 @@ type DeviceRequest struct {
// +oneOf=deviceRequestType
// +listType=atomic
// +featureGate=DRAPrioritizedList
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=map
+ // +k8s:listMapKey=name
+ // +k8s:maxItems=8
FirstAvailable []DeviceSubRequest `json:"firstAvailable,omitempty" protobuf:"bytes,3,name=firstAvailable"`
}
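
A request therefore carries either Exactly or FirstAvailable, never both. A minimal sketch of the prioritized-list form, assuming the v1beta2 Go types and made-up DeviceClass names; with DRAPrioritizedList enabled, the scheduler satisfies the first subrequest it can.

package example

import resourceapi "k8s.io/api/resource/v1beta2"

// PreferBigGPU asks for one device from a large class and falls back to a
// smaller class if none is available. Subrequest names must be unique
// (listMapKey=name) and at most 8 subrequests are allowed.
func PreferBigGPU() resourceapi.DeviceClaim {
	return resourceapi.DeviceClaim{
		Requests: []resourceapi.DeviceRequest{{
			Name: "gpu",
			// Exactly is left nil because FirstAvailable is set; the two
			// fields form the deviceRequestType one-of.
			FirstAvailable: []resourceapi.DeviceSubRequest{
				{
					Name:            "large",
					DeviceClassName: "gpu-80gb.example.com",
					AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
					Count:           1,
				},
				{
					Name:            "small",
					DeviceClassName: "gpu-40gb.example.com",
					AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
					Count:           1,
				},
			},
		}},
	}
}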
@@ -834,6 +902,8 @@ type ExactDeviceRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,2,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -856,6 +926,7 @@ type ExactDeviceRequest struct {
// requests with unknown modes.
//
// +optional
+ // +k8s:optional
AllocationMode DeviceAllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"`
// Count is used only when the count mode is "ExactCount". Must be greater than zero.
@@ -951,6 +1022,8 @@ type DeviceSubRequest struct {
// to reference.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name
DeviceClassName string `json:"deviceClassName" protobuf:"bytes,2,name=deviceClassName"`
// Selectors define criteria which must be satisfied by a specific
@@ -960,6 +1033,8 @@ type DeviceSubRequest struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,3,name=selectors"`
// AllocationMode and its related fields define how devices are allocated
@@ -1066,6 +1141,8 @@ const (
DeviceTolerationsMaxLength = 16
)
+// +enum
+// +k8s:enum
type DeviceAllocationMode string
// Valid [DeviceRequest.CountMode] values.
@@ -1184,6 +1261,10 @@ type DeviceConstraint struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
// MatchAttribute requires that all devices in question have this
@@ -1201,6 +1282,8 @@ type DeviceConstraint struct {
//
// +optional
// +oneOf=ConstraintType
+ // +k8s:optional
+ // +k8s:format=k8s-resource-fully-qualified-name
MatchAttribute *FullyQualifiedName `json:"matchAttribute,omitempty" protobuf:"bytes,2,opt,name=matchAttribute"`
// Potential future extension, not part of the current design:
@@ -1241,6 +1324,10 @@ type DeviceClaimConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,1,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,2,name=deviceConfiguration"`
@@ -1254,6 +1341,7 @@ type DeviceConfiguration struct {
//
// +optional
// +oneOf=ConfigurationType
+ // +k8s:optional
Opaque *OpaqueDeviceConfiguration `json:"opaque,omitempty" protobuf:"bytes,1,opt,name=opaque"`
}
@@ -1267,9 +1355,11 @@ type OpaqueDeviceConfiguration struct {
// to decide whether it needs to validate them.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-long-name-caseless
Driver string `json:"driver" protobuf:"bytes,1,name=driver"`
// Parameters can contain arbitrary data. It is the responsibility of
@@ -1295,6 +1385,8 @@ type DeviceToleration struct {
// Must be a label name.
//
// +optional
+ // +k8s:optional
+ // +k8s:format=k8s-label-key
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
@@ -1333,6 +1425,7 @@ type DeviceToleration struct {
// A toleration operator is the set of operators that can be used in a toleration.
//
// +enum
+// +k8s:enum
type DeviceTolerationOperator string
const (
@@ -1346,6 +1439,8 @@ type ResourceClaimStatus struct {
// Allocation is set once the claim has been allocated successfully.
//
// +optional
+ // +k8s:optional
+ // +k8s:update=NoModify
Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,1,opt,name=allocation"`
// ReservedFor indicates which entities are currently allowed to use
@@ -1373,6 +1468,10 @@ type ResourceClaimStatus struct {
// +listMapKey=uid
// +patchStrategy=merge
// +patchMergeKey=uid
+ // +k8s:optional
+ // +k8s:listType=map
+ // +k8s:listMapKey=uid
+ // +k8s:maxItems=256
ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"`
// DeallocationRequested is tombstoned since Kubernetes 1.32 where
@@ -1385,12 +1484,18 @@ type ResourceClaimStatus struct {
// information. Entries are owned by their respective drivers.
//
// +optional
+ // +k8s:optional
// +listType=map
// +listMapKey=driver
// +listMapKey=device
// +listMapKey=pool
// +listMapKey=shareID
// +featureGate=DRAResourceClaimDeviceStatus
+ // +k8s:listType=map
+ // +k8s:listMapKey=driver
+ // +k8s:listMapKey=device
+ // +k8s:listMapKey=pool
+ // +k8s:listMapKey=shareID
Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
@@ -1453,6 +1558,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Results []DeviceRequestAllocationResult `json:"results,omitempty" protobuf:"bytes,1,opt,name=results"`
// This field is a combination of all the claim and class configuration parameters.
@@ -1465,6 +1572,8 @@ type DeviceAllocationResult struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=64
Config []DeviceAllocationConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
}
@@ -1490,9 +1599,11 @@ type DeviceRequestAllocationResult struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
+ // +k8s:format=k8s-long-name-caseless
+ // +k8s:required
Driver string `json:"driver" protobuf:"bytes,2,name=driver"`
// This name together with the driver name and the device name field
@@ -1502,6 +1613,8 @@ type DeviceRequestAllocationResult struct {
// DNS sub-domains separated by slashes.
//
// +required
+ // +k8s:required
+ // +k8s:format=k8s-resource-pool-name
Pool string `json:"pool" protobuf:"bytes,3,name=pool"`
// Device references one device instance via its name in the driver's
@@ -1544,6 +1657,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingConditions []string `json:"bindingConditions,omitempty" protobuf:"bytes,7,rep,name=bindingConditions"`
// BindingFailureConditions contains a copy of the BindingFailureConditions
@@ -1555,6 +1670,8 @@ type DeviceRequestAllocationResult struct {
// +optional
// +listType=atomic
// +featureGate=DRADeviceBindingConditions,DRAResourceClaimDeviceStatus
+ // +k8s:optional
+ // +k8s:maxItems=4
BindingFailureConditions []string `json:"bindingFailureConditions,omitempty" protobuf:"bytes,8,rep,name=bindingFailureConditions"`
// ShareID uniquely identifies an individual allocation share of the device,
@@ -1564,6 +1681,8 @@ type DeviceRequestAllocationResult struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *types.UID `json:"shareID,omitempty" protobuf:"bytes,9,opt,name=shareID"`
// ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
@@ -1587,6 +1706,7 @@ type DeviceAllocationConfiguration struct {
// or from a claim.
//
// +required
+ // +k8s:required
Source AllocationConfigSource `json:"source" protobuf:"bytes,1,name=source"`
// Requests lists the names of requests where the configuration applies.
@@ -1598,17 +1718,23 @@ type DeviceAllocationConfiguration struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=32
Requests []string `json:"requests,omitempty" protobuf:"bytes,2,opt,name=requests"`
DeviceConfiguration `json:",inline" protobuf:"bytes,3,name=deviceConfiguration"`
}
+// +enum
+// +k8s:enum
type AllocationConfigSource string
// Valid [DeviceAllocationConfiguration.Source] values.
const (
- AllocationConfigSourceClass = "FromClass"
- AllocationConfigSourceClaim = "FromClaim"
+ AllocationConfigSourceClass AllocationConfigSource = "FromClass"
+ AllocationConfigSourceClaim AllocationConfigSource = "FromClaim"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -1641,6 +1767,8 @@ type DeviceClass struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
+ // +k8s:subfield(name)=+k8s:optional
+ // +k8s:subfield(name)=+k8s:format=k8s-long-name
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines what can be allocated and how to configure it.
@@ -1661,6 +1789,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Selectors []DeviceSelector `json:"selectors,omitempty" protobuf:"bytes,1,opt,name=selectors"`
// Config defines configuration parameters that apply to each device that is claimed via this class.
@@ -1671,6 +1801,8 @@ type DeviceClassSpec struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:maxItems=32
Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"`
// SuitableNodes is tombstoned since Kubernetes 1.32 where
@@ -1690,6 +1822,8 @@ type DeviceClassSpec struct {
// This is an alpha field.
// +optional
// +featureGate=DRAExtendedResource
+ // +k8s:optional
+ // +k8s:format=k8s-extended-resource-name
ExtendedResourceName *string `json:"extendedResourceName,omitempty" protobuf:"bytes,4,opt,name=extendedResourceName"`
}
@@ -1791,7 +1925,7 @@ type AllocatedDeviceStatus struct {
// needed on a node.
//
// Must be a DNS subdomain and should end with a DNS domain owned by the
- // vendor of the driver.
+ // vendor of the driver. It should use only lower case characters.
//
// +required
Driver string `json:"driver" protobuf:"bytes,1,rep,name=driver"`
@@ -1815,6 +1949,8 @@ type AllocatedDeviceStatus struct {
//
// +optional
// +featureGate=DRAConsumableCapacity
+ // +k8s:optional
+ // +k8s:format=k8s-uuid
ShareID *string `json:"shareID,omitempty" protobuf:"bytes,7,opt,name=shareID"`
// Conditions contains the latest observation of the device's state.
@@ -1838,6 +1974,7 @@ type AllocatedDeviceStatus struct {
// NetworkData contains network-related information specific to the device.
//
// +optional
+ // +k8s:optional
NetworkData *NetworkDeviceData `json:"networkData,omitempty" protobuf:"bytes,6,opt,name=networkData"`
}
@@ -1852,6 +1989,8 @@ type NetworkDeviceData struct {
// Must not be longer than 256 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=256
InterfaceName string `json:"interfaceName,omitempty" protobuf:"bytes,1,opt,name=interfaceName"`
// IPs lists the network addresses assigned to the device's network interface.
@@ -1862,6 +2001,10 @@ type NetworkDeviceData struct {
//
// +optional
// +listType=atomic
+ // +k8s:optional
+ // +k8s:listType=atomic
+ // +k8s:unique=set
+ // +k8s:maxItems=16
IPs []string `json:"ips,omitempty" protobuf:"bytes,2,opt,name=ips"`
// HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
@@ -1869,5 +2012,7 @@ type NetworkDeviceData struct {
// Must not be longer than 128 characters.
//
// +optional
+ // +k8s:optional
+ // +k8s:maxLength=128
HardwareAddress string `json:"hardwareAddress,omitempty" protobuf:"bytes,3,opt,name=hardwareAddress"`
}
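
For drivers that report device status, the length and item limits above are small enough to show in full. A minimal sketch, assuming the v1beta2 Go types and made-up interface data, of the NetworkData a driver might publish as part of AllocatedDeviceStatus.

package example

import resourceapi "k8s.io/api/resource/v1beta2"

// ExampleNetworkData reports the network interface backing an allocated
// device. InterfaceName is capped at 256 characters, HardwareAddress at 128,
// and IPs at 16 unique entries.
func ExampleNetworkData() *resourceapi.NetworkDeviceData {
	return &resourceapi.NetworkDeviceData{
		InterfaceName:   "eth1",
		IPs:             []string{"10.9.8.0/24", "2001:db8::1/64"}, // CIDR notation
		HardwareAddress: "ea:9f:cb:40:b1:7b",
	}
}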
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go
index c390ad21..a086f9bf 100644
--- a/operator/vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1beta2
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AllocatedDeviceStatus = map[string]string{
"": "AllocatedDeviceStatus contains the status of an allocated device, if the driver chooses to report it. This may include driver-specific information.\n\nThe combination of Driver, Pool, Device, and ShareID must match the corresponding key in Status.Allocation.Devices.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"shareID": "ShareID uniquely identifies an individual allocation share of the device.",
@@ -103,9 +103,9 @@ func (Counter) SwaggerDoc() map[string]string {
}
var map_CounterSet = map[string]string{
- "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourceSlice.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
+ "": "CounterSet defines a named set of counters that are available to be used by devices defined in the ResourcePool.\n\nThe counters are not allocatable by themselves, but can be referenced by devices. When a device is allocated, the portion of counters it uses will no longer be available for use by other devices.",
"name": "Name defines the name of the counter set. It must be a DNS label.",
- "counters": "Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label.\n\nThe maximum number of counters in all sets is 32.",
+ "counters": "Counters defines the set of counters for this CounterSet The name of each counter must be unique in that set and must be a DNS label.\n\nThe maximum number of counters is 32.",
}
func (CounterSet) SwaggerDoc() map[string]string {
@@ -117,11 +117,11 @@ var map_Device = map[string]string{
"name": "Name is unique identifier among all devices managed by the driver in the pool. It must be a DNS label.",
"attributes": "Attributes defines the set of attributes for this device. The name of each attribute must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
"capacity": "Capacity defines the set of capacities for this device. The name of each capacity must be unique in that set.\n\nThe maximum number of attributes and capacities combined is 32.",
- "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe total number of device counter consumption entries must be <= 32. In addition, the total number in the entire ResourceSlice must be <= 1024 (for example, 64 devices with 16 counters each).",
+ "consumesCounters": "ConsumesCounters defines a list of references to sharedCounters and the set of counters that the device will consume from those counter sets.\n\nThere can only be a single entry per counterSet.\n\nThe maximum number of device counter consumptions per device is 2.",
"nodeName": "NodeName identifies the node where the device is available.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"nodeSelector": "NodeSelector defines the nodes where the device is available.\n\nMust use exactly one term.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
"allNodes": "AllNodes indicates that all nodes have access to the device.\n\nMust only be set if Spec.PerDeviceNodeSelection is set to true. At most one of NodeName, NodeSelector and AllNodes can be set.",
- "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 4.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
+ "taints": "If specified, these are the driver-defined taints.\n\nThe maximum number of taints is 16. If taints are set for any device in a ResourceSlice, then the maximum number of allowed devices per ResourceSlice is 64 instead of 128.\n\nThis is an alpha field and requires enabling the DRADeviceTaints feature gate.",
"bindsToNode": "BindsToNode indicates if the usage of an allocation involving this device has to be limited to exactly the node that was chosen when allocating the claim. If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector to match the node where the allocation was made.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingConditions": "BindingConditions defines the conditions for proceeding with binding. All of these conditions must be set in the per-device status conditions with a value of True to proceed with binding the pod to the node while scheduling the pod.\n\nThe maximum number of binding conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
"bindingFailureConditions": "BindingFailureConditions defines the conditions for binding failure. They may be set in the per-device status conditions. If any is set to \"True\", a binding failure occurred.\n\nThe maximum number of binding failure conditions is 4.\n\nThe conditions must be a valid condition type string.\n\nThis is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus feature gates.",
@@ -256,7 +256,7 @@ func (DeviceConstraint) SwaggerDoc() map[string]string {
var map_DeviceCounterConsumption = map[string]string{
"": "DeviceCounterConsumption defines a set of counters that a device will consume from a CounterSet.",
"counterSet": "CounterSet is the name of the set from which the counters defined will be consumed.",
- "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each).",
+ "counters": "Counters defines the counters that will be consumed by the device.\n\nThe maximum number of counters is 32.",
}
func (DeviceCounterConsumption) SwaggerDoc() map[string]string {
@@ -277,7 +277,7 @@ func (DeviceRequest) SwaggerDoc() map[string]string {
var map_DeviceRequestAllocationResult = map[string]string{
"": "DeviceRequestAllocationResult contains the allocation result for one request.",
"request": "Request is the name of the request in the claim which caused this device to be allocated. If it references a subrequest in the firstAvailable list on a DeviceRequest, this field must include both the name of the main request and the subrequest using the format /.\n\nMultiple devices may have been allocated per request.",
- "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver specifies the name of the DRA driver whose kubelet plugin should be invoked to process the allocation once the claim is needed on a node.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"pool": "This name together with the driver name and the device name field identify which device was allocated (`//`).\n\nMust not be longer than 253 characters and may contain one or more DNS sub-domains separated by slashes.",
"device": "Device references one device instance via its name in the driver's resource pool. It must be a DNS label.",
"adminAccess": "AdminAccess indicates that this device was allocated for administrative access. See the corresponding request field for a definition of mode.\n\nThis is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.",
@@ -320,7 +320,7 @@ var map_DeviceTaint = map[string]string{
"": "The device this taint is attached to has the \"effect\" on any claim which does not tolerate the taint and, through the claim, to pods using the claim.",
"key": "The taint key to be applied to a device. Must be a label name.",
"value": "The taint value corresponding to the taint key. Must be a label value.",
- "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them. Valid effects are NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here.",
+ "effect": "The effect of the taint on claims that do not tolerate the taint and through such claims on the pods using them.\n\nValid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for nodes is not valid here. More effects may get added in the future. Consumers must treat unknown effects like None.",
"timeAdded": "TimeAdded represents the time at which the taint was added. Added automatically during create or update if not set.",
}
@@ -369,7 +369,7 @@ func (NetworkDeviceData) SwaggerDoc() map[string]string {
var map_OpaqueDeviceConfiguration = map[string]string{
"": "OpaqueDeviceConfiguration contains configuration parameters for a driver in a format defined by the driver vendor.",
- "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.",
+ "driver": "Driver is used to determine which kubelet plugin needs to be passed these configuration parameters.\n\nAn admission policy provided by the driver developer could use this to decide whether it needs to validate them.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters.",
"parameters": "Parameters can contain arbitrary data. It is the responsibility of the driver developer to handle validation and versioning. Typically this includes self-identification and a version (\"kind\" + \"apiVersion\" for Kubernetes types), with conversion between different versions.\n\nThe length of the raw data must be smaller or equal to 10 Ki.",
}
@@ -493,14 +493,14 @@ func (ResourceSliceList) SwaggerDoc() map[string]string {
var map_ResourceSliceSpec = map[string]string{
"": "ResourceSliceSpec contains the information published by the driver in one ResourceSlice.",
- "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. This field is immutable.",
+ "driver": "Driver identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver. It should use only lower case characters. This field is immutable.",
"pool": "Pool describes the pool that this ResourceSlice belongs to.",
"nodeName": "NodeName identifies the node which provides the resources in this pool. A field selector can be used to list only ResourceSlice objects belonging to a certain node.\n\nThis field can be used to limit access from nodes to ResourceSlices with the same node name. It also indicates to autoscalers that adding new nodes of the same type as some old node might also make new resources available.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set. This field is immutable.",
"nodeSelector": "NodeSelector defines which nodes have access to the resources in the pool, when that pool is not limited to a single node.\n\nMust use exactly one term.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
"allNodes": "AllNodes indicates that all nodes have access to the resources in the pool.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries.",
+ "devices": "Devices lists some or all of the devices in this pool.\n\nMust not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.",
"perDeviceNodeSelection": "PerDeviceNodeSelection defines whether the access from nodes to resources in the pool is set on the ResourceSlice level or on each device. If it is set to true, every device defined the ResourceSlice must specify this individually.\n\nExactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.",
- "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the SharedCounters must be unique in the ResourceSlice.\n\nThe maximum number of counters in all sets is 32.",
+ "sharedCounters": "SharedCounters defines a list of counter sets, each of which has a name and a list of counters available.\n\nThe names of the counter sets must be unique in the ResourcePool.\n\nOnly one of Devices and SharedCounters can be set in a ResourceSlice.\n\nThe maximum number of counter sets is 8.",
}
func (ResourceSliceSpec) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/resource/v1beta2/zz_generated.model_name.go b/operator/vendor/k8s.io/api/resource/v1beta2/zz_generated.model_name.go
new file mode 100644
index 00000000..0518ff73
--- /dev/null
+++ b/operator/vendor/k8s.io/api/resource/v1beta2/zz_generated.model_name.go
@@ -0,0 +1,237 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta2
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocatedDeviceStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.AllocatedDeviceStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in AllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.AllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CELDeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.CELDeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.CapacityRequestPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequestPolicyRange) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.CapacityRequestPolicyRange"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CapacityRequirements) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.CapacityRequirements"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Counter) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.Counter"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CounterSet) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.CounterSet"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Device) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.Device"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceAllocationConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceAttribute) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceAttribute"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClaimConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClaimConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClass) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClassConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceClassSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceClassSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceConstraint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceConstraint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceCounterConsumption) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceCounterConsumption"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceRequestAllocationResult) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceRequestAllocationResult"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSelector) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceSubRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceSubRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceTaint) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceTaint"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeviceToleration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.DeviceToleration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExactDeviceRequest) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ExactDeviceRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NetworkDeviceData) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.NetworkDeviceData"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in OpaqueDeviceConfiguration) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.OpaqueDeviceConfiguration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaim) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaim"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimConsumerReference) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimConsumerReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimStatus) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplate) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimTemplate"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimTemplateList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceClaimTemplateSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceClaimTemplateSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourcePool) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourcePool"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSlice) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceSlice"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceList) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceSliceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ResourceSliceSpec) OpenAPIModelName() string {
+ return "io.k8s.api.resource.v1beta2.ResourceSliceSpec"
+}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1/doc.go b/operator/vendor/k8s.io/api/scheduling/v1/doc.go
index c587bee9..8fee2e90 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1/doc.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.scheduling.v1
+
// +groupName=scheduling.k8s.io
package v1
diff --git a/operator/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/operator/vendor/k8s.io/api/scheduling/v1/generated.pb.go
index 6fef1a93..752e559e 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1/generated.pb.go
@@ -24,125 +24,16 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *PriorityClass) Reset() { *m = PriorityClass{} }
-func (m *PriorityClass) Reset() { *m = PriorityClass{} }
-func (*PriorityClass) ProtoMessage() {}
-func (*PriorityClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_3f12bd05064e996e, []int{0}
-}
-func (m *PriorityClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClass.Merge(m, src)
-}
-func (m *PriorityClass) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityClass) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityClass proto.InternalMessageInfo
-
-func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
-func (*PriorityClassList) ProtoMessage() {}
-func (*PriorityClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_3f12bd05064e996e, []int{1}
-}
-func (m *PriorityClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClassList.Merge(m, src)
-}
-func (m *PriorityClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass")
- proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_3f12bd05064e996e)
-}
-
-var fileDescriptor_3f12bd05064e996e = []byte{
- // 476 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30,
- 0x18, 0xc6, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x81,
- 0x2c, 0xd8, 0xf4, 0x04, 0x08, 0xe9, 0x24, 0x86, 0x70, 0x12, 0x42, 0x3a, 0x44, 0x95, 0x81, 0x01,
- 0x31, 0xe0, 0xa6, 0x3e, 0xd7, 0x34, 0x89, 0x23, 0xdb, 0xa9, 0xd4, 0x8d, 0x8f, 0xc0, 0x37, 0x62,
- 0xed, 0x78, 0xe3, 0x4d, 0x15, 0x0d, 0x1f, 0x81, 0x8d, 0x09, 0x25, 0x2d, 0x97, 0xfe, 0xb9, 0x0a,
- 0xb6, 0xbc, 0xef, 0xfb, 0xfc, 0x1e, 0xdb, 0x4f, 0x6c, 0xe8, 0x4f, 0x5f, 0x6a, 0x2c, 0x24, 0xa1,
- 0x99, 0x20, 0x3a, 0x9a, 0xb0, 0x71, 0x1e, 0x8b, 0x94, 0x93, 0xd9, 0x80, 0x70, 0x96, 0x32, 0x45,
- 0x0d, 0x1b, 0xe3, 0x4c, 0x49, 0x23, 0x1d, 0x77, 0xad, 0xc4, 0x34, 0x13, 0xb8, 0x56, 0xe2, 0xd9,
- 0xa0, 0xfb, 0x84, 0x0b, 0x33, 0xc9, 0x47, 0x38, 0x92, 0x09, 0xe1, 0x92, 0x4b, 0x52, 0x01, 0xa3,
- 0xfc, 0xaa, 0xaa, 0xaa, 0xa2, 0xfa, 0x5a, 0x1b, 0x75, 0xfb, 0x5b, 0x4b, 0x46, 0x52, 0xb1, 0x3b,
- 0x16, 0xeb, 0x3e, 0xab, 0x35, 0x09, 0x8d, 0x26, 0x22, 0x65, 0x6a, 0x4e, 0xb2, 0x29, 0x2f, 0x1b,
- 0x9a, 0x24, 0xcc, 0xd0, 0xbb, 0x28, 0x72, 0x8c, 0x52, 0x79, 0x6a, 0x44, 0xc2, 0x0e, 0x80, 0x17,
- 0xff, 0x02, 0xca, 0x83, 0x26, 0x74, 0x9f, 0xeb, 0xff, 0x6a, 0xc0, 0xf6, 0x50, 0x09, 0xa9, 0x84,
- 0x99, 0xbf, 0x8e, 0xa9, 0xd6, 0xce, 0x67, 0xd8, 0x2c, 0x77, 0x35, 0xa6, 0x86, 0xba, 0xc0, 0x03,
- 0x7e, 0xeb, 0xec, 0x29, 0xae, 0x03, 0xbb, 0x35, 0xc7, 0xd9, 0x94, 0x97, 0x0d, 0x8d, 0x4b, 0x35,
- 0x9e, 0x0d, 0xf0, 0xfb, 0xd1, 0x17, 0x16, 0x99, 0x77, 0xcc, 0xd0, 0xc0, 0x59, 0x2c, 0x7b, 0x56,
- 0xb1, 0xec, 0xc1, 0xba, 0x17, 0xde, 0xba, 0x3a, 0xa7, 0xd0, 0x9e, 0xd1, 0x38, 0x67, 0x6e, 0xc3,
- 0x03, 0xbe, 0x1d, 0xb4, 0x37, 0x62, 0xfb, 0x43, 0xd9, 0x0c, 0xd7, 0x33, 0xe7, 0x1c, 0xb6, 0x79,
- 0x2c, 0x47, 0x34, 0xbe, 0x60, 0x57, 0x34, 0x8f, 0x8d, 0x7b, 0xe2, 0x01, 0xbf, 0x19, 0x3c, 0xda,
- 0x88, 0xdb, 0x6f, 0xb6, 0x87, 0xe1, 0xae, 0xd6, 0x79, 0x0e, 0x5b, 0x63, 0xa6, 0x23, 0x25, 0x32,
- 0x23, 0x64, 0xea, 0xde, 0xf3, 0x80, 0x7f, 0x3f, 0x78, 0xb8, 0x41, 0x5b, 0x17, 0xf5, 0x28, 0xdc,
- 0xd6, 0x39, 0x1c, 0x76, 0x32, 0xc5, 0x58, 0x52, 0x55, 0x43, 0x19, 0x8b, 0x68, 0xee, 0xda, 0x15,
- 0x7b, 0x5e, 0x2c, 0x7b, 0x9d, 0xe1, 0xde, 0xec, 0xf7, 0xb2, 0x77, 0x7a, 0x78, 0x03, 0xf0, 0xbe,
- 0x2c, 0x3c, 0x30, 0xed, 0x7f, 0x07, 0xf0, 0xc1, 0x4e, 0xea, 0x97, 0x42, 0x1b, 0xe7, 0xd3, 0x41,
- 0xf2, 0xf8, 0xff, 0x92, 0x2f, 0xe9, 0x2a, 0xf7, 0xce, 0xe6, 0x88, 0xcd, 0xbf, 0x9d, 0xad, 0xd4,
- 0x2f, 0xa1, 0x2d, 0x0c, 0x4b, 0xb4, 0xdb, 0xf0, 0x4e, 0xfc, 0xd6, 0xd9, 0x63, 0x7c, 0xec, 0x15,
- 0xe0, 0x9d, 0x9d, 0xd5, 0xbf, 0xe7, 0x6d, 0x49, 0x87, 0x6b, 0x93, 0xe0, 0xd5, 0x62, 0x85, 0xac,
- 0xeb, 0x15, 0xb2, 0x6e, 0x56, 0xc8, 0xfa, 0x5a, 0x20, 0xb0, 0x28, 0x10, 0xb8, 0x2e, 0x10, 0xb8,
- 0x29, 0x10, 0xf8, 0x51, 0x20, 0xf0, 0xed, 0x27, 0xb2, 0x3e, 0xba, 0xc7, 0xde, 0xe4, 0x9f, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0x9a, 0x3d, 0x5f, 0x2e, 0xae, 0x03, 0x00, 0x00,
-}
+func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
func (m *PriorityClass) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/scheduling/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/scheduling/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..261d4903
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1/generated.protomessage.pb.go
@@ -0,0 +1,26 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*PriorityClass) ProtoMessage() {}
+
+func (*PriorityClassList) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/scheduling/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..a5cc1d11
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1/zz_generated.model_name.go
@@ -0,0 +1,32 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClass) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1.PriorityClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClassList) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1.PriorityClassList"
+}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
index 476ab6f6..2b6182e8 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.api.scheduling.v1alpha1
// +groupName=scheduling.k8s.io
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
index 83e504b5..64c39b4c 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
@@ -24,124 +24,165 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *BasicSchedulingPolicy) Reset() { *m = BasicSchedulingPolicy{} }
+
+func (m *GangSchedulingPolicy) Reset() { *m = GangSchedulingPolicy{} }
+
+func (m *PodGroup) Reset() { *m = PodGroup{} }
+
+func (m *PodGroupPolicy) Reset() { *m = PodGroupPolicy{} }
+
+func (m *PriorityClass) Reset() { *m = PriorityClass{} }
+
+func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
+
+func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
+
+func (m *Workload) Reset() { *m = Workload{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *WorkloadList) Reset() { *m = WorkloadList{} }
+
+func (m *WorkloadSpec) Reset() { *m = WorkloadSpec{} }
+
+func (m *BasicSchedulingPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
-func (m *PriorityClass) Reset() { *m = PriorityClass{} }
-func (*PriorityClass) ProtoMessage() {}
-func (*PriorityClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_260442fbb28d876a, []int{0}
+func (m *BasicSchedulingPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PriorityClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (m *BasicSchedulingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ return len(dAtA) - i, nil
}
-func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+
+func (m *GangSchedulingPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *PriorityClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClass.Merge(m, src)
+ return dAtA[:n], nil
}
-func (m *PriorityClass) XXX_Size() int {
- return m.Size()
+
+func (m *GangSchedulingPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PriorityClass) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClass.DiscardUnknown(m)
+
+func (m *GangSchedulingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinCount))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
}
-var xxx_messageInfo_PriorityClass proto.InternalMessageInfo
+func (m *PodGroup) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
-func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
-func (*PriorityClassList) ProtoMessage() {}
-func (*PriorityClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_260442fbb28d876a, []int{1}
+func (m *PodGroup) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *PriorityClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
+
+func (m *PodGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
-func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
+
+func (m *PodGroupPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
- return b[:n], nil
-}
-func (m *PriorityClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClassList.Merge(m, src)
-}
-func (m *PriorityClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass")
- proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_260442fbb28d876a)
-}
-
-var fileDescriptor_260442fbb28d876a = []byte{
- // 480 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30,
- 0x18, 0x86, 0xeb, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xea, 0xe0, 0x46, 0xbd, 0x25,
- 0xcb, 0xd9, 0xf4, 0x04, 0x08, 0xe9, 0xb6, 0x50, 0x09, 0x21, 0x81, 0xa8, 0x32, 0x30, 0x20, 0x06,
- 0xdc, 0xd4, 0xe7, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x53, 0x48, 0x1d,
- 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xbd, 0x4b, 0xdb, 0xc0, 0x71, 0x5b,
- 0xbe, 0xef, 0x7b, 0xde, 0xd7, 0xf6, 0x1b, 0x1b, 0xe2, 0xf9, 0x4b, 0x8d, 0x85, 0x24, 0x34, 0x15,
- 0x44, 0x87, 0x33, 0x36, 0xcd, 0x22, 0x91, 0x70, 0xb2, 0x18, 0xd2, 0x28, 0x9d, 0xd1, 0x21, 0xe1,
- 0x2c, 0x61, 0x8a, 0x1a, 0x36, 0xc5, 0xa9, 0x92, 0x46, 0xda, 0x68, 0xcb, 0x63, 0x9a, 0x0a, 0x5c,
- 0xf1, 0xf8, 0x86, 0xef, 0x9d, 0x71, 0x61, 0x66, 0xd9, 0x04, 0x87, 0x32, 0x26, 0x5c, 0x72, 0x49,
- 0x4a, 0xd9, 0x24, 0xbb, 0x2c, 0xab, 0xb2, 0x28, 0xbf, 0xb6, 0x76, 0xbd, 0xc1, 0xde, 0xf2, 0xa1,
- 0x54, 0x8c, 0x2c, 0x6a, 0x4b, 0xf6, 0x9e, 0x55, 0x4c, 0x4c, 0xc3, 0x99, 0x48, 0x98, 0x5a, 0x92,
- 0x74, 0xce, 0x8b, 0x86, 0x26, 0x31, 0x33, 0xf4, 0x6f, 0x2a, 0xf2, 0x2f, 0x95, 0xca, 0x12, 0x23,
- 0x62, 0x56, 0x13, 0xbc, 0xf8, 0x9f, 0xa0, 0x38, 0x6e, 0x4c, 0x8f, 0x75, 0x83, 0x5f, 0x4d, 0xd8,
- 0x19, 0x2b, 0x21, 0x95, 0x30, 0xcb, 0x57, 0x11, 0xd5, 0xda, 0xfe, 0x0c, 0x5b, 0xc5, 0xae, 0xa6,
- 0xd4, 0x50, 0x07, 0xb8, 0xc0, 0x6b, 0x9f, 0x3f, 0xc5, 0x55, 0x6c, 0xb7, 0xe6, 0x38, 0x9d, 0xf3,
- 0xa2, 0xa1, 0x71, 0x41, 0xe3, 0xc5, 0x10, 0xbf, 0x9f, 0x7c, 0x61, 0xa1, 0x79, 0xc7, 0x0c, 0xf5,
- 0xed, 0xd5, 0xba, 0xdf, 0xc8, 0xd7, 0x7d, 0x58, 0xf5, 0x82, 0x5b, 0x57, 0xfb, 0x14, 0x5a, 0x0b,
- 0x1a, 0x65, 0xcc, 0x69, 0xba, 0xc0, 0xb3, 0xfc, 0xce, 0x0e, 0xb6, 0x3e, 0x14, 0xcd, 0x60, 0x3b,
- 0xb3, 0x2f, 0x60, 0x87, 0x47, 0x72, 0x42, 0xa3, 0x11, 0xbb, 0xa4, 0x59, 0x64, 0x9c, 0x13, 0x17,
- 0x78, 0x2d, 0xff, 0xc9, 0x0e, 0xee, 0xbc, 0xde, 0x1f, 0x06, 0x87, 0xac, 0xfd, 0x1c, 0xb6, 0xa7,
- 0x4c, 0x87, 0x4a, 0xa4, 0x46, 0xc8, 0xc4, 0x79, 0xe0, 0x02, 0xef, 0xa1, 0xff, 0x78, 0x27, 0x6d,
- 0x8f, 0xaa, 0x51, 0xb0, 0xcf, 0xd9, 0x1c, 0x76, 0x53, 0xc5, 0x58, 0x5c, 0x56, 0x63, 0x19, 0x89,
- 0x70, 0xe9, 0x58, 0xa5, 0xf6, 0x22, 0x5f, 0xf7, 0xbb, 0xe3, 0xa3, 0xd9, 0xef, 0x75, 0xff, 0xb4,
- 0x7e, 0x03, 0xf0, 0x31, 0x16, 0xd4, 0x4c, 0x07, 0xdf, 0x01, 0x7c, 0x74, 0x90, 0xfa, 0x5b, 0xa1,
- 0x8d, 0xfd, 0xa9, 0x96, 0x3c, 0xbe, 0x5f, 0xf2, 0x85, 0xba, 0xcc, 0xbd, 0xbb, 0x3b, 0x62, 0xeb,
- 0xa6, 0xb3, 0x97, 0x7a, 0x00, 0x2d, 0x61, 0x58, 0xac, 0x9d, 0xa6, 0x7b, 0xe2, 0xb5, 0xcf, 0xcf,
- 0xf0, 0xdd, 0x6f, 0x01, 0x1f, 0xec, 0xaf, 0xfa, 0x49, 0x6f, 0x0a, 0x8f, 0x60, 0x6b, 0xe5, 0x8f,
- 0x56, 0x1b, 0xd4, 0xb8, 0xda, 0xa0, 0xc6, 0xf5, 0x06, 0x35, 0xbe, 0xe6, 0x08, 0xac, 0x72, 0x04,
- 0xae, 0x72, 0x04, 0xae, 0x73, 0x04, 0x7e, 0xe4, 0x08, 0x7c, 0xfb, 0x89, 0x1a, 0x1f, 0xd1, 0xdd,
- 0xaf, 0xf4, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xfe, 0x45, 0x7e, 0xc6, 0x03, 0x00, 0x00,
+ return dAtA[:n], nil
+}
+
+func (m *PodGroupPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodGroupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Gang != nil {
+ {
+ size, err := m.Gang.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Basic != nil {
+ {
+ size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
}
func (m *PriorityClass) Marshal() (dAtA []byte, err error) {
@@ -247,6 +288,183 @@ func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.APIGroup)
+ copy(dAtA[i:], m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Workload) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Workload) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Workload) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkloadList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkloadList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkloadList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *WorkloadSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkloadSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkloadSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.PodGroups) > 0 {
+ for iNdEx := len(m.PodGroups) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PodGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.ControllerRef != nil {
+ {
+ size, err := m.ControllerRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
@@ -258,39 +476,152 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
-func (m *PriorityClass) Size() (n int) {
+func (m *BasicSchedulingPolicy) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- n += 1 + sovGenerated(uint64(m.Value))
- n += 2
- l = len(m.Description)
- n += 1 + l + sovGenerated(uint64(l))
- if m.PreemptionPolicy != nil {
- l = len(*m.PreemptionPolicy)
- n += 1 + l + sovGenerated(uint64(l))
- }
return n
}
-func (m *PriorityClassList) Size() (n int) {
+func (m *GangSchedulingPolicy) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
+ n += 1 + sovGenerated(uint64(m.MinCount))
+ return n
+}
+
+func (m *PodGroup) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Policy.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *PodGroupPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Basic != nil {
+ l = m.Basic.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Gang != nil {
+ l = m.Gang.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PriorityClass) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Value))
+ n += 2
+ l = len(m.Description)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PreemptionPolicy != nil {
+ l = len(*m.PreemptionPolicy)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *PriorityClassList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *TypedLocalObjectReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *Workload) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *WorkloadList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *WorkloadSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ControllerRef != nil {
+ l = m.ControllerRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.PodGroups) > 0 {
+ for _, e := range m.PodGroups {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
return n
}
@@ -300,6 +631,47 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *BasicSchedulingPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BasicSchedulingPolicy{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *GangSchedulingPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&GangSchedulingPolicy{`,
+ `MinCount:` + fmt.Sprintf("%v", this.MinCount) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodGroup) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodGroup{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "PodGroupPolicy", "PodGroupPolicy", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *PodGroupPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&PodGroupPolicy{`,
+ `Basic:` + strings.Replace(this.Basic.String(), "BasicSchedulingPolicy", "BasicSchedulingPolicy", 1) + `,`,
+ `Gang:` + strings.Replace(this.Gang.String(), "GangSchedulingPolicy", "GangSchedulingPolicy", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *PriorityClass) String() string {
if this == nil {
return "nil"
@@ -330,15 +702,730 @@ func (this *PriorityClassList) String() string {
}, "")
return s
}
+func (this *TypedLocalObjectReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&TypedLocalObjectReference{`,
+ `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Workload) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Workload{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkloadSpec", "WorkloadSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkloadList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Workload{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Workload", "Workload", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&WorkloadList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WorkloadSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPodGroups := "[]PodGroup{"
+ for _, f := range this.PodGroups {
+ repeatedStringForPodGroups += strings.Replace(strings.Replace(f.String(), "PodGroup", "PodGroup", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPodGroups += "}"
+ s := strings.Join([]string{`&WorkloadSpec{`,
+ `ControllerRef:` + strings.Replace(this.ControllerRef.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`,
+ `PodGroups:` + repeatedStringForPodGroups + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *BasicSchedulingPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BasicSchedulingPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BasicSchedulingPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *GangSchedulingPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: GangSchedulingPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: GangSchedulingPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinCount", wireType)
+ }
+ m.MinCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinCount |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodGroup) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodGroup: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodGroup: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PodGroupPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PodGroupPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PodGroupPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Basic == nil {
+ m.Basic = &BasicSchedulingPolicy{}
+ }
+ if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Gang", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Gang == nil {
+ m.Gang = &GangSchedulingPolicy{}
+ }
+ if err := m.Gang.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PriorityClass) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ m.Value = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Value |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.GlobalDefault = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex])
+ m.PreemptionPolicy = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, PriorityClass{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
}
-func (m *PriorityClass) Unmarshal(dAtA []byte) error {
+func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -361,17 +1448,17 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group")
+ return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -381,30 +1468,29 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.APIGroup = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
}
- m.Value = 0
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -414,16 +1500,29 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Value |= int32(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -433,17 +1532,79 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.GlobalDefault = bool(v != 0)
- case 4:
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Workload) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Workload: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Workload: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -453,29 +1614,30 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Description = string(dAtA[iNdEx:postIndex])
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 5:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -485,24 +1647,24 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGenerated
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex])
- m.PreemptionPolicy = &s
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -525,7 +1687,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
+func (m *WorkloadList) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -548,10 +1710,10 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group")
+ return fmt.Errorf("proto: WorkloadList: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: WorkloadList: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
@@ -616,7 +1778,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Items = append(m.Items, PriorityClass{})
+ m.Items = append(m.Items, Workload{})
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -642,6 +1804,126 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *WorkloadSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkloadSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkloadSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ControllerRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ControllerRef == nil {
+ m.ControllerRef = &TypedLocalObjectReference{}
+ }
+ if err := m.ControllerRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodGroups", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PodGroups = append(m.PodGroups, PodGroup{})
+ if err := m.PodGroups[len(m.PodGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
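Editor's note: the Workload, WorkloadList, and WorkloadSpec Unmarshal functions added above all follow the same hand-rolled protobuf decoding pattern — read a varint key, split it into field number and wire type, then dispatch on the field number. A minimal standalone sketch of that key-decoding step is shown below; it mirrors the generated loop but is illustrative only and not part of the vendored code.

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeTag reads one protobuf key (field number + wire type) from data,
// mirroring the varint loop used by the generated Unmarshal functions above.
func decodeTag(data []byte) (fieldNum int32, wireType int, n int, err error) {
	var wire uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, 0, errors.New("integer overflow decoding tag")
		}
		if n >= len(data) {
			return 0, 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		wire |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return int32(wire >> 3), int(wire & 0x7), n, nil
}

func main() {
	// 0x0a encodes field 1 with wire type 2: the length-delimited ObjectMeta
	// field handled by case 1 of Workload.Unmarshal above.
	f, wt, _, _ := decodeTag([]byte{0x0a})
	fmt.Println(f, wt) // 1 2
}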
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
index e42dccc6..6014f60e 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
@@ -29,6 +29,52 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/scheduling/v1alpha1";
+// BasicSchedulingPolicy indicates that standard Kubernetes
+// scheduling behavior should be used.
+message BasicSchedulingPolicy {
+}
+
+// GangSchedulingPolicy defines the parameters for gang scheduling.
+message GangSchedulingPolicy {
+ // MinCount is the minimum number of pods that must be schedulable or scheduled
+ // at the same time for the scheduler to admit the entire group.
+ // It must be a positive integer.
+ //
+ // +required
+ optional int32 minCount = 1;
+}
+
+// PodGroup represents a set of pods with a common scheduling policy.
+message PodGroup {
+ // Name is a unique identifier for the PodGroup within the Workload.
+ // It must be a DNS label. This field is immutable.
+ //
+ // +required
+ optional string name = 1;
+
+ // Policy defines the scheduling policy for this PodGroup.
+ //
+ // +required
+ optional PodGroupPolicy policy = 3;
+}
+
+// PodGroupPolicy defines the scheduling configuration for a PodGroup.
+message PodGroupPolicy {
+ // Basic specifies that the pods in this group should be scheduled using
+ // standard Kubernetes scheduling behavior.
+ //
+ // +optional
+ // +oneOf=PolicySelection
+ optional BasicSchedulingPolicy basic = 2;
+
+ // Gang specifies that the pods in this group should be scheduled using
+ // all-or-nothing semantics.
+ //
+ // +optional
+ // +oneOf=PolicySelection
+ optional GangSchedulingPolicy gang = 3;
+}
+
// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass.
// PriorityClass defines mapping from a priority class name to the priority
// integer value. The value can be any valid integer.
@@ -73,3 +119,72 @@ message PriorityClassList {
repeated PriorityClass items = 2;
}
+// TypedLocalObjectReference allows to reference typed object inside the same namespace.
+message TypedLocalObjectReference {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is empty, the specified Kind must be in the core API group.
+ // For any other third-party types, setting APIGroup is required.
+ // It must be a DNS subdomain.
+ //
+ // +optional
+ optional string apiGroup = 1;
+
+ // Kind is the type of resource being referenced.
+ // It must be a path segment name.
+ //
+ // +required
+ optional string kind = 2;
+
+ // Name is the name of resource being referenced.
+ // It must be a path segment name.
+ //
+ // +required
+ optional string name = 3;
+}
+
+// Workload allows for expressing scheduling constraints that should be used
+// when managing lifecycle of workloads from scheduling perspective,
+// including scheduling, preemption, eviction and other phases.
+message Workload {
+ // Standard object's metadata.
+ // Name must be a DNS subdomain.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the desired behavior of a Workload.
+ //
+ // +required
+ optional WorkloadSpec spec = 2;
+}
+
+// WorkloadList contains a list of Workload resources.
+message WorkloadList {
+ // Standard list metadata.
+ //
+ // +optional
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of Workloads.
+ repeated Workload items = 2;
+}
+
+// WorkloadSpec defines the desired state of a Workload.
+message WorkloadSpec {
+ // ControllerRef is an optional reference to the controlling object, such as a
+ // Deployment or Job. This field is intended for use by tools like CLIs
+ // to provide a link back to the original workload definition.
+ // When set, it cannot be changed.
+ //
+ // +optional
+ optional TypedLocalObjectReference controllerRef = 1;
+
+ // PodGroups is the list of pod groups that make up the Workload.
+ // The maximum number of pod groups is 8. This field is immutable.
+ //
+ // +required
+ // +listType=map
+ // +listMapKey=name
+ repeated PodGroup podGroups = 2;
+}
+
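Editor's note: PodGroupPolicy above is a oneOf — the +oneOf=PolicySelection markers say exactly one of basic or gang is expected to be set. The sketch below spells that constraint out in Go against the matching types vendored later in this patch; the helper name and values are ours, not part of the API, and it assumes the package builds as k8s.io/api/scheduling/v1alpha1 once this patch is vendored.

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
)

// exactlyOnePolicy reports whether a PodGroupPolicy selects exactly one of its
// oneOf members (Basic or Gang). Illustrative helper only.
func exactlyOnePolicy(p schedulingv1alpha1.PodGroupPolicy) bool {
	set := 0
	if p.Basic != nil {
		set++
	}
	if p.Gang != nil {
		set++
	}
	return set == 1
}

func main() {
	gang := schedulingv1alpha1.PodGroupPolicy{
		Gang: &schedulingv1alpha1.GangSchedulingPolicy{MinCount: 4},
	}
	fmt.Println(exactlyOnePolicy(gang)) // true
}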
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a92e57b1
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,42 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*BasicSchedulingPolicy) ProtoMessage() {}
+
+func (*GangSchedulingPolicy) ProtoMessage() {}
+
+func (*PodGroup) ProtoMessage() {}
+
+func (*PodGroupPolicy) ProtoMessage() {}
+
+func (*PriorityClass) ProtoMessage() {}
+
+func (*PriorityClassList) ProtoMessage() {}
+
+func (*TypedLocalObjectReference) ProtoMessage() {}
+
+func (*Workload) ProtoMessage() {}
+
+func (*WorkloadList) ProtoMessage() {}
+
+func (*WorkloadSpec) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/register.go
index 24689f0a..25de55d3 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/register.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/register.go
@@ -46,6 +46,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PriorityClass{},
&PriorityClassList{},
+ &Workload{},
+ &WorkloadList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
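Editor's note: with Workload and WorkloadList added to addKnownTypes, any scheme built through this package should recognize the new kinds. A small sanity-check sketch follows; it assumes the usual exported SchemeBuilder/AddToScheme wiring that k8s.io/api register.go files conventionally provide, which is not shown in this hunk.

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is the conventional exported helper backed by addKnownTypes.
	if err := schedulingv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := schedulingv1alpha1.SchemeGroupVersion.WithKind("Workload")
	fmt.Println(scheme.Recognizes(gvk)) // true once this patch is vendored
}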
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/types.go
index 26ba8ff5..480b53da 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/types.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/types.go
@@ -72,3 +72,130 @@ type PriorityClassList struct {
// items is the list of PriorityClasses
Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Workload allows for expressing scheduling constraints that should be used
+// when managing lifecycle of workloads from scheduling perspective,
+// including scheduling, preemption, eviction and other phases.
+type Workload struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // Name must be a DNS subdomain.
+ //
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Spec defines the desired behavior of a Workload.
+ //
+ // +required
+ Spec WorkloadSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// WorkloadList contains a list of Workload resources.
+type WorkloadList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ //
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // Items is the list of Workloads.
+ Items []Workload `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// WorkloadMaxPodGroups is the maximum number of pod groups per Workload.
+const WorkloadMaxPodGroups = 8
+
+// WorkloadSpec defines the desired state of a Workload.
+type WorkloadSpec struct {
+ // ControllerRef is an optional reference to the controlling object, such as a
+ // Deployment or Job. This field is intended for use by tools like CLIs
+ // to provide a link back to the original workload definition.
+ // When set, it cannot be changed.
+ //
+ // +optional
+ ControllerRef *TypedLocalObjectReference `json:"controllerRef,omitempty" protobuf:"bytes,1,opt,name=controllerRef"`
+
+ // PodGroups is the list of pod groups that make up the Workload.
+ // The maximum number of pod groups is 8. This field is immutable.
+ //
+ // +required
+ // +listType=map
+ // +listMapKey=name
+ PodGroups []PodGroup `json:"podGroups" protobuf:"bytes,2,rep,name=podGroups"`
+}
+
+// TypedLocalObjectReference allows to reference typed object inside the same namespace.
+type TypedLocalObjectReference struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is empty, the specified Kind must be in the core API group.
+ // For any other third-party types, setting APIGroup is required.
+ // It must be a DNS subdomain.
+ //
+ // +optional
+ APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
+ // Kind is the type of resource being referenced.
+ // It must be a path segment name.
+ //
+ // +required
+ Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+ // Name is the name of resource being referenced.
+ // It must be a path segment name.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
+
+// PodGroup represents a set of pods with a common scheduling policy.
+type PodGroup struct {
+ // Name is a unique identifier for the PodGroup within the Workload.
+ // It must be a DNS label. This field is immutable.
+ //
+ // +required
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // Policy defines the scheduling policy for this PodGroup.
+ //
+ // +required
+ Policy PodGroupPolicy `json:"policy" protobuf:"bytes,3,opt,name=policy"`
+}
+
+// PodGroupPolicy defines the scheduling configuration for a PodGroup.
+type PodGroupPolicy struct {
+ // Basic specifies that the pods in this group should be scheduled using
+ // standard Kubernetes scheduling behavior.
+ //
+ // +optional
+ // +oneOf=PolicySelection
+ Basic *BasicSchedulingPolicy `json:"basic,omitempty" protobuf:"bytes,2,opt,name=basic"`
+
+ // Gang specifies that the pods in this group should be scheduled using
+ // all-or-nothing semantics.
+ //
+ // +optional
+ // +oneOf=PolicySelection
+ Gang *GangSchedulingPolicy `json:"gang,omitempty" protobuf:"bytes,3,opt,name=gang"`
+}
+
+// BasicSchedulingPolicy indicates that standard Kubernetes
+// scheduling behavior should be used.
+type BasicSchedulingPolicy struct {
+ // This is intentionally empty. Its presence indicates that the basic
+ // scheduling policy should be applied. In the future, new fields may appear,
+ // describing such constraints on a pod group level without "all or nothing"
+ // (gang) scheduling.
+}
+
+// GangSchedulingPolicy defines the parameters for gang scheduling.
+type GangSchedulingPolicy struct {
+ // MinCount is the minimum number of pods that must be schedulable or scheduled
+ // at the same time for the scheduler to admit the entire group.
+ // It must be a positive integer.
+ //
+ // +required
+ MinCount int32 `json:"minCount" protobuf:"varint,1,opt,name=minCount"`
+}
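Editor's note: a short example of populating the new types end to end — one gang-scheduled pod group that requires four pods before the group is admitted. The object name, group name, and minCount value are illustrative choices, not anything mandated by the API; only the struct shapes come from the types added above.

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	w := schedulingv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{Name: "training-job"},
		Spec: schedulingv1alpha1.WorkloadSpec{
			PodGroups: []schedulingv1alpha1.PodGroup{
				{
					Name: "workers",
					Policy: schedulingv1alpha1.PodGroupPolicy{
						Gang: &schedulingv1alpha1.GangSchedulingPolicy{MinCount: 4},
					},
				},
			},
		},
	}
	fmt.Println(w.Spec.PodGroups[0].Policy.Gang.MinCount) // 4
}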
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
index 557005db..a2915bff 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
@@ -27,6 +27,43 @@ package v1alpha1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_BasicSchedulingPolicy = map[string]string{
+ "": "BasicSchedulingPolicy indicates that standard Kubernetes scheduling behavior should be used.",
+}
+
+func (BasicSchedulingPolicy) SwaggerDoc() map[string]string {
+ return map_BasicSchedulingPolicy
+}
+
+var map_GangSchedulingPolicy = map[string]string{
+ "": "GangSchedulingPolicy defines the parameters for gang scheduling.",
+ "minCount": "MinCount is the minimum number of pods that must be schedulable or scheduled at the same time for the scheduler to admit the entire group. It must be a positive integer.",
+}
+
+func (GangSchedulingPolicy) SwaggerDoc() map[string]string {
+ return map_GangSchedulingPolicy
+}
+
+var map_PodGroup = map[string]string{
+ "": "PodGroup represents a set of pods with a common scheduling policy.",
+ "name": "Name is a unique identifier for the PodGroup within the Workload. It must be a DNS label. This field is immutable.",
+ "policy": "Policy defines the scheduling policy for this PodGroup.",
+}
+
+func (PodGroup) SwaggerDoc() map[string]string {
+ return map_PodGroup
+}
+
+var map_PodGroupPolicy = map[string]string{
+ "": "PodGroupPolicy defines the scheduling configuration for a PodGroup.",
+ "basic": "Basic specifies that the pods in this group should be scheduled using standard Kubernetes scheduling behavior.",
+ "gang": "Gang specifies that the pods in this group should be scheduled using all-or-nothing semantics.",
+}
+
+func (PodGroupPolicy) SwaggerDoc() map[string]string {
+ return map_PodGroupPolicy
+}
+
var map_PriorityClass = map[string]string{
"": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -50,4 +87,45 @@ func (PriorityClassList) SwaggerDoc() map[string]string {
return map_PriorityClassList
}
+var map_TypedLocalObjectReference = map[string]string{
+ "": "TypedLocalObjectReference allows to reference typed object inside the same namespace.",
+ "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is empty, the specified Kind must be in the core API group. For any other third-party types, setting APIGroup is required. It must be a DNS subdomain.",
+ "kind": "Kind is the type of resource being referenced. It must be a path segment name.",
+ "name": "Name is the name of resource being referenced. It must be a path segment name.",
+}
+
+func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
+ return map_TypedLocalObjectReference
+}
+
+var map_Workload = map[string]string{
+ "": "Workload allows for expressing scheduling constraints that should be used when managing lifecycle of workloads from scheduling perspective, including scheduling, preemption, eviction and other phases.",
+ "metadata": "Standard object's metadata. Name must be a DNS subdomain.",
+ "spec": "Spec defines the desired behavior of a Workload.",
+}
+
+func (Workload) SwaggerDoc() map[string]string {
+ return map_Workload
+}
+
+var map_WorkloadList = map[string]string{
+ "": "WorkloadList contains a list of Workload resources.",
+ "metadata": "Standard list metadata.",
+ "items": "Items is the list of Workloads.",
+}
+
+func (WorkloadList) SwaggerDoc() map[string]string {
+ return map_WorkloadList
+}
+
+var map_WorkloadSpec = map[string]string{
+ "": "WorkloadSpec defines the desired state of a Workload.",
+ "controllerRef": "ControllerRef is an optional reference to the controlling object, such as a Deployment or Job. This field is intended for use by tools like CLIs to provide a link back to the original workload definition. When set, it cannot be changed.",
+ "podGroups": "PodGroups is the list of pod groups that make up the Workload. The maximum number of pod groups is 8. This field is immutable.",
+}
+
+func (WorkloadSpec) SwaggerDoc() map[string]string {
+ return map_WorkloadSpec
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
index b130c990..ccaf45d7 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
@@ -26,6 +26,81 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BasicSchedulingPolicy) DeepCopyInto(out *BasicSchedulingPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicSchedulingPolicy.
+func (in *BasicSchedulingPolicy) DeepCopy() *BasicSchedulingPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(BasicSchedulingPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GangSchedulingPolicy) DeepCopyInto(out *GangSchedulingPolicy) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GangSchedulingPolicy.
+func (in *GangSchedulingPolicy) DeepCopy() *GangSchedulingPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(GangSchedulingPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodGroup) DeepCopyInto(out *PodGroup) {
+ *out = *in
+ in.Policy.DeepCopyInto(&out.Policy)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroup.
+func (in *PodGroup) DeepCopy() *PodGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(PodGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodGroupPolicy) DeepCopyInto(out *PodGroupPolicy) {
+ *out = *in
+ if in.Basic != nil {
+ in, out := &in.Basic, &out.Basic
+ *out = new(BasicSchedulingPolicy)
+ **out = **in
+ }
+ if in.Gang != nil {
+ in, out := &in.Gang, &out.Gang
+ *out = new(GangSchedulingPolicy)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupPolicy.
+func (in *PodGroupPolicy) DeepCopy() *PodGroupPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(PodGroupPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) {
*out = *in
@@ -89,3 +164,107 @@ func (in *PriorityClassList) DeepCopyObject() runtime.Object {
}
return nil
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
+func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
+ if in == nil {
+ return nil
+ }
+ out := new(TypedLocalObjectReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Workload) DeepCopyInto(out *Workload) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
+func (in *Workload) DeepCopy() *Workload {
+ if in == nil {
+ return nil
+ }
+ out := new(Workload)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Workload) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Workload, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
+func (in *WorkloadList) DeepCopy() *WorkloadList {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkloadList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *WorkloadList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkloadSpec) DeepCopyInto(out *WorkloadSpec) {
+ *out = *in
+ if in.ControllerRef != nil {
+ in, out := &in.ControllerRef, &out.ControllerRef
+ *out = new(TypedLocalObjectReference)
+ **out = **in
+ }
+ if in.PodGroups != nil {
+ in, out := &in.PodGroups, &out.PodGroups
+ *out = make([]PodGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadSpec.
+func (in *WorkloadSpec) DeepCopy() *WorkloadSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkloadSpec)
+ in.DeepCopyInto(out)
+ return out
+}
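Editor's note: the generated deepcopy functions above allocate fresh memory for every pointer and slice field (ControllerRef, PodGroups, and the Basic/Gang policy pointers), so a copy can be mutated without touching the original. A quick illustration, assuming the vendored package is importable as k8s.io/api/scheduling/v1alpha1:

package main

import (
	"fmt"

	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
)

func main() {
	orig := &schedulingv1alpha1.WorkloadSpec{
		PodGroups: []schedulingv1alpha1.PodGroup{{
			Name: "workers",
			Policy: schedulingv1alpha1.PodGroupPolicy{
				Gang: &schedulingv1alpha1.GangSchedulingPolicy{MinCount: 4},
			},
		}},
	}
	// DeepCopy allocates a new PodGroups slice and a new GangSchedulingPolicy,
	// so edits to the copy do not leak back into the original.
	cp := orig.DeepCopy()
	cp.PodGroups[0].Policy.Gang.MinCount = 8
	fmt.Println(orig.PodGroups[0].Policy.Gang.MinCount, cp.PodGroups[0].Policy.Gang.MinCount) // 4 8
}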
diff --git a/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..25844f41
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,72 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in BasicSchedulingPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.BasicSchedulingPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GangSchedulingPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.GangSchedulingPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodGroup) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.PodGroup"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PodGroupPolicy) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.PodGroupPolicy"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClass) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.PriorityClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClassList) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.PriorityClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypedLocalObjectReference) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.TypedLocalObjectReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Workload) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.Workload"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WorkloadList) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.WorkloadList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WorkloadSpec) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1alpha1.WorkloadSpec"
+}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/operator/vendor/k8s.io/api/scheduling/v1beta1/doc.go
index 1bc36106..ef4bd57d 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.scheduling.v1beta1
// +groupName=scheduling.k8s.io
diff --git a/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
index 68e8e90d..cd25ded4 100644
--- a/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
@@ -24,126 +24,16 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
-
k8s_io_api_core_v1 "k8s.io/api/core/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *PriorityClass) Reset() { *m = PriorityClass{} }
-func (m *PriorityClass) Reset() { *m = PriorityClass{} }
-func (*PriorityClass) ProtoMessage() {}
-func (*PriorityClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_9edc3acf997efcf2, []int{0}
-}
-func (m *PriorityClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClass.Merge(m, src)
-}
-func (m *PriorityClass) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityClass) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityClass proto.InternalMessageInfo
-
-func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
-func (*PriorityClassList) ProtoMessage() {}
-func (*PriorityClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_9edc3acf997efcf2, []int{1}
-}
-func (m *PriorityClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PriorityClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PriorityClassList.Merge(m, src)
-}
-func (m *PriorityClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *PriorityClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_PriorityClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass")
- proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_9edc3acf997efcf2)
-}
-
-var fileDescriptor_9edc3acf997efcf2 = []byte{
- // 481 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x8f, 0xd3, 0x30,
- 0x18, 0x86, 0xe3, 0x1e, 0x91, 0x8a, 0xab, 0x4a, 0x25, 0x08, 0x29, 0xaa, 0x74, 0x69, 0xd4, 0x5b,
- 0x32, 0x70, 0x36, 0x3d, 0x01, 0x42, 0xba, 0x2d, 0x77, 0x12, 0x42, 0x02, 0x51, 0x32, 0x30, 0x20,
- 0x06, 0x9c, 0xd4, 0x97, 0x9a, 0x26, 0x71, 0x64, 0x3b, 0x95, 0xba, 0xf1, 0x13, 0xf8, 0x51, 0x0c,
- 0x1d, 0x6f, 0xbc, 0xa9, 0xa2, 0xe1, 0x27, 0xb0, 0x31, 0xa1, 0xa4, 0xe1, 0xd2, 0x36, 0x50, 0x6e,
- 0xcb, 0xf7, 0x7d, 0xcf, 0xfb, 0xda, 0x7e, 0x63, 0xc3, 0xd3, 0xd9, 0x0b, 0x89, 0x18, 0xc7, 0x24,
- 0x65, 0x58, 0x06, 0x53, 0x3a, 0xc9, 0x22, 0x96, 0x84, 0x78, 0x3e, 0xf2, 0xa9, 0x22, 0x23, 0x1c,
- 0xd2, 0x84, 0x0a, 0xa2, 0xe8, 0x04, 0xa5, 0x82, 0x2b, 0x6e, 0x1c, 0x6f, 0x70, 0x44, 0x52, 0x86,
- 0x6a, 0x1c, 0x55, 0x78, 0xff, 0x34, 0x64, 0x6a, 0x9a, 0xf9, 0x28, 0xe0, 0x31, 0x0e, 0x79, 0xc8,
- 0x71, 0xa9, 0xf2, 0xb3, 0xab, 0xb2, 0x2a, 0x8b, 0xf2, 0x6b, 0xe3, 0xd6, 0x1f, 0x6e, 0x2d, 0x1e,
- 0x70, 0x41, 0xf1, 0xbc, 0xb1, 0x62, 0xff, 0x69, 0xcd, 0xc4, 0x24, 0x98, 0xb2, 0x84, 0x8a, 0x05,
- 0x4e, 0x67, 0x61, 0xd1, 0x90, 0x38, 0xa6, 0x8a, 0xfc, 0x4d, 0x85, 0xff, 0xa5, 0x12, 0x59, 0xa2,
- 0x58, 0x4c, 0x1b, 0x82, 0xe7, 0xff, 0x13, 0x14, 0xa7, 0x8d, 0xc9, 0xbe, 0x6e, 0xf8, 0xb3, 0x05,
- 0xbb, 0x63, 0xc1, 0xb8, 0x60, 0x6a, 0x71, 0x11, 0x11, 0x29, 0x8d, 0x4f, 0xb0, 0x5d, 0xec, 0x6a,
- 0x42, 0x14, 0x31, 0x81, 0x0d, 0x9c, 0xce, 0xd9, 0x13, 0x54, 0xa7, 0x76, 0x6b, 0x8e, 0xd2, 0x59,
- 0x58, 0x34, 0x24, 0x2a, 0x68, 0x34, 0x1f, 0xa1, 0xb7, 0xfe, 0x67, 0x1a, 0xa8, 0x37, 0x54, 0x11,
- 0xd7, 0x58, 0xae, 0x06, 0x5a, 0xbe, 0x1a, 0xc0, 0xba, 0xe7, 0xdd, 0xba, 0x1a, 0x27, 0x50, 0x9f,
- 0x93, 0x28, 0xa3, 0x66, 0xcb, 0x06, 0x8e, 0xee, 0x76, 0x2b, 0x58, 0x7f, 0x5f, 0x34, 0xbd, 0xcd,
- 0xcc, 0x38, 0x87, 0xdd, 0x30, 0xe2, 0x3e, 0x89, 0x2e, 0xe9, 0x15, 0xc9, 0x22, 0x65, 0x1e, 0xd9,
- 0xc0, 0x69, 0xbb, 0x8f, 0x2a, 0xb8, 0xfb, 0x72, 0x7b, 0xe8, 0xed, 0xb2, 0xc6, 0x33, 0xd8, 0x99,
- 0x50, 0x19, 0x08, 0x96, 0x2a, 0xc6, 0x13, 0xf3, 0x9e, 0x0d, 0x9c, 0xfb, 0xee, 0xc3, 0x4a, 0xda,
- 0xb9, 0xac, 0x47, 0xde, 0x36, 0x67, 0x84, 0xb0, 0x97, 0x0a, 0x4a, 0xe3, 0xb2, 0x1a, 0xf3, 0x88,
- 0x05, 0x0b, 0x53, 0x2f, 0xb5, 0xe7, 0xf9, 0x6a, 0xd0, 0x1b, 0xef, 0xcd, 0x7e, 0xad, 0x06, 0x27,
- 0xcd, 0x1b, 0x80, 0xf6, 0x31, 0xaf, 0x61, 0x3a, 0xfc, 0x06, 0xe0, 0x83, 0x9d, 0xd4, 0x5f, 0x33,
- 0xa9, 0x8c, 0x8f, 0x8d, 0xe4, 0xd1, 0xdd, 0x92, 0x2f, 0xd4, 0x65, 0xee, 0xbd, 0xea, 0x88, 0xed,
- 0x3f, 0x9d, 0xad, 0xd4, 0xdf, 0x41, 0x9d, 0x29, 0x1a, 0x4b, 0xb3, 0x65, 0x1f, 0x39, 0x9d, 0xb3,
- 0xc7, 0xe8, 0xe0, 0x53, 0x40, 0x3b, 0xdb, 0xab, 0xff, 0xd1, 0xab, 0xc2, 0xc2, 0xdb, 0x38, 0xb9,
- 0x17, 0xcb, 0xb5, 0xa5, 0x5d, 0xaf, 0x2d, 0xed, 0x66, 0x6d, 0x69, 0x5f, 0x72, 0x0b, 0x2c, 0x73,
- 0x0b, 0x5c, 0xe7, 0x16, 0xb8, 0xc9, 0x2d, 0xf0, 0x3d, 0xb7, 0xc0, 0xd7, 0x1f, 0x96, 0xf6, 0xe1,
- 0xf8, 0xe0, 0x13, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x04, 0x2e, 0xb0, 0xce, 0xc2, 0x03, 0x00,
- 0x00,
-}
+func (m *PriorityClassList) Reset() { *m = PriorityClassList{} }
func (m *PriorityClass) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..0c7339ae
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,26 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*PriorityClass) ProtoMessage() {}
+
+func (*PriorityClassList) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..16f40100
--- /dev/null
+++ b/operator/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,32 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClass) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1beta1.PriorityClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PriorityClassList) OpenAPIModelName() string {
+ return "io.k8s.api.scheduling.v1beta1.PriorityClassList"
+}
diff --git a/operator/vendor/k8s.io/api/storage/v1/doc.go b/operator/vendor/k8s.io/api/storage/v1/doc.go
index 162a9952..d344f997 100644
--- a/operator/vendor/k8s.io/api/storage/v1/doc.go
+++ b/operator/vendor/k8s.io/api/storage/v1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=storage.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.storage.v1
package v1
diff --git a/operator/vendor/k8s.io/api/storage/v1/generated.pb.go b/operator/vendor/k8s.io/api/storage/v1/generated.pb.go
index 2b7aea92..52003c31 100644
--- a/operator/vendor/k8s.io/api/storage/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/storage/v1/generated.pb.go
@@ -23,765 +23,59 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CSIDriver) Reset() { *m = CSIDriver{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CSIDriverList) Reset() { *m = CSIDriverList{} }
-func (m *CSIDriver) Reset() { *m = CSIDriver{} }
-func (*CSIDriver) ProtoMessage() {}
-func (*CSIDriver) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{0}
-}
-func (m *CSIDriver) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriver) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriver.Merge(m, src)
-}
-func (m *CSIDriver) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriver) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriver.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIDriver proto.InternalMessageInfo
+func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} }
-func (m *CSIDriverList) Reset() { *m = CSIDriverList{} }
-func (*CSIDriverList) ProtoMessage() {}
-func (*CSIDriverList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{1}
-}
-func (m *CSIDriverList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriverList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriverList.Merge(m, src)
-}
-func (m *CSIDriverList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriverList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriverList.DiscardUnknown(m)
-}
+func (m *CSINode) Reset() { *m = CSINode{} }
-var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo
+func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} }
-func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} }
-func (*CSIDriverSpec) ProtoMessage() {}
-func (*CSIDriverSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{2}
-}
-func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriverSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriverSpec.Merge(m, src)
-}
-func (m *CSIDriverSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriverSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m)
-}
+func (m *CSINodeList) Reset() { *m = CSINodeList{} }
-var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo
+func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} }
-func (m *CSINode) Reset() { *m = CSINode{} }
-func (*CSINode) ProtoMessage() {}
-func (*CSINode) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{3}
-}
-func (m *CSINode) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINode) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINode.Merge(m, src)
-}
-func (m *CSINode) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINode) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINode.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSINode proto.InternalMessageInfo
-
-func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} }
-func (*CSINodeDriver) ProtoMessage() {}
-func (*CSINodeDriver) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{4}
-}
-func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeDriver) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeDriver.Merge(m, src)
-}
-func (m *CSINodeDriver) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeDriver) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeDriver.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo
-
-func (m *CSINodeList) Reset() { *m = CSINodeList{} }
-func (*CSINodeList) ProtoMessage() {}
-func (*CSINodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{5}
-}
-func (m *CSINodeList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeList.Merge(m, src)
-}
-func (m *CSINodeList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeList.DiscardUnknown(m)
-}
+func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-var xxx_messageInfo_CSINodeList proto.InternalMessageInfo
+func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} }
-func (*CSINodeSpec) ProtoMessage() {}
-func (*CSINodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{6}
-}
-func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeSpec.Merge(m, src)
-}
-func (m *CSINodeSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeSpec.DiscardUnknown(m)
-}
+func (m *StorageClass) Reset() { *m = StorageClass{} }
-var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo
+func (m *StorageClassList) Reset() { *m = StorageClassList{} }
-func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-func (*CSIStorageCapacity) ProtoMessage() {}
-func (*CSIStorageCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{7}
-}
-func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacity.Merge(m, src)
-}
-func (m *CSIStorageCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacity.DiscardUnknown(m)
-}
+func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo
+func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (*CSIStorageCapacityList) ProtoMessage() {}
-func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{8}
-}
-func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacityList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacityList.Merge(m, src)
-}
-func (m *CSIStorageCapacityList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacityList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacityList.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo
+func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-func (m *StorageClass) Reset() { *m = StorageClass{} }
-func (*StorageClass) ProtoMessage() {}
-func (*StorageClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{9}
-}
-func (m *StorageClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageClass.Merge(m, src)
-}
-func (m *StorageClass) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageClass) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageClass.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-var xxx_messageInfo_StorageClass proto.InternalMessageInfo
+func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-func (m *StorageClassList) Reset() { *m = StorageClassList{} }
-func (*StorageClassList) ProtoMessage() {}
-func (*StorageClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{10}
-}
-func (m *StorageClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageClassList.Merge(m, src)
-}
-func (m *StorageClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageClassList.DiscardUnknown(m)
-}
+func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-var xxx_messageInfo_StorageClassList proto.InternalMessageInfo
+func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-func (*TokenRequest) ProtoMessage() {}
-func (*TokenRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{11}
-}
-func (m *TokenRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenRequest.Merge(m, src)
-}
-func (m *TokenRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenRequest.DiscardUnknown(m)
-}
+func (m *VolumeError) Reset() { *m = VolumeError{} }
-var xxx_messageInfo_TokenRequest proto.InternalMessageInfo
-
-func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-func (*VolumeAttachment) ProtoMessage() {}
-func (*VolumeAttachment) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{12}
-}
-func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachment.Merge(m, src)
-}
-func (m *VolumeAttachment) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachment) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo
-
-func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-func (*VolumeAttachmentList) ProtoMessage() {}
-func (*VolumeAttachmentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{13}
-}
-func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentList.Merge(m, src)
-}
-func (m *VolumeAttachmentList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo
-
-func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-func (*VolumeAttachmentSource) ProtoMessage() {}
-func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{14}
-}
-func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSource.Merge(m, src)
-}
-func (m *VolumeAttachmentSource) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSource) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo
-
-func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-func (*VolumeAttachmentSpec) ProtoMessage() {}
-func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{15}
-}
-func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src)
-}
-func (m *VolumeAttachmentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo
-
-func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-func (*VolumeAttachmentStatus) ProtoMessage() {}
-func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{16}
-}
-func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src)
-}
-func (m *VolumeAttachmentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
-
-func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-func (*VolumeAttributesClass) ProtoMessage() {}
-func (*VolumeAttributesClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{17}
-}
-func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClass.Merge(m, src)
-}
-func (m *VolumeAttributesClass) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClass) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo
-
-func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (*VolumeAttributesClassList) ProtoMessage() {}
-func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{18}
-}
-func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClassList.Merge(m, src)
-}
-func (m *VolumeAttributesClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo
-
-func (m *VolumeError) Reset() { *m = VolumeError{} }
-func (*VolumeError) ProtoMessage() {}
-func (*VolumeError) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{19}
-}
-func (m *VolumeError) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeError.Merge(m, src)
-}
-func (m *VolumeError) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeError) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeError proto.InternalMessageInfo
-
-func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} }
-func (*VolumeNodeResources) ProtoMessage() {}
-func (*VolumeNodeResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_662262cc70094b41, []int{20}
-}
-func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeNodeResources) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeNodeResources.Merge(m, src)
-}
-func (m *VolumeNodeResources) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeNodeResources) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1.CSIDriver")
- proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1.CSIDriverList")
- proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1.CSIDriverSpec")
- proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode")
- proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver")
- proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList")
- proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec")
- proto.RegisterType((*CSIStorageCapacity)(nil), "k8s.io.api.storage.v1.CSIStorageCapacity")
- proto.RegisterType((*CSIStorageCapacityList)(nil), "k8s.io.api.storage.v1.CSIStorageCapacityList")
- proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry")
- proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList")
- proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1.TokenRequest")
- proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment")
- proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList")
- proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource")
- proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec")
- proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry")
- proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1.VolumeAttributesClass")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttributesClass.ParametersEntry")
- proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1.VolumeAttributesClassList")
- proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError")
- proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1.VolumeNodeResources")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/storage/v1/generated.proto", fileDescriptor_662262cc70094b41)
-}
-
-var fileDescriptor_662262cc70094b41 = []byte{
- // 1782 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4b, 0x73, 0x2b, 0x47,
- 0x15, 0xf6, 0x58, 0x96, 0x6d, 0xb5, 0xac, 0x6b, 0xbb, 0xaf, 0x1d, 0x26, 0x5e, 0x48, 0xae, 0x49,
- 0x08, 0xce, 0x6b, 0x94, 0xeb, 0x84, 0x54, 0x2a, 0x90, 0x85, 0x47, 0x56, 0x88, 0x0b, 0xcb, 0xd7,
- 0x69, 0x39, 0xa9, 0x14, 0x05, 0x54, 0xda, 0x33, 0x6d, 0xb9, 0x63, 0xcd, 0x23, 0xd3, 0x3d, 0xc2,
- 0x62, 0x05, 0x3f, 0x80, 0x2a, 0xd8, 0xf2, 0x2b, 0xa0, 0x80, 0x0d, 0x4b, 0x16, 0xd4, 0x85, 0x62,
- 0x91, 0x62, 0x95, 0x95, 0x8a, 0x2b, 0xd6, 0xb0, 0x64, 0xe1, 0x15, 0xd5, 0x3d, 0xa3, 0x79, 0x69,
- 0xe4, 0x47, 0xa5, 0x4a, 0x3b, 0xf7, 0x79, 0x7c, 0x7d, 0xba, 0xcf, 0x39, 0x5f, 0x1f, 0x8d, 0xc1,
- 0xb7, 0xaf, 0xde, 0x63, 0x3a, 0x75, 0x9b, 0xd8, 0xa3, 0x4d, 0xc6, 0x5d, 0x1f, 0xf7, 0x48, 0x73,
- 0xf0, 0xa4, 0xd9, 0x23, 0x0e, 0xf1, 0x31, 0x27, 0x96, 0xee, 0xf9, 0x2e, 0x77, 0xe1, 0x76, 0x68,
- 0xa6, 0x63, 0x8f, 0xea, 0x91, 0x99, 0x3e, 0x78, 0xb2, 0xf3, 0x66, 0x8f, 0xf2, 0xcb, 0xe0, 0x5c,
- 0x37, 0x5d, 0xbb, 0xd9, 0x73, 0x7b, 0x6e, 0x53, 0x5a, 0x9f, 0x07, 0x17, 0x72, 0x25, 0x17, 0xf2,
- 0xaf, 0x10, 0x65, 0x47, 0x4b, 0x6d, 0x66, 0xba, 0x7e, 0xd1, 0x4e, 0x3b, 0xef, 0x24, 0x36, 0x36,
- 0x36, 0x2f, 0xa9, 0x43, 0xfc, 0x61, 0xd3, 0xbb, 0xea, 0x49, 0x27, 0x9f, 0x30, 0x37, 0xf0, 0x4d,
- 0xf2, 0x20, 0x2f, 0xd6, 0xb4, 0x09, 0xc7, 0x45, 0x7b, 0x35, 0x67, 0x79, 0xf9, 0x81, 0xc3, 0xa9,
- 0x3d, 0xbd, 0xcd, 0xbb, 0x77, 0x39, 0x30, 0xf3, 0x92, 0xd8, 0x38, 0xef, 0xa7, 0xfd, 0x49, 0x01,
- 0x95, 0x56, 0xf7, 0xe8, 0xd0, 0xa7, 0x03, 0xe2, 0xc3, 0xcf, 0xc1, 0xaa, 0x88, 0xc8, 0xc2, 0x1c,
- 0xab, 0xca, 0xae, 0xb2, 0x57, 0xdd, 0x7f, 0x4b, 0x4f, 0xee, 0x37, 0x06, 0xd6, 0xbd, 0xab, 0x9e,
- 0x10, 0x30, 0x5d, 0x58, 0xeb, 0x83, 0x27, 0xfa, 0xd3, 0xf3, 0x2f, 0x88, 0xc9, 0x3b, 0x84, 0x63,
- 0x03, 0x3e, 0x1b, 0x35, 0x16, 0xc6, 0xa3, 0x06, 0x48, 0x64, 0x28, 0x46, 0x85, 0x1f, 0x82, 0x25,
- 0xe6, 0x11, 0x53, 0x5d, 0x94, 0xe8, 0x2f, 0xeb, 0x85, 0xd9, 0xd3, 0xe3, 0x88, 0xba, 0x1e, 0x31,
- 0x8d, 0xb5, 0x08, 0x71, 0x49, 0xac, 0x90, 0xf4, 0xd7, 0xfe, 0xa8, 0x80, 0x5a, 0x6c, 0x75, 0x4c,
- 0x19, 0x87, 0x3f, 0x9e, 0x8a, 0x5d, 0xbf, 0x5f, 0xec, 0xc2, 0x5b, 0x46, 0xbe, 0x11, 0xed, 0xb3,
- 0x3a, 0x91, 0xa4, 0xe2, 0x6e, 0x83, 0x32, 0xe5, 0xc4, 0x66, 0xea, 0xe2, 0x6e, 0x69, 0xaf, 0xba,
- 0xbf, 0x7b, 0x57, 0xe0, 0x46, 0x2d, 0x02, 0x2b, 0x1f, 0x09, 0x37, 0x14, 0x7a, 0x6b, 0x7f, 0x2f,
- 0xa7, 0xc2, 0x16, 0xc7, 0x81, 0xef, 0x83, 0x47, 0x98, 0x73, 0x6c, 0x5e, 0x22, 0xf2, 0x65, 0x40,
- 0x7d, 0x62, 0xc9, 0xe0, 0x57, 0x0d, 0x38, 0x1e, 0x35, 0x1e, 0x1d, 0x64, 0x34, 0x28, 0x67, 0x29,
- 0x7c, 0x3d, 0xd7, 0x3a, 0x72, 0x2e, 0xdc, 0xa7, 0x4e, 0xc7, 0x0d, 0x1c, 0x2e, 0xaf, 0x35, 0xf2,
- 0x3d, 0xcd, 0x68, 0x50, 0xce, 0x12, 0x9a, 0x60, 0x6b, 0xe0, 0xf6, 0x03, 0x9b, 0x1c, 0xd3, 0x0b,
- 0x62, 0x0e, 0xcd, 0x3e, 0xe9, 0xb8, 0x16, 0x61, 0x6a, 0x69, 0xb7, 0xb4, 0x57, 0x31, 0x9a, 0xe3,
- 0x51, 0x63, 0xeb, 0xd3, 0x02, 0xfd, 0xcd, 0xa8, 0xf1, 0xb8, 0x40, 0x8e, 0x0a, 0xc1, 0xe0, 0x07,
- 0x60, 0x3d, 0xba, 0x9c, 0x16, 0xf6, 0xb0, 0x49, 0xf9, 0x50, 0x5d, 0x92, 0x11, 0x3e, 0x1e, 0x8f,
- 0x1a, 0xeb, 0xdd, 0xac, 0x0a, 0xe5, 0x6d, 0xe1, 0x47, 0xa0, 0x76, 0xc1, 0x7e, 0xe0, 0xbb, 0x81,
- 0x77, 0xea, 0xf6, 0xa9, 0x39, 0x54, 0xcb, 0xbb, 0xca, 0x5e, 0xc5, 0xd0, 0xc6, 0xa3, 0x46, 0xed,
- 0xc3, 0x6e, 0x4a, 0x71, 0x93, 0x17, 0xa0, 0xac, 0x23, 0xfc, 0x1c, 0xd4, 0xb8, 0x7b, 0x45, 0x1c,
- 0x71, 0x75, 0x84, 0x71, 0xa6, 0x2e, 0xcb, 0x34, 0xbe, 0x34, 0x23, 0x8d, 0x67, 0x29, 0x5b, 0x63,
- 0x3b, 0xca, 0x64, 0x2d, 0x2d, 0x65, 0x28, 0x0b, 0x08, 0x5b, 0x60, 0xd3, 0x0f, 0xf3, 0xc2, 0x10,
- 0xf1, 0x82, 0xf3, 0x3e, 0x65, 0x97, 0xea, 0x8a, 0x3c, 0xec, 0xf6, 0x78, 0xd4, 0xd8, 0x44, 0x79,
- 0x25, 0x9a, 0xb6, 0x87, 0xef, 0x80, 0x35, 0x46, 0x8e, 0xa9, 0x13, 0x5c, 0x87, 0xe9, 0x5c, 0x95,
- 0xfe, 0x1b, 0xe3, 0x51, 0x63, 0xad, 0xdb, 0x4e, 0xe4, 0x28, 0x63, 0x05, 0x07, 0x40, 0x73, 0x5c,
- 0x8b, 0x1c, 0xf4, 0xfb, 0xae, 0x89, 0x39, 0x3e, 0xef, 0x93, 0x4f, 0x3c, 0x0b, 0x73, 0x72, 0x4a,
- 0x7c, 0xea, 0x5a, 0x5d, 0x62, 0xba, 0x8e, 0xc5, 0xd4, 0xca, 0xae, 0xb2, 0x57, 0x32, 0x5e, 0x19,
- 0x8f, 0x1a, 0xda, 0xc9, 0x9d, 0xd6, 0xe8, 0x1e, 0x88, 0xda, 0xef, 0x15, 0xb0, 0xd2, 0xea, 0x1e,
- 0x09, 0xb4, 0x39, 0x30, 0xc7, 0x61, 0x86, 0x39, 0xb4, 0xd9, 0x0d, 0x28, 0xe2, 0x99, 0xc9, 0x1b,
- 0xff, 0x0d, 0x79, 0x43, 0xd8, 0x44, 0x9c, 0xb7, 0x0b, 0x96, 0x1c, 0x6c, 0x13, 0x19, 0x75, 0x25,
- 0xf1, 0x39, 0xc1, 0x36, 0x41, 0x52, 0x03, 0x5f, 0x01, 0xcb, 0xe2, 0x36, 0x8e, 0x0e, 0xe5, 0xde,
- 0x15, 0xe3, 0x51, 0x64, 0xb3, 0x7c, 0x22, 0xa5, 0x28, 0xd2, 0x8a, 0xec, 0x71, 0xd7, 0x73, 0xfb,
- 0x6e, 0x6f, 0xf8, 0x43, 0x32, 0x9c, 0xb4, 0x92, 0xcc, 0xde, 0x59, 0x4a, 0x8e, 0x32, 0x56, 0xf0,
- 0x27, 0xa0, 0x8a, 0x93, 0x7b, 0x96, 0xfd, 0x51, 0xdd, 0x7f, 0x6d, 0xc6, 0xf1, 0xc2, 0xd6, 0x13,
- 0xfb, 0xa2, 0xe8, 0xc1, 0x61, 0xc6, 0xfa, 0x78, 0xd4, 0xa8, 0xa6, 0x52, 0x85, 0xd2, 0x78, 0xda,
- 0xef, 0x14, 0x50, 0x8d, 0x0e, 0x3c, 0x07, 0x9a, 0x6c, 0x65, 0x69, 0xb2, 0x7e, 0x7b, 0x96, 0x66,
- 0x90, 0xe4, 0x4f, 0xe3, 0x88, 0x25, 0x43, 0x3e, 0x05, 0x2b, 0x96, 0x4c, 0x15, 0x53, 0x15, 0x89,
- 0xfa, 0xf2, 0xed, 0xa8, 0x11, 0x01, 0xaf, 0x47, 0xd8, 0x2b, 0xe1, 0x9a, 0xa1, 0x09, 0x8a, 0xf6,
- 0xbf, 0x12, 0x80, 0xad, 0xee, 0x51, 0x8e, 0x7e, 0xe6, 0x50, 0xc2, 0x14, 0xac, 0x89, 0x52, 0x99,
- 0x14, 0x43, 0x54, 0xca, 0x6f, 0xdf, 0xf3, 0xfe, 0xf1, 0x39, 0xe9, 0x77, 0x49, 0x9f, 0x98, 0xdc,
- 0xf5, 0xc3, 0xaa, 0x3a, 0x49, 0x81, 0xa1, 0x0c, 0x34, 0x3c, 0x04, 0x1b, 0x13, 0x36, 0xed, 0x63,
- 0xc6, 0x44, 0x35, 0xab, 0x25, 0x59, 0xbd, 0x6a, 0x14, 0xe2, 0x46, 0x37, 0xa7, 0x47, 0x53, 0x1e,
- 0xf0, 0x33, 0xb0, 0x6a, 0xa6, 0x89, 0xfb, 0x8e, 0x62, 0xd1, 0x27, 0x53, 0x90, 0xfe, 0x71, 0x80,
- 0x1d, 0x4e, 0xf9, 0xd0, 0x58, 0x13, 0x85, 0x12, 0x33, 0x7c, 0x8c, 0x06, 0x19, 0xd8, 0xb4, 0xf1,
- 0x35, 0xb5, 0x03, 0x3b, 0x2c, 0xe9, 0x2e, 0xfd, 0x39, 0x91, 0xf4, 0xfe, 0xf0, 0x2d, 0x24, 0xbd,
- 0x76, 0xf2, 0x60, 0x68, 0x1a, 0x5f, 0xfb, 0xab, 0x02, 0x5e, 0x98, 0x4e, 0xfc, 0x1c, 0xda, 0xe2,
- 0x24, 0xdb, 0x16, 0xaf, 0xce, 0x2e, 0xe0, 0x5c, 0x6c, 0x33, 0x3a, 0xe4, 0x57, 0xcb, 0x60, 0x2d,
- 0x9d, 0xbe, 0x39, 0xd4, 0xee, 0x77, 0x41, 0xd5, 0xf3, 0xdd, 0x01, 0x65, 0xd4, 0x75, 0x88, 0x1f,
- 0x31, 0xe1, 0xe3, 0xc8, 0xa5, 0x7a, 0x9a, 0xa8, 0x50, 0xda, 0x0e, 0xf6, 0x00, 0xf0, 0xb0, 0x8f,
- 0x6d, 0xc2, 0x45, 0xff, 0x96, 0xe4, 0xf1, 0xdf, 0x9e, 0x71, 0xfc, 0xf4, 0x89, 0xf4, 0xd3, 0xd8,
- 0xab, 0xed, 0x70, 0x7f, 0x98, 0x44, 0x97, 0x28, 0x50, 0x0a, 0x1a, 0x5e, 0x81, 0x9a, 0x4f, 0xcc,
- 0x3e, 0xa6, 0x76, 0x34, 0x2b, 0x2c, 0xc9, 0x08, 0xdb, 0xe2, 0xe1, 0x46, 0x69, 0xc5, 0xcd, 0xa8,
- 0xf1, 0xd6, 0xf4, 0xb4, 0xaf, 0x9f, 0x12, 0x9f, 0x51, 0xc6, 0x89, 0xc3, 0xc3, 0x82, 0xc9, 0xf8,
- 0xa0, 0x2c, 0xb6, 0x60, 0x7a, 0x5b, 0x3c, 0xbd, 0x4f, 0x3d, 0x4e, 0x5d, 0x87, 0xa9, 0xe5, 0x84,
- 0xe9, 0x3b, 0x29, 0x39, 0xca, 0x58, 0xc1, 0x63, 0xb0, 0x25, 0x98, 0xf9, 0x67, 0xe1, 0x06, 0xed,
- 0x6b, 0x0f, 0x3b, 0xe2, 0x96, 0xd4, 0x65, 0xf9, 0xca, 0xab, 0x62, 0xe4, 0x3a, 0x28, 0xd0, 0xa3,
- 0x42, 0x2f, 0xf8, 0x19, 0xd8, 0x0c, 0x67, 0x2e, 0x83, 0x3a, 0x16, 0x75, 0x7a, 0x62, 0xe2, 0x92,
- 0x03, 0x47, 0xc5, 0x78, 0x4d, 0x74, 0xc4, 0xa7, 0x79, 0xe5, 0x4d, 0x91, 0x10, 0x4d, 0x83, 0xc0,
- 0x2f, 0xc1, 0xa6, 0xdc, 0x91, 0x58, 0x11, 0x9d, 0x50, 0xc2, 0xd4, 0x55, 0x99, 0xba, 0xbd, 0x74,
- 0xea, 0xc4, 0xd5, 0x85, 0xd3, 0x52, 0x48, 0x3a, 0x13, 0x72, 0x3a, 0x23, 0xbe, 0x6d, 0xbc, 0x18,
- 0xe5, 0x6b, 0xf3, 0x20, 0x0f, 0x85, 0xa6, 0xd1, 0x77, 0x3e, 0x00, 0xeb, 0xb9, 0x84, 0xc3, 0x0d,
- 0x50, 0xba, 0x22, 0xc3, 0xf0, 0x59, 0x46, 0xe2, 0x4f, 0xb8, 0x05, 0xca, 0x03, 0xdc, 0x0f, 0x48,
- 0x58, 0x7c, 0x28, 0x5c, 0xbc, 0xbf, 0xf8, 0x9e, 0xa2, 0xfd, 0x59, 0x01, 0x19, 0x3a, 0x9b, 0x43,
- 0x4b, 0x7f, 0x94, 0x6d, 0xe9, 0x97, 0xee, 0x51, 0xd3, 0x33, 0x9a, 0xf9, 0x97, 0x0a, 0x58, 0x4b,
- 0x8f, 0x96, 0xf0, 0x0d, 0xb0, 0x8a, 0x03, 0x8b, 0x12, 0xc7, 0x9c, 0x4c, 0x25, 0x71, 0x20, 0x07,
- 0x91, 0x1c, 0xc5, 0x16, 0x62, 0xf0, 0x24, 0xd7, 0x1e, 0xf5, 0xb1, 0x28, 0xb2, 0xc9, 0xb0, 0xb7,
- 0x28, 0x87, 0x3d, 0xc9, 0x8c, 0xed, 0xbc, 0x12, 0x4d, 0xdb, 0x6b, 0xbf, 0x5d, 0x04, 0x1b, 0x61,
- 0x6d, 0x84, 0x3f, 0x39, 0x6c, 0xe2, 0xf0, 0x39, 0x90, 0x4a, 0x27, 0x33, 0xd3, 0xbd, 0x7e, 0xeb,
- 0xd0, 0x93, 0x04, 0x36, 0x6b, 0xb8, 0x83, 0x9f, 0x80, 0x65, 0xc6, 0x31, 0x0f, 0x98, 0x7c, 0xea,
- 0xaa, 0xfb, 0x6f, 0xde, 0x17, 0x50, 0x3a, 0x25, 0x73, 0x5d, 0xb8, 0x46, 0x11, 0x98, 0xf6, 0x17,
- 0x05, 0x6c, 0xe5, 0x5d, 0xe6, 0x50, 0x61, 0xc7, 0xd9, 0x0a, 0xfb, 0xce, 0x3d, 0x0f, 0x33, 0xa3,
- 0xca, 0xfe, 0xa9, 0x80, 0x17, 0xa6, 0xce, 0x2d, 0x5f, 0x52, 0xc1, 0x4b, 0x5e, 0x8e, 0xfd, 0x4e,
- 0x92, 0x89, 0x58, 0xf2, 0xd2, 0x69, 0x81, 0x1e, 0x15, 0x7a, 0xc1, 0x2f, 0xc0, 0x06, 0x75, 0xfa,
- 0xd4, 0x21, 0xd1, 0xc3, 0x9b, 0xe4, 0xb7, 0x90, 0x3c, 0xf2, 0xc8, 0x32, 0xb9, 0x5b, 0x62, 0x3e,
- 0x39, 0xca, 0xa1, 0xa0, 0x29, 0x5c, 0xed, 0x6f, 0x05, 0x99, 0x91, 0x33, 0xa3, 0x68, 0x21, 0x29,
- 0x21, 0xfe, 0x54, 0x0b, 0x45, 0x72, 0x14, 0x5b, 0xc8, 0xba, 0x91, 0x57, 0x11, 0x05, 0x7a, 0xef,
- 0xba, 0x91, 0x4e, 0xa9, 0xba, 0x91, 0x6b, 0x14, 0x81, 0x89, 0x20, 0xc4, 0x4c, 0x96, 0x9a, 0xbd,
- 0xe2, 0x20, 0x4e, 0x22, 0x39, 0x8a, 0x2d, 0xb4, 0xff, 0x94, 0x0a, 0x12, 0x24, 0x0b, 0x30, 0x75,
- 0x9a, 0xc9, 0xd7, 0x81, 0xfc, 0x69, 0xac, 0xf8, 0x34, 0x16, 0xfc, 0x8d, 0x02, 0x20, 0x8e, 0x21,
- 0x3a, 0x93, 0x02, 0x0d, 0xab, 0xa8, 0xfd, 0xa0, 0x96, 0xd0, 0x0f, 0xa6, 0x70, 0xc2, 0xd7, 0x78,
- 0x27, 0xda, 0x1f, 0x4e, 0x1b, 0xa0, 0x82, 0xcd, 0xa1, 0x05, 0xaa, 0xa1, 0xb4, 0xed, 0xfb, 0xae,
- 0x1f, 0xb5, 0xa7, 0x76, 0x6b, 0x2c, 0xd2, 0xd2, 0xa8, 0xcb, 0x1f, 0x37, 0x89, 0xeb, 0xcd, 0xa8,
- 0x51, 0x4d, 0xe9, 0x51, 0x1a, 0x56, 0xec, 0x62, 0x91, 0x64, 0x97, 0xa5, 0x87, 0xed, 0x72, 0x48,
- 0x66, 0xef, 0x92, 0x82, 0xdd, 0x69, 0x83, 0x6f, 0xcd, 0xb8, 0x96, 0x07, 0xbd, 0x59, 0xa3, 0x45,
- 0xb0, 0x1d, 0xdf, 0xba, 0x4f, 0xcf, 0x03, 0x4e, 0xd8, 0xbc, 0x86, 0xb9, 0x7d, 0x00, 0xc2, 0x1f,
- 0x43, 0xb2, 0x36, 0xc3, 0x59, 0x2e, 0xf6, 0x38, 0x8c, 0x35, 0x28, 0x65, 0x05, 0xbd, 0x82, 0x49,
- 0xee, 0xfb, 0x77, 0x55, 0x53, 0xfa, 0x5c, 0x0f, 0x1d, 0xe9, 0xbe, 0xe9, 0x50, 0xf0, 0x0f, 0x05,
- 0xbc, 0x58, 0x18, 0xc8, 0x1c, 0xb8, 0xfb, 0xe3, 0x2c, 0x77, 0xbf, 0xf1, 0x90, 0x7b, 0x9a, 0x41,
- 0xe0, 0x7f, 0x50, 0x40, 0xba, 0x26, 0xe1, 0x31, 0x58, 0xe2, 0x34, 0x62, 0xe9, 0xec, 0x07, 0x83,
- 0x5b, 0x82, 0x3f, 0xa3, 0x36, 0x49, 0x9e, 0x4e, 0xb1, 0x42, 0x12, 0x05, 0xbe, 0x0a, 0x56, 0x6c,
- 0xc2, 0x18, 0xee, 0x4d, 0xca, 0x21, 0xfe, 0xf9, 0xdc, 0x09, 0xc5, 0x68, 0xa2, 0x87, 0xaf, 0x83,
- 0x0a, 0x11, 0x11, 0xb4, 0xc4, 0xc0, 0x29, 0x3a, 0xb9, 0x6c, 0xd4, 0xc6, 0xa3, 0x46, 0xa5, 0x3d,
- 0x11, 0xa2, 0x44, 0xaf, 0xbd, 0x0b, 0x1e, 0x17, 0x7c, 0xb3, 0x80, 0x0d, 0x50, 0x36, 0xe5, 0x17,
- 0x2e, 0x45, 0xfa, 0x57, 0xc4, 0x69, 0x5b, 0xf2, 0xd3, 0x56, 0x28, 0x37, 0xbe, 0xf7, 0xec, 0x79,
- 0x7d, 0xe1, 0xab, 0xe7, 0xf5, 0x85, 0xaf, 0x9f, 0xd7, 0x17, 0x7e, 0x31, 0xae, 0x2b, 0xcf, 0xc6,
- 0x75, 0xe5, 0xab, 0x71, 0x5d, 0xf9, 0x7a, 0x5c, 0x57, 0xfe, 0x35, 0xae, 0x2b, 0xbf, 0xfe, 0x77,
- 0x7d, 0xe1, 0x47, 0xdb, 0x85, 0xff, 0x23, 0xf8, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0xc6,
- 0x28, 0xb1, 0x3b, 0x18, 0x00, 0x00,
-}
+func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} }
func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -893,6 +187,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.ServiceAccountTokenInSecrets != nil {
+ i--
+ if *m.ServiceAccountTokenInSecrets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ }
if m.NodeAllocatableUpdatePeriodSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeAllocatableUpdatePeriodSeconds))
i--
@@ -1355,7 +659,7 @@ func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Parameters {
keysForParameters = append(keysForParameters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Parameters[string(keysForParameters[iNdEx])]
baseI := i
@@ -1706,7 +1010,7 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
for k := range m.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- {
v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])]
baseI := i
@@ -1761,7 +1065,7 @@ func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Parameters {
keysForParameters = append(keysForParameters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Parameters[string(keysForParameters[iNdEx])]
baseI := i
@@ -1997,6 +1301,9 @@ func (m *CSIDriverSpec) Size() (n int) {
if m.NodeAllocatableUpdatePeriodSeconds != nil {
n += 1 + sovGenerated(uint64(*m.NodeAllocatableUpdatePeriodSeconds))
}
+ if m.ServiceAccountTokenInSecrets != nil {
+ n += 2
+ }
return n
}
@@ -2393,6 +1700,7 @@ func (this *CSIDriverSpec) String() string {
`RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`,
`SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`,
`NodeAllocatableUpdatePeriodSeconds:` + valueToStringGenerated(this.NodeAllocatableUpdatePeriodSeconds) + `,`,
+ `ServiceAccountTokenInSecrets:` + valueToStringGenerated(this.ServiceAccountTokenInSecrets) + `,`,
`}`,
}, "")
return s
@@ -2495,7 +1803,7 @@ func (this *StorageClass) String() string {
for k := range this.Parameters {
keysForParameters = append(keysForParameters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
mapStringForParameters := "map[string]string{"
for _, k := range keysForParameters {
mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
@@ -2600,7 +1908,7 @@ func (this *VolumeAttachmentStatus) String() string {
for k := range this.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
mapStringForAttachmentMetadata := "map[string]string{"
for _, k := range keysForAttachmentMetadata {
mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k])
@@ -2623,7 +1931,7 @@ func (this *VolumeAttributesClass) String() string {
for k := range this.Parameters {
keysForParameters = append(keysForParameters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
mapStringForParameters := "map[string]string{"
for _, k := range keysForParameters {
mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
@@ -3169,6 +2477,27 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error {
}
}
m.NodeAllocatableUpdatePeriodSeconds = &v
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountTokenInSecrets", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ServiceAccountTokenInSecrets = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
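Editor's note on the recurring import swap in this file (and in the v1alpha1/v1beta1 files further down): the generated code drops `github.com/gogo/protobuf/sortkeys` in favor of the standard library. As far as I can tell this is behavior-preserving, since gogo's `sortkeys.Strings` is a thin wrapper over `sort.Strings`, so map keys are still emitted in the same deterministic order by `MarshalToSizedBuffer` and `String`. A minimal sketch of the equivalence (the `keys` slice is illustrative only):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Deterministic map-key ordering, as the generated Marshal/String code relies on.
	keys := []string{"zone", "type", "fsType"}
	sort.Strings(keys) // same result the old sortkeys.Strings call produced
	fmt.Println(keys)  // [fsType type zone]
}
```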
diff --git a/operator/vendor/k8s.io/api/storage/v1/generated.proto b/operator/vendor/k8s.io/api/storage/v1/generated.proto
index d6965bf7..d77bea9c 100644
--- a/operator/vendor/k8s.io/api/storage/v1/generated.proto
+++ b/operator/vendor/k8s.io/api/storage/v1/generated.proto
@@ -225,6 +225,30 @@ message CSIDriverSpec {
// +featureGate=MutableCSINodeAllocatableCount
// +optional
optional int64 nodeAllocatableUpdatePeriodSeconds = 9;
+
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ //
+ // +featureGate=CSIServiceAccountTokenSecrets
+ // +optional
+ optional bool serviceAccountTokenInSecrets = 10;
}
// CSINode holds information about all CSI drivers installed on a node.
@@ -409,6 +433,8 @@ message StorageClass {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// provisioner indicates the type of the provisioner.
+ // +required
+ // +k8s:required
optional string provisioner = 2;
// parameters holds the parameters for the provisioner that should
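Editor's note: the new `serviceAccountTokenInSecrets = 10` declaration lines up with the hand-written marshal/unmarshal hunks earlier in this diff. A proto bool is encoded as a varint (wire type 0), so its tag byte is `(10 << 3) | 0 = 0x50`, which is exactly the byte the generated `MarshalToSizedBuffer` writes and the `case 10` branch in `Unmarshal` decodes. A small sketch of that arithmetic (illustrative only, not part of the vendored code):

```go
package main

import "fmt"

func main() {
	const (
		fieldNumber = 10 // serviceAccountTokenInSecrets
		wireVarint  = 0  // bools are encoded as varints
	)
	tag := byte(fieldNumber<<3 | wireVarint)
	fmt.Printf("0x%X\n", tag) // 0x50, matching dAtA[i] = 0x50 in the generated marshaller
}
```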
diff --git a/operator/vendor/k8s.io/api/storage/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/storage/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..37020604
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1/generated.protomessage.pb.go
@@ -0,0 +1,64 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*CSIDriver) ProtoMessage() {}
+
+func (*CSIDriverList) ProtoMessage() {}
+
+func (*CSIDriverSpec) ProtoMessage() {}
+
+func (*CSINode) ProtoMessage() {}
+
+func (*CSINodeDriver) ProtoMessage() {}
+
+func (*CSINodeList) ProtoMessage() {}
+
+func (*CSINodeSpec) ProtoMessage() {}
+
+func (*CSIStorageCapacity) ProtoMessage() {}
+
+func (*CSIStorageCapacityList) ProtoMessage() {}
+
+func (*StorageClass) ProtoMessage() {}
+
+func (*StorageClassList) ProtoMessage() {}
+
+func (*TokenRequest) ProtoMessage() {}
+
+func (*VolumeAttachment) ProtoMessage() {}
+
+func (*VolumeAttachmentList) ProtoMessage() {}
+
+func (*VolumeAttachmentSource) ProtoMessage() {}
+
+func (*VolumeAttachmentSpec) ProtoMessage() {}
+
+func (*VolumeAttachmentStatus) ProtoMessage() {}
+
+func (*VolumeAttributesClass) ProtoMessage() {}
+
+func (*VolumeAttributesClassList) ProtoMessage() {}
+
+func (*VolumeError) ProtoMessage() {}
+
+func (*VolumeNodeResources) ProtoMessage() {}
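Editor's note: this new file is guarded by the `kubernetes_protomessage_one_more_release` build constraint, so the standalone `ProtoMessage()` markers that this diff removes from `generated.pb.go` now compile only when that tag is supplied. A hedged illustration of how such a constraint behaves (file contents shown for explanation only):

```go
//go:build kubernetes_protomessage_one_more_release

// A file carrying this constraint is skipped by a plain `go build` and is
// only included when building with:
//   go build -tags kubernetes_protomessage_one_more_release
package v1
```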
diff --git a/operator/vendor/k8s.io/api/storage/v1/types.go b/operator/vendor/k8s.io/api/storage/v1/types.go
index 6d004e5b..b198cb71 100644
--- a/operator/vendor/k8s.io/api/storage/v1/types.go
+++ b/operator/vendor/k8s.io/api/storage/v1/types.go
@@ -41,6 +41,8 @@ type StorageClass struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// provisioner indicates the type of the provisioner.
+ // +required
+ // +k8s:required
Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
// parameters holds the parameters for the provisioner that should
@@ -443,6 +445,30 @@ type CSIDriverSpec struct {
// +featureGate=MutableCSINodeAllocatableCount
// +optional
NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty" protobuf:"varint,9,opt,name=nodeAllocatableUpdatePeriodSeconds"`
+
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ //
+ // +featureGate=CSIServiceAccountTokenSecrets
+ // +optional
+ ServiceAccountTokenInSecrets *bool `json:"serviceAccountTokenInSecrets,omitempty" protobuf:"varint,10,opt,name=serviceAccountTokenInSecrets"`
}
// FSGroupPolicy specifies if a CSI Driver supports modifying
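Editor's note: because the API server rejects `serviceAccountTokenInSecrets` without `tokenRequests`, a driver author opting in would set both together. Below is a minimal sketch of such a `CSIDriver` object built against the vendored types in this diff; the driver name, audience, and expiration are illustrative, and the pointer helpers are local to the example:

```go
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func boolPtr(b bool) *bool    { return &b }
func int64Ptr(i int64) *int64 { return &i }

func main() {
	driver := storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.vendor.com"},
		Spec: storagev1.CSIDriverSpec{
			// Tokens are only issued at all when TokenRequests is configured.
			TokenRequests: []storagev1.TokenRequest{
				{Audience: "example-audience", ExpirationSeconds: int64Ptr(3600)},
			},
			// Opt in to receiving those tokens via the Secrets field of
			// NodePublishVolumeRequest instead of VolumeContext.
			ServiceAccountTokenInSecrets: boolPtr(true),
		},
	}
	fmt.Println(driver.Name, *driver.Spec.ServiceAccountTokenInSecrets)
}
```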
diff --git a/operator/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
index 2e5a8443..7f06c274 100644
--- a/operator/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
@@ -58,6 +58,7 @@ var map_CSIDriverSpec = map[string]string{
"requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
"seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".",
"nodeAllocatableUpdatePeriodSeconds": "nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of the CSINode allocatable capacity for this driver. When set, both periodic updates and updates triggered by capacity-related failures are enabled. If not set, no updates occur (neither periodic nor upon detecting capacity-related failures), and the allocatable.count remains static. The minimum allowed value for this field is 10 seconds.\n\nThis is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.\n\nThis field is mutable.",
+ "serviceAccountTokenInSecrets": "serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that service account tokens should be passed via the Secrets field in NodePublishVolumeRequest instead of the VolumeContext field. The CSI specification provides a dedicated Secrets field for sensitive information like tokens, which is the appropriate mechanism for handling credentials. This addresses security concerns where sensitive tokens were being logged as part of volume context.\n\nWhen \"true\", kubelet will pass the tokens only in the Secrets field with the key \"csi.storage.k8s.io/serviceAccount.tokens\". The CSI driver must be updated to read tokens from the Secrets field instead of VolumeContext.\n\nWhen \"false\" or not set, kubelet will pass the tokens in VolumeContext with the key \"csi.storage.k8s.io/serviceAccount.tokens\" (existing behavior). This maintains backward compatibility with existing CSI drivers.\n\nThis field can only be set when TokenRequests is configured. The API server will reject CSIDriver specs that set this field without TokenRequests.\n\nDefault behavior if unset is to pass tokens in the VolumeContext field.",
}
func (CSIDriverSpec) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
index 3379fc45..b9e0a7d9 100644
--- a/operator/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
@@ -137,6 +137,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
*out = new(int64)
**out = **in
}
+ if in.ServiceAccountTokenInSecrets != nil {
+ in, out := &in.ServiceAccountTokenInSecrets, &out.ServiceAccountTokenInSecrets
+ *out = new(bool)
+ **out = **in
+ }
return
}
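Editor's note: the `DeepCopyInto` addition follows the standard pattern for optional pointer fields: allocate a fresh `*bool` and copy the value, so mutating the copy can never flip the flag on the original object. A small illustration of why the extra allocation matters (the types here are stand-ins, not the vendored ones):

```go
package main

import "fmt"

type spec struct{ optIn *bool }

func main() {
	t := true
	orig := spec{optIn: &t}

	shallow := orig // copies the pointer: both structs share one bool
	deep := spec{}
	if orig.optIn != nil {
		// The generated pattern: new allocation, then copy the value.
		v := *orig.optIn
		deep.optIn = &v
	}

	*shallow.optIn = false                // flips orig as well
	fmt.Println(*orig.optIn, *deep.optIn) // false true
}
```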
diff --git a/operator/vendor/k8s.io/api/storage/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/storage/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..6ef63710
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1/zz_generated.model_name.go
@@ -0,0 +1,127 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriver) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSIDriver"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriverList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSIDriverList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriverSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSIDriverSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINode) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSINode"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeDriver) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSINodeDriver"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSINodeList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSINodeSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSIStorageCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacityList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.CSIStorageCapacityList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageClass) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.StorageClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageClassList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.StorageClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenRequest) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.TokenRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachment) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttachment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttachmentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSource) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttachmentSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttachmentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttachmentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClass) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttributesClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClassList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeAttributesClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeError) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeError"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeNodeResources) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1.VolumeNodeResources"
+}
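Editor's note: the new `zz_generated.model_name.go` files give every storage type a stable OpenAPI model name without consulting a scheme or descriptor. A hedged sketch of how a caller might use the method against the vendored files in this diff; the `modelNames` helper and `openAPINamed` interface are hypothetical, not part of the vendored package:

```go
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
)

// openAPINamed is a hypothetical interface: anything exposing
// OpenAPIModelName() can report its canonical OpenAPI model identifier.
type openAPINamed interface{ OpenAPIModelName() string }

func modelNames(objs ...openAPINamed) []string {
	names := make([]string, 0, len(objs))
	for _, o := range objs {
		names = append(names, o.OpenAPIModelName())
	}
	return names
}

func main() {
	fmt.Println(modelNames(storagev1.CSIDriver{}, storagev1.StorageClass{}))
	// [io.k8s.api.storage.v1.CSIDriver io.k8s.api.storage.v1.StorageClass]
}
```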
diff --git a/operator/vendor/k8s.io/api/storage/v1alpha1/doc.go b/operator/vendor/k8s.io/api/storage/v1alpha1/doc.go
index 90af522a..18f9c9c3 100644
--- a/operator/vendor/k8s.io/api/storage/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/storage/v1alpha1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=storage.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.storage.v1alpha1
package v1alpha1
diff --git a/operator/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
index c0a2f36a..ffcb6b5a 100644
--- a/operator/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
@@ -23,397 +23,36 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-func (*CSIStorageCapacity) ProtoMessage() {}
-func (*CSIStorageCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{0}
-}
-func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacity.Merge(m, src)
-}
-func (m *CSIStorageCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo
-
-func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (*CSIStorageCapacityList) ProtoMessage() {}
-func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{1}
-}
-func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacityList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacityList.Merge(m, src)
-}
-func (m *CSIStorageCapacityList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacityList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacityList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo
-
-func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-func (*VolumeAttachment) ProtoMessage() {}
-func (*VolumeAttachment) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{2}
-}
-func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachment.Merge(m, src)
-}
-func (m *VolumeAttachment) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachment) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo
-
-func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-func (*VolumeAttachmentList) ProtoMessage() {}
-func (*VolumeAttachmentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{3}
-}
-func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentList.Merge(m, src)
-}
-func (m *VolumeAttachmentList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo
-
-func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-func (*VolumeAttachmentSource) ProtoMessage() {}
-func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{4}
-}
-func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSource.Merge(m, src)
-}
-func (m *VolumeAttachmentSource) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSource) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m)
-}
+func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo
+func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-func (*VolumeAttachmentSpec) ProtoMessage() {}
-func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{5}
-}
-func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src)
-}
-func (m *VolumeAttachmentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo
+func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-func (*VolumeAttachmentStatus) ProtoMessage() {}
-func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{6}
-}
-func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src)
-}
-func (m *VolumeAttachmentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
+func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-func (*VolumeAttributesClass) ProtoMessage() {}
-func (*VolumeAttributesClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{7}
-}
-func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClass.Merge(m, src)
-}
-func (m *VolumeAttributesClass) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClass) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo
-
-func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (*VolumeAttributesClassList) ProtoMessage() {}
-func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{8}
-}
-func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClassList.Merge(m, src)
-}
-func (m *VolumeAttributesClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo
+func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (m *VolumeError) Reset() { *m = VolumeError{} }
-func (*VolumeError) ProtoMessage() {}
-func (*VolumeError) Descriptor() ([]byte, []int) {
- return fileDescriptor_02e7952e43280c27, []int{9}
-}
-func (m *VolumeError) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeError.Merge(m, src)
-}
-func (m *VolumeError) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeError) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeError proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CSIStorageCapacity)(nil), "k8s.io.api.storage.v1alpha1.CSIStorageCapacity")
- proto.RegisterType((*CSIStorageCapacityList)(nil), "k8s.io.api.storage.v1alpha1.CSIStorageCapacityList")
- proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment")
- proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList")
- proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource")
- proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec")
- proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry")
- proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClass.ParametersEntry")
- proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttributesClassList")
- proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_02e7952e43280c27)
-}
-
-var fileDescriptor_02e7952e43280c27 = []byte{
- // 1031 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4d, 0x6f, 0x1b, 0x45,
- 0x18, 0xce, 0xfa, 0xa3, 0x75, 0xc6, 0x29, 0x75, 0x47, 0x6e, 0x31, 0xae, 0xb4, 0xae, 0x7c, 0x32,
- 0x94, 0xee, 0x92, 0x80, 0x50, 0x85, 0xc4, 0xc1, 0x9b, 0xe4, 0x10, 0x91, 0x84, 0x32, 0x8e, 0x00,
- 0x01, 0x07, 0xc6, 0xeb, 0xc1, 0x9e, 0xc4, 0xfb, 0xa1, 0x99, 0x59, 0x0b, 0x73, 0xe2, 0x27, 0x70,
- 0xe3, 0x1f, 0xf0, 0x07, 0xf8, 0x13, 0x39, 0x20, 0x51, 0xf5, 0xd4, 0x93, 0x45, 0x16, 0x7e, 0x03,
- 0x07, 0x2e, 0xa0, 0x9d, 0x1d, 0xef, 0x6e, 0xbc, 0x76, 0x70, 0x72, 0xc8, 0xcd, 0xf3, 0x7e, 0x3c,
- 0xef, 0xd7, 0xf3, 0xbe, 0x9b, 0x80, 0xa7, 0x67, 0xcf, 0xb9, 0x41, 0x3d, 0x13, 0xfb, 0xd4, 0xe4,
- 0xc2, 0x63, 0x78, 0x48, 0xcc, 0xc9, 0x36, 0x1e, 0xfb, 0x23, 0xbc, 0x6d, 0x0e, 0x89, 0x4b, 0x18,
- 0x16, 0x64, 0x60, 0xf8, 0xcc, 0x13, 0x1e, 0x7c, 0x1c, 0x1b, 0x1b, 0xd8, 0xa7, 0x86, 0x32, 0x36,
- 0xe6, 0xc6, 0xcd, 0x67, 0x43, 0x2a, 0x46, 0x41, 0xdf, 0xb0, 0x3d, 0xc7, 0x1c, 0x7a, 0x43, 0xcf,
- 0x94, 0x3e, 0xfd, 0xe0, 0x3b, 0xf9, 0x92, 0x0f, 0xf9, 0x2b, 0xc6, 0x6a, 0xb6, 0x33, 0x81, 0x6d,
- 0x8f, 0x45, 0x51, 0x17, 0xe3, 0x35, 0x3f, 0x48, 0x6d, 0x1c, 0x6c, 0x8f, 0xa8, 0x4b, 0xd8, 0xd4,
- 0xf4, 0xcf, 0x86, 0xd2, 0x89, 0x11, 0xee, 0x05, 0xcc, 0x26, 0xd7, 0xf2, 0xe2, 0xa6, 0x43, 0x04,
- 0x5e, 0x16, 0xcb, 0x5c, 0xe5, 0xc5, 0x02, 0x57, 0x50, 0x27, 0x1f, 0xe6, 0xc3, 0xff, 0x73, 0xe0,
- 0xf6, 0x88, 0x38, 0x78, 0xd1, 0xaf, 0xfd, 0x77, 0x11, 0xc0, 0xdd, 0xde, 0x41, 0x2f, 0xee, 0xdf,
- 0x2e, 0xf6, 0xb1, 0x4d, 0xc5, 0x14, 0x7e, 0x0b, 0x2a, 0x51, 0x6a, 0x03, 0x2c, 0x70, 0x43, 0x7b,
- 0xa2, 0x75, 0xaa, 0x3b, 0xef, 0x19, 0x69, 0xbb, 0x93, 0x08, 0x86, 0x7f, 0x36, 0x8c, 0x04, 0xdc,
- 0x88, 0xac, 0x8d, 0xc9, 0xb6, 0xf1, 0x69, 0xff, 0x94, 0xd8, 0xe2, 0x88, 0x08, 0x6c, 0xc1, 0xf3,
- 0x59, 0x6b, 0x23, 0x9c, 0xb5, 0x40, 0x2a, 0x43, 0x09, 0x2a, 0xa4, 0x60, 0xcb, 0xf5, 0x06, 0xe4,
- 0xc4, 0xf3, 0xbd, 0xb1, 0x37, 0x9c, 0x36, 0x0a, 0x32, 0xca, 0xfb, 0xeb, 0x45, 0x39, 0xc4, 0x7d,
- 0x32, 0xee, 0x91, 0x31, 0xb1, 0x85, 0xc7, 0xac, 0x5a, 0x38, 0x6b, 0x6d, 0x1d, 0x67, 0xc0, 0xd0,
- 0x25, 0x68, 0xb8, 0x07, 0x6a, 0x8a, 0x1f, 0xbb, 0x63, 0xcc, 0xf9, 0x31, 0x76, 0x48, 0xa3, 0xf8,
- 0x44, 0xeb, 0x6c, 0x5a, 0x0d, 0x95, 0x62, 0xad, 0xb7, 0xa0, 0x47, 0x39, 0x0f, 0xf8, 0x25, 0xa8,
- 0xd8, 0xaa, 0x3d, 0x8d, 0x92, 0x4c, 0xd6, 0xb8, 0x2a, 0x59, 0x63, 0xce, 0x08, 0xe3, 0xb3, 0x00,
- 0xbb, 0x82, 0x8a, 0xa9, 0xb5, 0x15, 0xce, 0x5a, 0x95, 0x79, 0x8b, 0x51, 0x82, 0x06, 0x39, 0x78,
- 0xe0, 0xe0, 0xef, 0xa9, 0x13, 0x38, 0x9f, 0x7b, 0xe3, 0xc0, 0x21, 0x3d, 0xfa, 0x03, 0x69, 0x94,
- 0x6f, 0x14, 0xe2, 0x61, 0x38, 0x6b, 0x3d, 0x38, 0x5a, 0x04, 0x43, 0x79, 0xfc, 0xf6, 0x6f, 0x1a,
- 0x78, 0x94, 0x1f, 0xfc, 0x21, 0xe5, 0x02, 0x7e, 0x93, 0x1b, 0xbe, 0xb1, 0xe6, 0x58, 0x28, 0x8f,
- 0x47, 0x5f, 0x53, 0x7d, 0xad, 0xcc, 0x25, 0x99, 0xc1, 0x9f, 0x80, 0x32, 0x15, 0xc4, 0xe1, 0x8d,
- 0xc2, 0x93, 0x62, 0xa7, 0xba, 0x63, 0x1a, 0x57, 0xac, 0xb1, 0x91, 0xcf, 0xd0, 0xba, 0xa7, 0xb0,
- 0xcb, 0x07, 0x11, 0x0a, 0x8a, 0xc1, 0xda, 0xbf, 0x14, 0x40, 0x2d, 0xae, 0xae, 0x2b, 0x04, 0xb6,
- 0x47, 0x0e, 0x71, 0xc5, 0x2d, 0xb0, 0xb8, 0x07, 0x4a, 0xdc, 0x27, 0xb6, 0x62, 0xef, 0xf6, 0x95,
- 0xb5, 0x2c, 0xa6, 0xd7, 0xf3, 0x89, 0x6d, 0x6d, 0x29, 0xf8, 0x52, 0xf4, 0x42, 0x12, 0x0c, 0x7e,
- 0x0d, 0xee, 0x70, 0x81, 0x45, 0xc0, 0x25, 0x4b, 0x2f, 0x2f, 0xc5, 0x1a, 0xb0, 0xd2, 0xd5, 0x7a,
- 0x43, 0x01, 0xdf, 0x89, 0xdf, 0x48, 0x41, 0xb6, 0xcf, 0x35, 0x50, 0x5f, 0x74, 0xb9, 0x85, 0xa9,
- 0xa3, 0xcb, 0x53, 0x7f, 0x76, 0xad, 0x92, 0x56, 0xcc, 0xfc, 0x95, 0x06, 0x1e, 0xe5, 0xaa, 0x97,
- 0x0b, 0x01, 0x0f, 0x41, 0xdd, 0x27, 0x8c, 0x53, 0x2e, 0x88, 0x2b, 0x62, 0x1b, 0xb9, 0xf6, 0x5a,
- 0xbc, 0xf6, 0xe1, 0xac, 0x55, 0x7f, 0xb1, 0x44, 0x8f, 0x96, 0x7a, 0xc1, 0x53, 0x50, 0xa3, 0xee,
- 0x98, 0xba, 0x44, 0xed, 0x4f, 0x3a, 0xf1, 0x4e, 0xb6, 0x8e, 0xe8, 0xc3, 0x11, 0x35, 0x64, 0x11,
- 0x59, 0x0e, 0xba, 0x1e, 0x9d, 0x99, 0x83, 0x05, 0x14, 0x94, 0xc3, 0x6d, 0xff, 0xbe, 0x64, 0x3e,
- 0x91, 0x02, 0xbe, 0x0b, 0x2a, 0x58, 0x4a, 0x08, 0x53, 0x65, 0x24, 0xfd, 0xee, 0x2a, 0x39, 0x4a,
- 0x2c, 0x24, 0x87, 0x64, 0x2b, 0x96, 0x1c, 0xd6, 0x35, 0x38, 0x24, 0x5d, 0x33, 0x1c, 0x92, 0x6f,
- 0xa4, 0x20, 0xa3, 0x54, 0xa2, 0x03, 0x9b, 0x39, 0xa4, 0x49, 0x2a, 0xc7, 0x4a, 0x8e, 0x12, 0x8b,
- 0xf6, 0xbf, 0xc5, 0x25, 0x63, 0x92, 0x64, 0xcc, 0xd4, 0x34, 0x90, 0x35, 0x55, 0x72, 0x35, 0x0d,
- 0x92, 0x9a, 0x06, 0xf0, 0x67, 0x0d, 0x40, 0x9c, 0x40, 0x1c, 0xcd, 0xc9, 0x1a, 0x33, 0xea, 0x93,
- 0x1b, 0x2c, 0x89, 0xd1, 0xcd, 0xa1, 0xed, 0xbb, 0x82, 0x4d, 0xad, 0xa6, 0xca, 0x02, 0xe6, 0x0d,
- 0xd0, 0x92, 0x14, 0xe0, 0x29, 0xa8, 0xc6, 0xd2, 0x7d, 0xc6, 0x3c, 0xa6, 0xd6, 0xb6, 0xb3, 0x46,
- 0x46, 0xd2, 0xde, 0xd2, 0xc3, 0x59, 0xab, 0xda, 0x4d, 0x01, 0xfe, 0x99, 0xb5, 0xaa, 0x19, 0x3d,
- 0xca, 0x82, 0x47, 0xb1, 0x06, 0x24, 0x8d, 0x55, 0xba, 0x49, 0xac, 0x3d, 0xb2, 0x3a, 0x56, 0x06,
- 0xbc, 0xb9, 0x0f, 0xde, 0x5c, 0xd1, 0x22, 0x58, 0x03, 0xc5, 0x33, 0x32, 0x8d, 0x99, 0x88, 0xa2,
- 0x9f, 0xb0, 0x0e, 0xca, 0x13, 0x3c, 0x0e, 0x62, 0xc6, 0x6d, 0xa2, 0xf8, 0xf1, 0x51, 0xe1, 0xb9,
- 0xd6, 0xfe, 0xab, 0x00, 0x1e, 0x26, 0x13, 0x60, 0xb4, 0x1f, 0x08, 0xc2, 0xe5, 0x87, 0xf5, 0x16,
- 0x2e, 0xf4, 0x0e, 0x00, 0x03, 0x46, 0x27, 0x84, 0x49, 0xb6, 0xca, 0xd4, 0x52, 0x8f, 0xbd, 0x44,
- 0x83, 0x32, 0x56, 0x70, 0x02, 0x80, 0x8f, 0x19, 0x76, 0x88, 0x20, 0x2c, 0x3a, 0xc2, 0x11, 0xbf,
- 0xac, 0xf5, 0xf8, 0x95, 0xad, 0xce, 0x78, 0x91, 0x80, 0xc4, 0xb4, 0x4a, 0xe2, 0xa6, 0x0a, 0x94,
- 0x89, 0xd4, 0xfc, 0x18, 0xdc, 0x5f, 0x70, 0xb9, 0x56, 0x9b, 0x5f, 0x69, 0xe0, 0xad, 0xa5, 0x89,
- 0xdc, 0xc2, 0x7d, 0xff, 0xe2, 0xf2, 0x7d, 0xdf, 0xb9, 0x7e, 0xb7, 0x56, 0x1c, 0xf9, 0x5f, 0x35,
- 0x90, 0xe5, 0x27, 0x3c, 0x04, 0xa5, 0xe8, 0xef, 0x59, 0x55, 0xc2, 0x3b, 0xeb, 0x95, 0x70, 0x42,
- 0x1d, 0x92, 0x7e, 0x6a, 0xa3, 0x17, 0x92, 0x28, 0xf0, 0x6d, 0x70, 0xd7, 0x21, 0x9c, 0xe3, 0xe1,
- 0x9c, 0x1a, 0xf7, 0x95, 0xd1, 0xdd, 0xa3, 0x58, 0x8c, 0xe6, 0x7a, 0xf8, 0x14, 0x6c, 0x92, 0x28,
- 0x83, 0x5d, 0x6f, 0x10, 0x5f, 0xbd, 0xb2, 0x75, 0x2f, 0x9c, 0xb5, 0x36, 0xf7, 0xe7, 0x42, 0x94,
- 0xea, 0xad, 0xee, 0xf9, 0x85, 0xbe, 0xf1, 0xf2, 0x42, 0xdf, 0x78, 0x7d, 0xa1, 0x6f, 0xfc, 0x18,
- 0xea, 0xda, 0x79, 0xa8, 0x6b, 0x2f, 0x43, 0x5d, 0x7b, 0x1d, 0xea, 0xda, 0x1f, 0xa1, 0xae, 0xfd,
- 0xf4, 0xa7, 0xbe, 0xf1, 0xd5, 0xe3, 0x2b, 0xfe, 0xdd, 0xf9, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x23,
- 0x8e, 0x6a, 0x20, 0x0c, 0x0d, 0x00, 0x00,
-}
+func (m *VolumeError) Reset() { *m = VolumeError{} }
func (m *CSIStorageCapacity) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -770,7 +409,7 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
for k := range m.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- {
v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])]
baseI := i
@@ -825,7 +464,7 @@ func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Parameters {
keysForParameters = append(keysForParameters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Parameters[string(keysForParameters[iNdEx])]
baseI := i
@@ -1244,7 +883,7 @@ func (this *VolumeAttachmentStatus) String() string {
for k := range this.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
mapStringForAttachmentMetadata := "map[string]string{"
for _, k := range keysForAttachmentMetadata {
mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k])
@@ -1267,7 +906,7 @@ func (this *VolumeAttributesClass) String() string {
for k := range this.Parameters {
keysForParameters = append(keysForParameters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
mapStringForParameters := "map[string]string{"
for _, k := range keysForParameters {
mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
diff --git a/operator/vendor/k8s.io/api/storage/v1alpha1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/storage/v1alpha1/generated.protomessage.pb.go
new file mode 100644
index 00000000..d58dcac3
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1alpha1/generated.protomessage.pb.go
@@ -0,0 +1,42 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1alpha1
+
+func (*CSIStorageCapacity) ProtoMessage() {}
+
+func (*CSIStorageCapacityList) ProtoMessage() {}
+
+func (*VolumeAttachment) ProtoMessage() {}
+
+func (*VolumeAttachmentList) ProtoMessage() {}
+
+func (*VolumeAttachmentSource) ProtoMessage() {}
+
+func (*VolumeAttachmentSpec) ProtoMessage() {}
+
+func (*VolumeAttachmentStatus) ProtoMessage() {}
+
+func (*VolumeAttributesClass) ProtoMessage() {}
+
+func (*VolumeAttributesClassList) ProtoMessage() {}
+
+func (*VolumeError) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/storage/v1alpha1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/storage/v1alpha1/zz_generated.model_name.go
new file mode 100644
index 00000000..d35ce682
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1alpha1/zz_generated.model_name.go
@@ -0,0 +1,72 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.CSIStorageCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacityList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.CSIStorageCapacityList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachment) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttachment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttachmentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSource) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttachmentSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttachmentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttachmentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClass) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttributesClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClassList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeError) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1alpha1.VolumeError"
+}
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/doc.go b/operator/vendor/k8s.io/api/storage/v1beta1/doc.go
index 174482b6..f0eac351 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/doc.go
@@ -19,5 +19,6 @@ limitations under the License.
// +groupName=storage.k8s.io
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.storage.v1beta1
package v1beta1
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
index 6d75868d..ed03dd50 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
@@ -23,765 +23,59 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
k8s_io_api_core_v1 "k8s.io/api/core/v1"
v11 "k8s.io/api/core/v1"
resource "k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
+func (m *CSIDriver) Reset() { *m = CSIDriver{} }
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *CSIDriverList) Reset() { *m = CSIDriverList{} }
-func (m *CSIDriver) Reset() { *m = CSIDriver{} }
-func (*CSIDriver) ProtoMessage() {}
-func (*CSIDriver) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{0}
-}
-func (m *CSIDriver) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriver) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriver.Merge(m, src)
-}
-func (m *CSIDriver) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriver) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriver.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSIDriver proto.InternalMessageInfo
+func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} }
-func (m *CSIDriverList) Reset() { *m = CSIDriverList{} }
-func (*CSIDriverList) ProtoMessage() {}
-func (*CSIDriverList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{1}
-}
-func (m *CSIDriverList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriverList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriverList.Merge(m, src)
-}
-func (m *CSIDriverList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriverList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriverList.DiscardUnknown(m)
-}
+func (m *CSINode) Reset() { *m = CSINode{} }
-var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo
+func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} }
-func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} }
-func (*CSIDriverSpec) ProtoMessage() {}
-func (*CSIDriverSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{2}
-}
-func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIDriverSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIDriverSpec.Merge(m, src)
-}
-func (m *CSIDriverSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIDriverSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m)
-}
+func (m *CSINodeList) Reset() { *m = CSINodeList{} }
-var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo
+func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} }
-func (m *CSINode) Reset() { *m = CSINode{} }
-func (*CSINode) ProtoMessage() {}
-func (*CSINode) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{3}
-}
-func (m *CSINode) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINode) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINode.Merge(m, src)
-}
-func (m *CSINode) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINode) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINode.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSINode proto.InternalMessageInfo
-
-func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} }
-func (*CSINodeDriver) ProtoMessage() {}
-func (*CSINodeDriver) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{4}
-}
-func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeDriver) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeDriver.Merge(m, src)
-}
-func (m *CSINodeDriver) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeDriver) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeDriver.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo
-
-func (m *CSINodeList) Reset() { *m = CSINodeList{} }
-func (*CSINodeList) ProtoMessage() {}
-func (*CSINodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{5}
-}
-func (m *CSINodeList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeList.Merge(m, src)
-}
-func (m *CSINodeList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeList.DiscardUnknown(m)
-}
+func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-var xxx_messageInfo_CSINodeList proto.InternalMessageInfo
+func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} }
-func (*CSINodeSpec) ProtoMessage() {}
-func (*CSINodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{6}
-}
-func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSINodeSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSINodeSpec.Merge(m, src)
-}
-func (m *CSINodeSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CSINodeSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CSINodeSpec.DiscardUnknown(m)
-}
+func (m *StorageClass) Reset() { *m = StorageClass{} }
-var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo
+func (m *StorageClassList) Reset() { *m = StorageClassList{} }
-func (m *CSIStorageCapacity) Reset() { *m = CSIStorageCapacity{} }
-func (*CSIStorageCapacity) ProtoMessage() {}
-func (*CSIStorageCapacity) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{7}
-}
-func (m *CSIStorageCapacity) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacity.Merge(m, src)
-}
-func (m *CSIStorageCapacity) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacity) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacity.DiscardUnknown(m)
-}
+func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-var xxx_messageInfo_CSIStorageCapacity proto.InternalMessageInfo
+func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-func (m *CSIStorageCapacityList) Reset() { *m = CSIStorageCapacityList{} }
-func (*CSIStorageCapacityList) ProtoMessage() {}
-func (*CSIStorageCapacityList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{8}
-}
-func (m *CSIStorageCapacityList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CSIStorageCapacityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CSIStorageCapacityList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CSIStorageCapacityList.Merge(m, src)
-}
-func (m *CSIStorageCapacityList) XXX_Size() int {
- return m.Size()
-}
-func (m *CSIStorageCapacityList) XXX_DiscardUnknown() {
- xxx_messageInfo_CSIStorageCapacityList.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-var xxx_messageInfo_CSIStorageCapacityList proto.InternalMessageInfo
+func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-func (m *StorageClass) Reset() { *m = StorageClass{} }
-func (*StorageClass) ProtoMessage() {}
-func (*StorageClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{9}
-}
-func (m *StorageClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageClass.Merge(m, src)
-}
-func (m *StorageClass) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageClass) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageClass.DiscardUnknown(m)
-}
+func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-var xxx_messageInfo_StorageClass proto.InternalMessageInfo
+func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-func (m *StorageClassList) Reset() { *m = StorageClassList{} }
-func (*StorageClassList) ProtoMessage() {}
-func (*StorageClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{10}
-}
-func (m *StorageClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageClassList.Merge(m, src)
-}
-func (m *StorageClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageClassList.DiscardUnknown(m)
-}
+func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-var xxx_messageInfo_StorageClassList proto.InternalMessageInfo
+func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (m *TokenRequest) Reset() { *m = TokenRequest{} }
-func (*TokenRequest) ProtoMessage() {}
-func (*TokenRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{11}
-}
-func (m *TokenRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TokenRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TokenRequest.Merge(m, src)
-}
-func (m *TokenRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *TokenRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TokenRequest.DiscardUnknown(m)
-}
+func (m *VolumeError) Reset() { *m = VolumeError{} }
-var xxx_messageInfo_TokenRequest proto.InternalMessageInfo
-
-func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} }
-func (*VolumeAttachment) ProtoMessage() {}
-func (*VolumeAttachment) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{12}
-}
-func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachment) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachment.Merge(m, src)
-}
-func (m *VolumeAttachment) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachment) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachment.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo
-
-func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} }
-func (*VolumeAttachmentList) ProtoMessage() {}
-func (*VolumeAttachmentList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{13}
-}
-func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentList.Merge(m, src)
-}
-func (m *VolumeAttachmentList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo
-
-func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} }
-func (*VolumeAttachmentSource) ProtoMessage() {}
-func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{14}
-}
-func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSource.Merge(m, src)
-}
-func (m *VolumeAttachmentSource) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSource) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo
-
-func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} }
-func (*VolumeAttachmentSpec) ProtoMessage() {}
-func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{15}
-}
-func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src)
-}
-func (m *VolumeAttachmentSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo
-
-func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} }
-func (*VolumeAttachmentStatus) ProtoMessage() {}
-func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{16}
-}
-func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src)
-}
-func (m *VolumeAttachmentStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
-
-func (m *VolumeAttributesClass) Reset() { *m = VolumeAttributesClass{} }
-func (*VolumeAttributesClass) ProtoMessage() {}
-func (*VolumeAttributesClass) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{17}
-}
-func (m *VolumeAttributesClass) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClass) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClass.Merge(m, src)
-}
-func (m *VolumeAttributesClass) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClass) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClass.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClass proto.InternalMessageInfo
-
-func (m *VolumeAttributesClassList) Reset() { *m = VolumeAttributesClassList{} }
-func (*VolumeAttributesClassList) ProtoMessage() {}
-func (*VolumeAttributesClassList) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{18}
-}
-func (m *VolumeAttributesClassList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeAttributesClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeAttributesClassList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeAttributesClassList.Merge(m, src)
-}
-func (m *VolumeAttributesClassList) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeAttributesClassList) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeAttributesClassList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeAttributesClassList proto.InternalMessageInfo
-
-func (m *VolumeError) Reset() { *m = VolumeError{} }
-func (*VolumeError) ProtoMessage() {}
-func (*VolumeError) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{19}
-}
-func (m *VolumeError) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeError.Merge(m, src)
-}
-func (m *VolumeError) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeError) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeError proto.InternalMessageInfo
-
-func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} }
-func (*VolumeNodeResources) ProtoMessage() {}
-func (*VolumeNodeResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_73e4f72503e71065, []int{20}
-}
-func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *VolumeNodeResources) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VolumeNodeResources.Merge(m, src)
-}
-func (m *VolumeNodeResources) XXX_Size() int {
- return m.Size()
-}
-func (m *VolumeNodeResources) XXX_DiscardUnknown() {
- xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver")
- proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList")
- proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec")
- proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode")
- proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver")
- proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList")
- proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec")
- proto.RegisterType((*CSIStorageCapacity)(nil), "k8s.io.api.storage.v1beta1.CSIStorageCapacity")
- proto.RegisterType((*CSIStorageCapacityList)(nil), "k8s.io.api.storage.v1beta1.CSIStorageCapacityList")
- proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry")
- proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList")
- proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.storage.v1beta1.TokenRequest")
- proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment")
- proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList")
- proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource")
- proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec")
- proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry")
- proto.RegisterType((*VolumeAttributesClass)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClass.ParametersEntry")
- proto.RegisterType((*VolumeAttributesClassList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttributesClassList")
- proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError")
- proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_73e4f72503e71065)
-}
-
-var fileDescriptor_73e4f72503e71065 = []byte{
- // 1787 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4f, 0x6f, 0x24, 0x47,
- 0x15, 0x77, 0x7b, 0xfc, 0x6f, 0x6a, 0xec, 0xb5, 0x5d, 0xeb, 0x0d, 0x13, 0x1f, 0x66, 0xac, 0x46,
- 0x24, 0xde, 0x25, 0xf4, 0xec, 0x9a, 0x10, 0xad, 0x22, 0x45, 0xc2, 0x6d, 0x1b, 0xe2, 0xc4, 0xf6,
- 0x3a, 0x35, 0x66, 0x15, 0x45, 0x1c, 0xa8, 0xe9, 0xae, 0x1d, 0x57, 0x3c, 0xfd, 0x27, 0x5d, 0xd5,
- 0xc6, 0xc3, 0x09, 0xbe, 0x01, 0xe2, 0xc0, 0x27, 0xe0, 0x1b, 0x20, 0x90, 0xe0, 0xc2, 0x91, 0x95,
- 0x90, 0x20, 0x70, 0xca, 0x69, 0xc4, 0x4e, 0x3e, 0x02, 0x12, 0x07, 0x8b, 0x03, 0xaa, 0xea, 0x9a,
- 0xfe, 0x3f, 0xeb, 0x19, 0x90, 0xe6, 0xe6, 0x7a, 0x7f, 0x7e, 0xf5, 0xaa, 0xde, 0x7b, 0xbf, 0x7a,
- 0x3d, 0x06, 0x8f, 0xae, 0x9e, 0x32, 0x83, 0x7a, 0x2d, 0xec, 0xd3, 0x16, 0xe3, 0x5e, 0x80, 0xbb,
- 0xa4, 0x75, 0xfd, 0xa4, 0x43, 0x38, 0x7e, 0xd2, 0xea, 0x12, 0x97, 0x04, 0x98, 0x13, 0xdb, 0xf0,
- 0x03, 0x8f, 0x7b, 0x70, 0x3b, 0xb2, 0x35, 0xb0, 0x4f, 0x0d, 0x65, 0x6b, 0x28, 0xdb, 0xed, 0xef,
- 0x74, 0x29, 0xbf, 0x0c, 0x3b, 0x86, 0xe5, 0x39, 0xad, 0xae, 0xd7, 0xf5, 0x5a, 0xd2, 0xa5, 0x13,
- 0xbe, 0x90, 0x2b, 0xb9, 0x90, 0x7f, 0x45, 0x50, 0xdb, 0x7a, 0x6a, 0x5b, 0xcb, 0x0b, 0xc4, 0x9e,
- 0xf9, 0xed, 0xb6, 0xdf, 0x4d, 0x6c, 0x1c, 0x6c, 0x5d, 0x52, 0x97, 0x04, 0xfd, 0x96, 0x7f, 0xd5,
- 0x95, 0x4e, 0x01, 0x61, 0x5e, 0x18, 0x58, 0x64, 0x2a, 0x2f, 0xd6, 0x72, 0x08, 0xc7, 0x65, 0x7b,
- 0xb5, 0xc6, 0x79, 0x05, 0xa1, 0xcb, 0xa9, 0x53, 0xdc, 0xe6, 0xbd, 0xbb, 0x1c, 0x98, 0x75, 0x49,
- 0x1c, 0x9c, 0xf7, 0xd3, 0xff, 0xa8, 0x81, 0xea, 0x41, 0xfb, 0xf8, 0x30, 0xa0, 0xd7, 0x24, 0x80,
- 0x3f, 0x01, 0x2b, 0x22, 0x22, 0x1b, 0x73, 0x5c, 0xd7, 0x76, 0xb4, 0xdd, 0xda, 0xde, 0x63, 0x23,
- 0xb9, 0xe4, 0x18, 0xd8, 0xf0, 0xaf, 0xba, 0x42, 0xc0, 0x0c, 0x61, 0x6d, 0x5c, 0x3f, 0x31, 0x9e,
- 0x75, 0x3e, 0x27, 0x16, 0x3f, 0x25, 0x1c, 0x9b, 0xf0, 0xe5, 0xa0, 0x39, 0x37, 0x1c, 0x34, 0x41,
- 0x22, 0x43, 0x31, 0x2a, 0xfc, 0x18, 0x2c, 0x30, 0x9f, 0x58, 0xf5, 0x79, 0x89, 0xfe, 0xd0, 0x18,
- 0x9f, 0x42, 0x23, 0x0e, 0xab, 0xed, 0x13, 0xcb, 0x5c, 0x55, 0xb0, 0x0b, 0x62, 0x85, 0x24, 0x88,
- 0xfe, 0x07, 0x0d, 0xac, 0xc5, 0x56, 0x27, 0x94, 0x71, 0xf8, 0xe3, 0xc2, 0x01, 0x8c, 0xc9, 0x0e,
- 0x20, 0xbc, 0x65, 0xf8, 0x1b, 0x6a, 0x9f, 0x95, 0x91, 0x24, 0x15, 0xfc, 0x47, 0x60, 0x91, 0x72,
- 0xe2, 0xb0, 0xfa, 0xfc, 0x4e, 0x65, 0xb7, 0xb6, 0xf7, 0xad, 0x89, 0xa2, 0x37, 0xd7, 0x14, 0xe2,
- 0xe2, 0xb1, 0xf0, 0x45, 0x11, 0x84, 0xfe, 0xb7, 0xc5, 0x54, 0xec, 0xe2, 0x4c, 0xf0, 0x7d, 0x70,
- 0x0f, 0x73, 0x8e, 0xad, 0x4b, 0x44, 0xbe, 0x08, 0x69, 0x40, 0x6c, 0x79, 0x82, 0x15, 0x13, 0x0e,
- 0x07, 0xcd, 0x7b, 0xfb, 0x19, 0x0d, 0xca, 0x59, 0x0a, 0x5f, 0xdf, 0xb3, 0x8f, 0xdd, 0x17, 0xde,
- 0x33, 0xf7, 0xd4, 0x0b, 0x5d, 0x2e, 0x2f, 0x58, 0xf9, 0x9e, 0x67, 0x34, 0x28, 0x67, 0x09, 0x2d,
- 0xb0, 0x75, 0xed, 0xf5, 0x42, 0x87, 0x9c, 0xd0, 0x17, 0xc4, 0xea, 0x5b, 0x3d, 0x72, 0xea, 0xd9,
- 0x84, 0xd5, 0x2b, 0x3b, 0x95, 0xdd, 0xaa, 0xd9, 0x1a, 0x0e, 0x9a, 0x5b, 0xcf, 0x4b, 0xf4, 0xb7,
- 0x83, 0xe6, 0xfd, 0x12, 0x39, 0x2a, 0x05, 0x83, 0x1f, 0x80, 0x75, 0x75, 0x43, 0x07, 0xd8, 0xc7,
- 0x16, 0xe5, 0xfd, 0xfa, 0x82, 0x8c, 0xf0, 0xfe, 0x70, 0xd0, 0x5c, 0x6f, 0x67, 0x55, 0x28, 0x6f,
- 0x0b, 0x3f, 0x04, 0x6b, 0x2f, 0xd8, 0x0f, 0x03, 0x2f, 0xf4, 0xcf, 0xbd, 0x1e, 0xb5, 0xfa, 0xf5,
- 0xc5, 0x1d, 0x6d, 0xb7, 0x6a, 0xea, 0xc3, 0x41, 0x73, 0xed, 0x07, 0xed, 0x94, 0xe2, 0x36, 0x2f,
- 0x40, 0x59, 0x47, 0x48, 0xc0, 0x1a, 0xf7, 0xae, 0x88, 0x2b, 0xae, 0x8e, 0x30, 0xce, 0xea, 0x4b,
- 0x32, 0x97, 0xbb, 0xaf, 0xcb, 0xe5, 0x45, 0xca, 0xc1, 0x7c, 0xa0, 0xd2, 0xb9, 0x96, 0x96, 0x32,
- 0x94, 0x45, 0x85, 0x07, 0x60, 0x33, 0x88, 0x92, 0xc3, 0x10, 0xf1, 0xc3, 0x4e, 0x8f, 0xb2, 0xcb,
- 0xfa, 0xb2, 0x3c, 0xf1, 0x83, 0xe1, 0xa0, 0xb9, 0x89, 0xf2, 0x4a, 0x54, 0xb4, 0x87, 0xef, 0x82,
- 0x55, 0x46, 0x4e, 0xa8, 0x1b, 0xde, 0x44, 0x39, 0x5d, 0x91, 0xfe, 0x1b, 0xc3, 0x41, 0x73, 0xb5,
- 0x7d, 0x94, 0xc8, 0x51, 0xc6, 0x0a, 0x5e, 0x03, 0xdd, 0xf5, 0x6c, 0xb2, 0xdf, 0xeb, 0x79, 0x16,
- 0xe6, 0xb8, 0xd3, 0x23, 0x3f, 0xf2, 0x6d, 0xcc, 0xc9, 0x39, 0x09, 0xa8, 0x67, 0xb7, 0x89, 0xe5,
- 0xb9, 0x36, 0xab, 0x57, 0x77, 0xb4, 0xdd, 0x8a, 0xf9, 0xd6, 0x70, 0xd0, 0xd4, 0xcf, 0xee, 0xb4,
- 0x46, 0x13, 0x20, 0xea, 0xbf, 0xd7, 0xc0, 0xf2, 0x41, 0xfb, 0x58, 0xa0, 0xcd, 0x80, 0x48, 0x8e,
- 0x33, 0x44, 0xf2, 0xf6, 0x1d, 0xad, 0x28, 0x82, 0x1a, 0x4b, 0x23, 0xff, 0x8a, 0x68, 0x44, 0xd8,
- 0x28, 0x1e, 0xdc, 0x01, 0x0b, 0x2e, 0x76, 0x88, 0x0c, 0xbd, 0x9a, 0xf8, 0x9c, 0x61, 0x87, 0x20,
- 0xa9, 0x81, 0x6f, 0x81, 0x25, 0x71, 0x25, 0xc7, 0x87, 0x32, 0x80, 0xaa, 0x79, 0x4f, 0xd9, 0x2c,
- 0x9d, 0x49, 0x29, 0x52, 0x5a, 0x91, 0x42, 0xee, 0xf9, 0x5e, 0xcf, 0xeb, 0xf6, 0x3f, 0x26, 0xfd,
- 0x51, 0x53, 0xc9, 0x14, 0x5e, 0xa4, 0xe4, 0x28, 0x63, 0x05, 0x3b, 0xa0, 0x86, 0x93, 0xcb, 0x96,
- 0x9d, 0x52, 0xdb, 0x6b, 0xbd, 0xee, 0x8c, 0x51, 0x27, 0x8a, 0xcd, 0x91, 0x7a, 0x89, 0x98, 0xb9,
- 0x3e, 0x1c, 0x34, 0x6b, 0xa9, 0xa4, 0xa1, 0x34, 0xa8, 0xfe, 0x3b, 0x0d, 0xd4, 0xd4, 0xa9, 0x67,
- 0x40, 0x9d, 0x1f, 0x66, 0xa9, 0xf3, 0x9b, 0x13, 0xe4, 0x6b, 0x0c, 0x71, 0x5a, 0x71, 0xd8, 0x92,
- 0x35, 0x2f, 0xc0, 0xb2, 0x2d, 0x93, 0xc6, 0xea, 0x9a, 0x84, 0x7e, 0x38, 0x01, 0xb4, 0x62, 0xe6,
- 0x75, 0xb5, 0xc1, 0x72, 0xb4, 0x66, 0x68, 0x04, 0xa5, 0xff, 0xbb, 0x02, 0xe0, 0x41, 0xfb, 0x38,
- 0xc7, 0x4b, 0x33, 0x28, 0x6b, 0x0a, 0x56, 0x45, 0xe5, 0x8c, 0x6a, 0x43, 0x95, 0xf7, 0x77, 0x27,
- 0xcc, 0x04, 0xee, 0x90, 0x5e, 0x9b, 0xf4, 0x88, 0xc5, 0xbd, 0x20, 0x2a, 0xb2, 0xb3, 0x14, 0x18,
- 0xca, 0x40, 0xc3, 0x43, 0xb0, 0x31, 0xa2, 0xd9, 0x1e, 0x66, 0x4c, 0x14, 0x77, 0xbd, 0x22, 0x8b,
- 0xb9, 0xae, 0x42, 0xdc, 0x68, 0xe7, 0xf4, 0xa8, 0xe0, 0x01, 0x3f, 0x05, 0x2b, 0x56, 0x9a, 0xd1,
- 0xef, 0x28, 0x1b, 0x63, 0x34, 0x28, 0x19, 0x9f, 0x84, 0xd8, 0xe5, 0x94, 0xf7, 0xcd, 0x55, 0x51,
- 0x32, 0x31, 0xf5, 0xc7, 0x68, 0x90, 0x81, 0x4d, 0x07, 0xdf, 0x50, 0x27, 0x74, 0xa2, 0xe2, 0x6e,
- 0xd3, 0x9f, 0x11, 0xc9, 0xfb, 0xd3, 0x6f, 0x21, 0x29, 0xf7, 0x34, 0x0f, 0x86, 0x8a, 0xf8, 0xfa,
- 0x5f, 0x34, 0xf0, 0x46, 0x31, 0xf1, 0x33, 0x68, 0x90, 0x76, 0xb6, 0x41, 0x8c, 0x3b, 0xaa, 0x38,
- 0x17, 0xe0, 0x98, 0x5e, 0xf9, 0xd5, 0x12, 0x58, 0x4d, 0xe7, 0x70, 0x06, 0x05, 0xfc, 0x3d, 0x50,
- 0xf3, 0x03, 0xef, 0x9a, 0x32, 0xea, 0xb9, 0x24, 0x50, 0xec, 0x78, 0x5f, 0xb9, 0xd4, 0xce, 0x13,
- 0x15, 0x4a, 0xdb, 0xc1, 0x1e, 0x00, 0x3e, 0x0e, 0xb0, 0x43, 0xb8, 0xe8, 0xe4, 0x8a, 0xbc, 0x83,
- 0xa7, 0xaf, 0xbb, 0x83, 0xf4, 0xb1, 0x8c, 0xf3, 0xd8, 0xf5, 0xc8, 0xe5, 0x41, 0x3f, 0x09, 0x31,
- 0x51, 0xa0, 0x14, 0x3e, 0xbc, 0x02, 0x6b, 0x01, 0xb1, 0x7a, 0x98, 0x3a, 0x6a, 0x9c, 0x58, 0x90,
- 0x61, 0x1e, 0x89, 0x67, 0x1d, 0xa5, 0x15, 0xb7, 0x83, 0xe6, 0xe3, 0xe2, 0xa7, 0x81, 0x71, 0x4e,
- 0x02, 0x46, 0x19, 0x27, 0x2e, 0x8f, 0x4a, 0x27, 0xe3, 0x83, 0xb2, 0xd8, 0xe2, 0x09, 0x70, 0xc4,
- 0xc3, 0xfc, 0xcc, 0xe7, 0xd4, 0x73, 0x59, 0x7d, 0x31, 0x79, 0x02, 0x4e, 0x53, 0x72, 0x94, 0xb1,
- 0x82, 0x27, 0x60, 0x4b, 0xb0, 0xf5, 0x4f, 0xa3, 0x0d, 0x8e, 0x6e, 0x7c, 0xec, 0x8a, 0xab, 0xaa,
- 0x2f, 0xc9, 0x19, 0xa0, 0x2e, 0xa6, 0xb2, 0xfd, 0x12, 0x3d, 0x2a, 0xf5, 0x82, 0x9f, 0x82, 0xcd,
- 0x68, 0x2c, 0x33, 0xa9, 0x6b, 0x53, 0xb7, 0x2b, 0x86, 0x32, 0x39, 0x8e, 0x54, 0xcd, 0x47, 0xa2,
- 0x37, 0x9e, 0xe7, 0x95, 0xb7, 0x65, 0x42, 0x54, 0x04, 0x81, 0x5f, 0x80, 0x4d, 0xb9, 0x23, 0xb1,
- 0x15, 0xb1, 0x50, 0xc2, 0xea, 0x2b, 0xc5, 0x99, 0x4a, 0x5c, 0x9d, 0x28, 0xa4, 0x11, 0xfd, 0x8c,
- 0x68, 0xea, 0x82, 0x04, 0x8e, 0xf9, 0xa6, 0xca, 0xd7, 0xe6, 0x7e, 0x1e, 0x0a, 0x15, 0xd1, 0xb7,
- 0x3f, 0x00, 0xeb, 0xb9, 0x84, 0xc3, 0x0d, 0x50, 0xb9, 0x22, 0xfd, 0xe8, 0xbd, 0x46, 0xe2, 0x4f,
- 0xb8, 0x05, 0x16, 0xaf, 0x71, 0x2f, 0x24, 0x51, 0x05, 0xa2, 0x68, 0xf1, 0xfe, 0xfc, 0x53, 0x4d,
- 0xff, 0x93, 0x06, 0x32, 0xc4, 0x36, 0x83, 0xe6, 0x3e, 0xcd, 0x36, 0xf7, 0xee, 0xa4, 0x85, 0x3d,
- 0xa6, 0xad, 0x7f, 0xa1, 0x81, 0xd5, 0xf4, 0xf4, 0x09, 0xdf, 0x01, 0x2b, 0x38, 0xb4, 0x29, 0x71,
- 0xad, 0xd1, 0xcc, 0x12, 0x47, 0xb3, 0xaf, 0xe4, 0x28, 0xb6, 0x10, 0xb3, 0x29, 0xb9, 0xf1, 0x69,
- 0x80, 0x45, 0xa5, 0x8d, 0xe6, 0xc1, 0x79, 0x39, 0x0f, 0x4a, 0xa2, 0x3c, 0xca, 0x2b, 0x51, 0xd1,
- 0x5e, 0xff, 0xcd, 0x3c, 0xd8, 0x88, 0x0a, 0x24, 0xfa, 0x34, 0x71, 0x88, 0xcb, 0x67, 0x40, 0x2f,
- 0x28, 0x33, 0xf6, 0x3d, 0xbe, 0x7b, 0x24, 0x4a, 0xa2, 0x1b, 0x37, 0xff, 0xc1, 0xcf, 0xc0, 0x12,
- 0xe3, 0x98, 0x87, 0x4c, 0x3e, 0x7f, 0xb5, 0xbd, 0xbd, 0xa9, 0x50, 0xa5, 0x67, 0x32, 0xff, 0x45,
- 0x6b, 0xa4, 0x10, 0xf5, 0x3f, 0x6b, 0x60, 0x2b, 0xef, 0x32, 0x83, 0x82, 0xfb, 0x24, 0x5b, 0x70,
- 0xef, 0x4c, 0x73, 0xa2, 0x31, 0x45, 0xf7, 0x0f, 0x0d, 0xbc, 0x51, 0x38, 0xbc, 0x7c, 0x67, 0x05,
- 0x57, 0xf9, 0x39, 0x46, 0x3c, 0x4b, 0xc6, 0x67, 0xc9, 0x55, 0xe7, 0x25, 0x7a, 0x54, 0xea, 0x05,
- 0x3f, 0x07, 0x1b, 0xd4, 0xed, 0x51, 0x97, 0xa8, 0x67, 0x39, 0x49, 0x77, 0x29, 0xa1, 0xe4, 0x91,
- 0x65, 0x9a, 0xb7, 0xc4, 0xf4, 0x72, 0x9c, 0x43, 0x41, 0x05, 0x5c, 0xfd, 0xaf, 0x25, 0xe9, 0x91,
- 0x63, 0xa5, 0xe8, 0x28, 0x29, 0x21, 0x41, 0xa1, 0xa3, 0x94, 0x1c, 0xc5, 0x16, 0xb2, 0x82, 0xe4,
- 0x55, 0xa8, 0x40, 0xa7, 0xab, 0x20, 0xe9, 0x99, 0xaa, 0x20, 0xb9, 0x46, 0x0a, 0x51, 0x44, 0x22,
- 0xc6, 0xb6, 0xd4, 0x78, 0x16, 0x47, 0x72, 0xa6, 0xe4, 0x28, 0xb6, 0xd0, 0xff, 0x53, 0x29, 0xc9,
- 0x92, 0x2c, 0xc5, 0xd4, 0x91, 0x46, 0xbf, 0x2c, 0xe4, 0x8f, 0x64, 0xc7, 0x47, 0xb2, 0xe1, 0xaf,
- 0x35, 0x00, 0x71, 0x0c, 0x71, 0x3a, 0x2a, 0xd5, 0xa8, 0x9e, 0x3e, 0x9a, 0xbe, 0x43, 0x8c, 0xfd,
- 0x02, 0x58, 0xf4, 0x56, 0x6f, 0xab, 0x20, 0x60, 0xd1, 0x00, 0x95, 0x44, 0x00, 0x29, 0xa8, 0x45,
- 0xd2, 0xa3, 0x20, 0xf0, 0x02, 0xd5, 0xb2, 0x6f, 0xdf, 0x1d, 0x90, 0x34, 0x37, 0x1b, 0xf2, 0x9b,
- 0x28, 0xf1, 0xbf, 0x1d, 0x34, 0x6b, 0x29, 0x3d, 0x4a, 0x63, 0x8b, 0xad, 0x6c, 0x92, 0x6c, 0xb5,
- 0xf0, 0x3f, 0x6c, 0x75, 0x48, 0xc6, 0x6f, 0x95, 0xc2, 0xde, 0x3e, 0x02, 0xdf, 0x18, 0x73, 0x41,
- 0x53, 0xbd, 0x6d, 0x5f, 0xcf, 0x83, 0x07, 0xf1, 0xfd, 0x07, 0xb4, 0x13, 0x72, 0xc2, 0x66, 0x35,
- 0xf9, 0xed, 0x01, 0x10, 0x7d, 0x3e, 0xc9, 0x52, 0x8d, 0x06, 0xbf, 0xd8, 0xe3, 0x30, 0xd6, 0xa0,
- 0x94, 0x15, 0x0c, 0x4b, 0xc6, 0xbe, 0xfd, 0x89, 0x8a, 0x2b, 0x7d, 0xb8, 0x69, 0xe7, 0xbf, 0xff,
- 0x77, 0x82, 0xf8, 0xbb, 0x06, 0xde, 0x2c, 0x0d, 0x64, 0x06, 0xcc, 0xfe, 0x3c, 0xcb, 0xec, 0x4f,
- 0xa6, 0xbe, 0xac, 0x31, 0xf4, 0xfe, 0x5b, 0x0d, 0xa4, 0xab, 0x13, 0x9e, 0x80, 0x05, 0x4e, 0x15,
- 0x87, 0xd7, 0xf6, 0x1e, 0x4d, 0x76, 0x82, 0x0b, 0xea, 0x90, 0xe4, 0x89, 0x15, 0x2b, 0x24, 0x51,
- 0xe0, 0x43, 0xb0, 0xec, 0x10, 0xc6, 0x70, 0x77, 0x54, 0x18, 0xf1, 0xa7, 0xf7, 0x69, 0x24, 0x46,
- 0x23, 0x3d, 0xfc, 0x36, 0xa8, 0x12, 0x11, 0xc1, 0x81, 0x18, 0x51, 0x45, 0x77, 0x2f, 0x9a, 0x6b,
- 0xc3, 0x41, 0xb3, 0x7a, 0x34, 0x12, 0xa2, 0x44, 0xaf, 0xbf, 0x07, 0xee, 0x97, 0xfc, 0xf2, 0x01,
- 0x9b, 0x60, 0xd1, 0x92, 0xbf, 0x98, 0x69, 0xd2, 0xbf, 0x2a, 0x4e, 0x7b, 0x20, 0x7f, 0x2a, 0x8b,
- 0xe4, 0xe6, 0xf7, 0x5f, 0xbe, 0x6a, 0xcc, 0x7d, 0xf9, 0xaa, 0x31, 0xf7, 0xd5, 0xab, 0xc6, 0xdc,
- 0xcf, 0x87, 0x0d, 0xed, 0xe5, 0xb0, 0xa1, 0x7d, 0x39, 0x6c, 0x68, 0x5f, 0x0d, 0x1b, 0xda, 0x3f,
- 0x87, 0x0d, 0xed, 0x97, 0x5f, 0x37, 0xe6, 0x3e, 0xdb, 0x1e, 0xff, 0xcf, 0x88, 0xff, 0x06, 0x00,
- 0x00, 0xff, 0xff, 0x4a, 0x00, 0x2b, 0x10, 0xa9, 0x18, 0x00, 0x00,
-}
+func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} }
func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -893,6 +187,16 @@ func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.ServiceAccountTokenInSecrets != nil {
+ i--
+ if *m.ServiceAccountTokenInSecrets {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ }
if m.NodeAllocatableUpdatePeriodSeconds != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.NodeAllocatableUpdatePeriodSeconds))
i--
@@ -1355,7 +659,7 @@ func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Parameters {
keysForParameters = append(keysForParameters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Parameters[string(keysForParameters[iNdEx])]
baseI := i
@@ -1706,7 +1010,7 @@ func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error)
for k := range m.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- {
v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])]
baseI := i
@@ -1761,7 +1065,7 @@ func (m *VolumeAttributesClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Parameters {
keysForParameters = append(keysForParameters, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- {
v := m.Parameters[string(keysForParameters[iNdEx])]
baseI := i
@@ -1997,6 +1301,9 @@ func (m *CSIDriverSpec) Size() (n int) {
if m.NodeAllocatableUpdatePeriodSeconds != nil {
n += 1 + sovGenerated(uint64(*m.NodeAllocatableUpdatePeriodSeconds))
}
+ if m.ServiceAccountTokenInSecrets != nil {
+ n += 2
+ }
return n
}
@@ -2393,6 +1700,7 @@ func (this *CSIDriverSpec) String() string {
`RequiresRepublish:` + valueToStringGenerated(this.RequiresRepublish) + `,`,
`SELinuxMount:` + valueToStringGenerated(this.SELinuxMount) + `,`,
`NodeAllocatableUpdatePeriodSeconds:` + valueToStringGenerated(this.NodeAllocatableUpdatePeriodSeconds) + `,`,
+ `ServiceAccountTokenInSecrets:` + valueToStringGenerated(this.ServiceAccountTokenInSecrets) + `,`,
`}`,
}, "")
return s
@@ -2495,7 +1803,7 @@ func (this *StorageClass) String() string {
for k := range this.Parameters {
keysForParameters = append(keysForParameters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
mapStringForParameters := "map[string]string{"
for _, k := range keysForParameters {
mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
@@ -2600,7 +1908,7 @@ func (this *VolumeAttachmentStatus) String() string {
for k := range this.AttachmentMetadata {
keysForAttachmentMetadata = append(keysForAttachmentMetadata, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata)
+ sort.Strings(keysForAttachmentMetadata)
mapStringForAttachmentMetadata := "map[string]string{"
for _, k := range keysForAttachmentMetadata {
mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k])
@@ -2623,7 +1931,7 @@ func (this *VolumeAttributesClass) String() string {
for k := range this.Parameters {
keysForParameters = append(keysForParameters, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForParameters)
+ sort.Strings(keysForParameters)
mapStringForParameters := "map[string]string{"
for _, k := range keysForParameters {
mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k])
@@ -3169,6 +2477,27 @@ func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error {
}
}
m.NodeAllocatableUpdatePeriodSeconds = &v
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountTokenInSecrets", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.ServiceAccountTokenInSecrets = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/generated.proto b/operator/vendor/k8s.io/api/storage/v1beta1/generated.proto
index fe597c9e..33b904ee 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/generated.proto
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/generated.proto
@@ -227,6 +227,30 @@ message CSIDriverSpec {
// +featureGate=MutableCSINodeAllocatableCount
// +optional
optional int64 nodeAllocatableUpdatePeriodSeconds = 9;
+
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ //
+ // +featureGate=CSIServiceAccountTokenSecrets
+ // +optional
+ optional bool serviceAccountTokenInSecrets = 10;
}
// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
@@ -411,6 +435,8 @@ message StorageClass {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// provisioner indicates the type of the provisioner.
+ // +required
+ // +k8s:required
optional string provisioner = 2;
// parameters holds the parameters for the provisioner that should
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/storage/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a288b7fb
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,64 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*CSIDriver) ProtoMessage() {}
+
+func (*CSIDriverList) ProtoMessage() {}
+
+func (*CSIDriverSpec) ProtoMessage() {}
+
+func (*CSINode) ProtoMessage() {}
+
+func (*CSINodeDriver) ProtoMessage() {}
+
+func (*CSINodeList) ProtoMessage() {}
+
+func (*CSINodeSpec) ProtoMessage() {}
+
+func (*CSIStorageCapacity) ProtoMessage() {}
+
+func (*CSIStorageCapacityList) ProtoMessage() {}
+
+func (*StorageClass) ProtoMessage() {}
+
+func (*StorageClassList) ProtoMessage() {}
+
+func (*TokenRequest) ProtoMessage() {}
+
+func (*VolumeAttachment) ProtoMessage() {}
+
+func (*VolumeAttachmentList) ProtoMessage() {}
+
+func (*VolumeAttachmentSource) ProtoMessage() {}
+
+func (*VolumeAttachmentSpec) ProtoMessage() {}
+
+func (*VolumeAttachmentStatus) ProtoMessage() {}
+
+func (*VolumeAttributesClass) ProtoMessage() {}
+
+func (*VolumeAttributesClassList) ProtoMessage() {}
+
+func (*VolumeError) ProtoMessage() {}
+
+func (*VolumeNodeResources) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/types.go b/operator/vendor/k8s.io/api/storage/v1beta1/types.go
index 4f350b0b..b5dde72a 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/types.go
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/types.go
@@ -43,6 +43,8 @@ type StorageClass struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// provisioner indicates the type of the provisioner.
+ // +required
+ // +k8s:required
Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
// parameters holds the parameters for the provisioner that should
@@ -456,6 +458,30 @@ type CSIDriverSpec struct {
// +featureGate=MutableCSINodeAllocatableCount
// +optional
NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty" protobuf:"varint,9,opt,name=nodeAllocatableUpdatePeriodSeconds"`
+
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ //
+ // +featureGate=CSIServiceAccountTokenSecrets
+ // +optional
+ ServiceAccountTokenInSecrets *bool `json:"serviceAccountTokenInSecrets,omitempty" protobuf:"varint,10,opt,name=serviceAccountTokenInSecrets"`
}
// FSGroupPolicy specifies if a CSI Driver supports modifying
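
Editor's note (illustration only, not part of the vendored patch): the new serviceAccountTokenInSecrets field documented above changes only where kubelet places the token payload, not the key it is stored under. As a hedged sketch, a CSI driver built against the CSI Go bindings (github.com/container-storage-interface/spec/lib/go/csi) could read the payload from either location with a small helper; the preference order below is an assumption for drivers that want a single binary to support both modes:

package csitokens

import (
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
)

// saTokensKey is the key kubelet uses for service account tokens, per the field
// documentation above; it is the same in both Secrets and VolumeContext.
const saTokensKey = "csi.storage.k8s.io/serviceAccount.tokens"

// serviceAccountTokens prefers the Secrets field (used when
// serviceAccountTokenInSecrets is true) and falls back to VolumeContext
// (the existing behavior when the field is false or unset).
func serviceAccountTokens(req *csi.NodePublishVolumeRequest) (string, error) {
	if v, ok := req.GetSecrets()[saTokensKey]; ok && v != "" {
		return v, nil
	}
	if v, ok := req.GetVolumeContext()[saTokensKey]; ok && v != "" {
		return v, nil
	}
	return "", fmt.Errorf("service account tokens not found under %q", saTokensKey)
}
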
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
index 7c5276e4..78da9266 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
@@ -58,6 +58,7 @@ var map_CSIDriverSpec = map[string]string{
"requiresRepublish": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
"seLinuxMount": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".",
"nodeAllocatableUpdatePeriodSeconds": "nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of the CSINode allocatable capacity for this driver. When set, both periodic updates and updates triggered by capacity-related failures are enabled. If not set, no updates occur (neither periodic nor upon detecting capacity-related failures), and the allocatable.count remains static. The minimum allowed value for this field is 10 seconds.\n\nThis is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.\n\nThis field is mutable.",
+ "serviceAccountTokenInSecrets": "serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that service account tokens should be passed via the Secrets field in NodePublishVolumeRequest instead of the VolumeContext field. The CSI specification provides a dedicated Secrets field for sensitive information like tokens, which is the appropriate mechanism for handling credentials. This addresses security concerns where sensitive tokens were being logged as part of volume context.\n\nWhen \"true\", kubelet will pass the tokens only in the Secrets field with the key \"csi.storage.k8s.io/serviceAccount.tokens\". The CSI driver must be updated to read tokens from the Secrets field instead of VolumeContext.\n\nWhen \"false\" or not set, kubelet will pass the tokens in VolumeContext with the key \"csi.storage.k8s.io/serviceAccount.tokens\" (existing behavior). This maintains backward compatibility with existing CSI drivers.\n\nThis field can only be set when TokenRequests is configured. The API server will reject CSIDriver specs that set this field without TokenRequests.\n\nDefault behavior if unset is to pass tokens in the VolumeContext field.",
}
func (CSIDriverSpec) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
index a5ef9b5c..94cc629d 100644
--- a/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
@@ -137,6 +137,11 @@ func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
*out = new(int64)
**out = **in
}
+ if in.ServiceAccountTokenInSecrets != nil {
+ in, out := &in.ServiceAccountTokenInSecrets, &out.ServiceAccountTokenInSecrets
+ *out = new(bool)
+ **out = **in
+ }
return
}
diff --git a/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..8453d86b
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,127 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriver) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSIDriver"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriverList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSIDriverList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIDriverSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSIDriverSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINode) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSINode"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeDriver) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSINodeDriver"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSINodeList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSINodeSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSINodeSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacity) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSIStorageCapacity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CSIStorageCapacityList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.CSIStorageCapacityList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageClass) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.StorageClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageClassList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.StorageClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TokenRequest) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.TokenRequest"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachment) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttachment"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttachmentList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSource) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttachmentSource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttachmentSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttachmentStatus) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttachmentStatus"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClass) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttributesClass"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeAttributesClassList) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeAttributesClassList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeError) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeError"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in VolumeNodeResources) OpenAPIModelName() string {
+ return "io.k8s.api.storage.v1beta1.VolumeNodeResources"
+}
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go b/operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go
deleted file mode 100644
index ed57f34b..00000000
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go
+++ /dev/null
@@ -1,1688 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: k8s.io/api/storagemigration/v1alpha1/generated.proto
-
-package v1alpha1
-
-import (
- fmt "fmt"
-
- io "io"
-
- proto "github.com/gogo/protobuf/proto"
-
- k8s_io_api_core_v1 "k8s.io/api/core/v1"
-
- math "math"
- math_bits "math/bits"
- reflect "reflect"
- strings "strings"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
-func (*GroupVersionResource) ProtoMessage() {}
-func (*GroupVersionResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{0}
-}
-func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionResource.Merge(m, src)
-}
-func (m *GroupVersionResource) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionResource) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
-
-func (m *MigrationCondition) Reset() { *m = MigrationCondition{} }
-func (*MigrationCondition) ProtoMessage() {}
-func (*MigrationCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{1}
-}
-func (m *MigrationCondition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MigrationCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *MigrationCondition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MigrationCondition.Merge(m, src)
-}
-func (m *MigrationCondition) XXX_Size() int {
- return m.Size()
-}
-func (m *MigrationCondition) XXX_DiscardUnknown() {
- xxx_messageInfo_MigrationCondition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MigrationCondition proto.InternalMessageInfo
-
-func (m *StorageVersionMigration) Reset() { *m = StorageVersionMigration{} }
-func (*StorageVersionMigration) ProtoMessage() {}
-func (*StorageVersionMigration) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{2}
-}
-func (m *StorageVersionMigration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionMigration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionMigration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionMigration.Merge(m, src)
-}
-func (m *StorageVersionMigration) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionMigration) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionMigration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionMigration proto.InternalMessageInfo
-
-func (m *StorageVersionMigrationList) Reset() { *m = StorageVersionMigrationList{} }
-func (*StorageVersionMigrationList) ProtoMessage() {}
-func (*StorageVersionMigrationList) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{3}
-}
-func (m *StorageVersionMigrationList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionMigrationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionMigrationList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionMigrationList.Merge(m, src)
-}
-func (m *StorageVersionMigrationList) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionMigrationList) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionMigrationList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionMigrationList proto.InternalMessageInfo
-
-func (m *StorageVersionMigrationSpec) Reset() { *m = StorageVersionMigrationSpec{} }
-func (*StorageVersionMigrationSpec) ProtoMessage() {}
-func (*StorageVersionMigrationSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{4}
-}
-func (m *StorageVersionMigrationSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionMigrationSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionMigrationSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionMigrationSpec.Merge(m, src)
-}
-func (m *StorageVersionMigrationSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionMigrationSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionMigrationSpec.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionMigrationSpec proto.InternalMessageInfo
-
-func (m *StorageVersionMigrationStatus) Reset() { *m = StorageVersionMigrationStatus{} }
-func (*StorageVersionMigrationStatus) ProtoMessage() {}
-func (*StorageVersionMigrationStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_0117377a57b172b9, []int{5}
-}
-func (m *StorageVersionMigrationStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StorageVersionMigrationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StorageVersionMigrationStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StorageVersionMigrationStatus.Merge(m, src)
-}
-func (m *StorageVersionMigrationStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *StorageVersionMigrationStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_StorageVersionMigrationStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StorageVersionMigrationStatus proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.api.storagemigration.v1alpha1.GroupVersionResource")
- proto.RegisterType((*MigrationCondition)(nil), "k8s.io.api.storagemigration.v1alpha1.MigrationCondition")
- proto.RegisterType((*StorageVersionMigration)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigration")
- proto.RegisterType((*StorageVersionMigrationList)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationList")
- proto.RegisterType((*StorageVersionMigrationSpec)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationSpec")
- proto.RegisterType((*StorageVersionMigrationStatus)(nil), "k8s.io.api.storagemigration.v1alpha1.StorageVersionMigrationStatus")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/api/storagemigration/v1alpha1/generated.proto", fileDescriptor_0117377a57b172b9)
-}
-
-var fileDescriptor_0117377a57b172b9 = []byte{
- // 719 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x4f, 0x13, 0x4f,
- 0x14, 0xef, 0x42, 0x0b, 0x7c, 0xa7, 0x5f, 0xc0, 0x4c, 0x14, 0x1a, 0x8c, 0x5b, 0x53, 0x09, 0x41,
- 0xa3, 0xb3, 0xd2, 0x10, 0x43, 0x30, 0x1e, 0x28, 0x07, 0xa3, 0x81, 0x98, 0x0c, 0xc8, 0xc1, 0x78,
- 0x70, 0xba, 0x1d, 0xb7, 0x43, 0xd9, 0x9d, 0xcd, 0xce, 0x6c, 0x13, 0x6e, 0xfe, 0x09, 0x1e, 0xfc,
- 0x93, 0x3c, 0x70, 0x31, 0xe1, 0xc8, 0xc5, 0x2a, 0xf5, 0xbf, 0xe0, 0x64, 0x66, 0x76, 0x76, 0xfb,
- 0x8b, 0x62, 0x13, 0x6e, 0x3b, 0xef, 0xbd, 0xcf, 0x67, 0xde, 0x7b, 0x9f, 0x79, 0x6f, 0xc1, 0x66,
- 0x6b, 0x4b, 0x20, 0xc6, 0x1d, 0x12, 0x32, 0x47, 0x48, 0x1e, 0x11, 0x8f, 0xfa, 0xcc, 0x8b, 0x88,
- 0x64, 0x3c, 0x70, 0xda, 0x1b, 0xe4, 0x24, 0x6c, 0x92, 0x0d, 0xc7, 0xa3, 0x01, 0x8d, 0x88, 0xa4,
- 0x0d, 0x14, 0x46, 0x5c, 0x72, 0xb8, 0x9a, 0xa0, 0x10, 0x09, 0x19, 0x1a, 0x46, 0xa1, 0x14, 0xb5,
- 0xf2, 0xcc, 0x63, 0xb2, 0x19, 0xd7, 0x91, 0xcb, 0x7d, 0xc7, 0xe3, 0x1e, 0x77, 0x34, 0xb8, 0x1e,
- 0x7f, 0xd6, 0x27, 0x7d, 0xd0, 0x5f, 0x09, 0xe9, 0x4a, 0xa5, 0x2f, 0x15, 0x97, 0x47, 0xd4, 0x69,
- 0x8f, 0x5c, 0xbc, 0xd2, 0x97, 0xae, 0x4f, 0xdc, 0x26, 0x0b, 0x68, 0x74, 0xea, 0x84, 0x2d, 0x4f,
- 0x19, 0x84, 0xe3, 0x53, 0x49, 0xae, 0x43, 0x39, 0xe3, 0x50, 0x51, 0x1c, 0x48, 0xe6, 0xd3, 0x11,
- 0xc0, 0x8b, 0x7f, 0x01, 0x84, 0xdb, 0xa4, 0x3e, 0x19, 0xc6, 0x55, 0xbe, 0x59, 0xe0, 0xee, 0xeb,
- 0x88, 0xc7, 0xe1, 0x11, 0x8d, 0x04, 0xe3, 0x01, 0xa6, 0x82, 0xc7, 0x91, 0x4b, 0xe1, 0x23, 0x50,
- 0xf0, 0x94, 0xbd, 0x64, 0x3d, 0xb4, 0xd6, 0xff, 0xab, 0xcd, 0x9f, 0x75, 0xca, 0xb9, 0x6e, 0xa7,
- 0x5c, 0xd0, 0xc1, 0x38, 0xf1, 0xc1, 0xc7, 0x60, 0xb6, 0x9d, 0xe0, 0x4a, 0x53, 0x3a, 0x6c, 0xd1,
- 0x84, 0xcd, 0xa6, 0x74, 0xa9, 0x1f, 0x3e, 0x05, 0x73, 0x91, 0xe1, 0x2e, 0x4d, 0xeb, 0xd8, 0x3b,
- 0x26, 0x76, 0x2e, 0xbd, 0x13, 0x67, 0x11, 0x95, 0x9f, 0x53, 0x00, 0xee, 0xa7, 0xfa, 0xec, 0xf2,
- 0xa0, 0xc1, 0xd4, 0x07, 0xdc, 0x06, 0x79, 0x79, 0x1a, 0x52, 0x93, 0xd3, 0x9a, 0x21, 0xc8, 0x1f,
- 0x9e, 0x86, 0xf4, 0xaa, 0x53, 0x5e, 0x1a, 0x45, 0x28, 0x0f, 0xd6, 0x18, 0xb8, 0x07, 0x66, 0x84,
- 0x24, 0x32, 0x16, 0x26, 0xd5, 0x4d, 0x83, 0x9e, 0x39, 0xd0, 0xd6, 0xab, 0x4e, 0xf9, 0x1a, 0x39,
- 0x51, 0xc6, 0x94, 0x44, 0x61, 0xc3, 0x01, 0x8f, 0xc1, 0xc2, 0x09, 0x11, 0xf2, 0x7d, 0xd8, 0x20,
- 0x92, 0x1e, 0x32, 0x3f, 0x29, 0xaa, 0x58, 0x7d, 0x82, 0x7a, 0x0f, 0x2d, 0x13, 0x02, 0x85, 0x2d,
- 0x4f, 0x19, 0x04, 0x52, 0x7a, 0xa3, 0xf6, 0x06, 0x52, 0x88, 0xda, 0x92, 0xc9, 0x60, 0x61, 0x6f,
- 0x80, 0x09, 0x0f, 0x31, 0xc3, 0x35, 0x30, 0x13, 0x51, 0x22, 0x78, 0x50, 0xca, 0xeb, 0xcc, 0x17,
- 0xd2, 0xcc, 0xb1, 0xb6, 0x62, 0xe3, 0x55, 0x6a, 0xf8, 0x54, 0x08, 0xe2, 0xd1, 0x52, 0x61, 0x50,
- 0x8d, 0xfd, 0xc4, 0x8c, 0x53, 0x7f, 0xe5, 0xc7, 0x14, 0x58, 0x3e, 0x48, 0xc6, 0xc0, 0x28, 0x95,
- 0xf5, 0x0e, 0x7e, 0x02, 0x73, 0x2a, 0xcd, 0x06, 0x91, 0x44, 0x37, 0xba, 0x58, 0x7d, 0x3e, 0x59,
- 0x51, 0xef, 0xea, 0xc7, 0xd4, 0x95, 0xfb, 0x54, 0x92, 0x1a, 0x34, 0x37, 0x83, 0x9e, 0x0d, 0x67,
- 0xac, 0xd0, 0x05, 0x79, 0x11, 0x52, 0x57, 0x0b, 0x51, 0xac, 0xee, 0xa0, 0x49, 0x66, 0x13, 0x8d,
- 0x49, 0xf7, 0x20, 0xa4, 0x6e, 0xed, 0xff, 0xf4, 0x25, 0xa8, 0x13, 0xd6, 0xe4, 0xb0, 0x95, 0xe9,
- 0x9d, 0x28, 0xb3, 0x7b, 0xbb, 0x6b, 0x34, 0x55, 0xaf, 0xf5, 0x83, 0xcf, 0xa1, 0xf2, 0xcb, 0x02,
- 0xf7, 0xc7, 0x20, 0xf7, 0x98, 0x90, 0xf0, 0xe3, 0x48, 0x4f, 0xd1, 0x64, 0x3d, 0x55, 0x68, 0xdd,
- 0xd1, 0x6c, 0x5a, 0x52, 0x4b, 0x5f, 0x3f, 0xeb, 0xa0, 0xc0, 0x24, 0xf5, 0xd5, 0xcb, 0x9e, 0x5e,
- 0x2f, 0x56, 0x5f, 0xdd, 0xaa, 0xd2, 0xde, 0xa8, 0xbf, 0x51, 0x9c, 0x38, 0xa1, 0xae, 0x7c, 0x1f,
- 0x5f, 0xa1, 0x6a, 0x3a, 0x6c, 0xf6, 0xcd, 0x77, 0x52, 0xe1, 0xf6, 0x64, 0x69, 0x5c, 0xb7, 0x7d,
- 0x6e, 0xda, 0x0d, 0xf0, 0x25, 0x98, 0x77, 0x79, 0x20, 0x59, 0x10, 0xd3, 0x43, 0xde, 0xa2, 0xe9,
- 0xea, 0xb9, 0x67, 0x20, 0xf3, 0xbb, 0xfd, 0x4e, 0x3c, 0x18, 0x5b, 0x39, 0xb7, 0xc0, 0x83, 0x1b,
- 0x25, 0x86, 0x27, 0x00, 0xb8, 0xe9, 0xd0, 0x8b, 0x92, 0xa5, 0x3b, 0xba, 0x35, 0x59, 0x29, 0xa3,
- 0xfb, 0xa7, 0x37, 0x08, 0x99, 0x49, 0xe0, 0x3e, 0x7e, 0xb8, 0x03, 0x16, 0xd3, 0xc2, 0x8e, 0x06,
- 0x36, 0xe9, 0xb2, 0x01, 0x2e, 0xe2, 0x41, 0x37, 0x1e, 0x8e, 0xaf, 0xbd, 0x3d, 0xbb, 0xb4, 0x73,
- 0xe7, 0x97, 0x76, 0xee, 0xe2, 0xd2, 0xce, 0x7d, 0xe9, 0xda, 0xd6, 0x59, 0xd7, 0xb6, 0xce, 0xbb,
- 0xb6, 0x75, 0xd1, 0xb5, 0xad, 0xdf, 0x5d, 0xdb, 0xfa, 0xfa, 0xc7, 0xce, 0x7d, 0x58, 0x9d, 0xe4,
- 0xb7, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0x01, 0xc1, 0xb1, 0xd8, 0x5d, 0x07, 0x00, 0x00,
-}
-
-func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.Resource)
- copy(dAtA[i:], m.Resource)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Group)
- copy(dAtA[i:], m.Group)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *MigrationCondition) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MigrationCondition) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MigrationCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.Message)
- copy(dAtA[i:], m.Message)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
- i--
- dAtA[i] = 0x2a
- i -= len(m.Reason)
- copy(dAtA[i:], m.Reason)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
- i--
- dAtA[i] = 0x22
- {
- size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- i -= len(m.Status)
- copy(dAtA[i:], m.Status)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
- i--
- dAtA[i] = 0x12
- i -= len(m.Type)
- copy(dAtA[i:], m.Type)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *StorageVersionMigration) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StorageVersionMigration) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StorageVersionMigration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- {
- size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- {
- size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *StorageVersionMigrationList) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StorageVersionMigrationList) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StorageVersionMigrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Items) > 0 {
- for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *StorageVersionMigrationSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StorageVersionMigrationSpec) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StorageVersionMigrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.ContinueToken)
- copy(dAtA[i:], m.ContinueToken)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContinueToken)))
- i--
- dAtA[i] = 0x12
- {
- size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *StorageVersionMigrationStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StorageVersionMigrationStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StorageVersionMigrationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- i -= len(m.ResourceVersion)
- copy(dAtA[i:], m.ResourceVersion)
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
- i--
- dAtA[i] = 0x12
- if len(m.Conditions) > 0 {
- for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
- offset -= sovGenerated(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *GroupVersionResource) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Group)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Version)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Resource)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *MigrationCondition) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Type)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Status)
- n += 1 + l + sovGenerated(uint64(l))
- l = m.LastUpdateTime.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Reason)
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.Message)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *StorageVersionMigration) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Spec.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = m.Status.Size()
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *StorageVersionMigrationList) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ListMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.Items) > 0 {
- for _, e := range m.Items {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
-func (m *StorageVersionMigrationSpec) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.Resource.Size()
- n += 1 + l + sovGenerated(uint64(l))
- l = len(m.ContinueToken)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func (m *StorageVersionMigrationStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Conditions) > 0 {
- for _, e := range m.Conditions {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- l = len(m.ResourceVersion)
- n += 1 + l + sovGenerated(uint64(l))
- return n
-}
-
-func sovGenerated(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozGenerated(x uint64) (n int) {
- return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *GroupVersionResource) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&GroupVersionResource{`,
- `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
- `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
- `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *MigrationCondition) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&MigrationCondition{`,
- `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
- `Status:` + fmt.Sprintf("%v", this.Status) + `,`,
- `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
- `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
- `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *StorageVersionMigration) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&StorageVersionMigration{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionMigrationSpec", "StorageVersionMigrationSpec", 1), `&`, ``, 1) + `,`,
- `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionMigrationStatus", "StorageVersionMigrationStatus", 1), `&`, ``, 1) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *StorageVersionMigrationList) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForItems := "[]StorageVersionMigration{"
- for _, f := range this.Items {
- repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersionMigration", "StorageVersionMigration", 1), `&`, ``, 1) + ","
- }
- repeatedStringForItems += "}"
- s := strings.Join([]string{`&StorageVersionMigrationList{`,
- `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
- `Items:` + repeatedStringForItems + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *StorageVersionMigrationSpec) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&StorageVersionMigrationSpec{`,
- `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "GroupVersionResource", "GroupVersionResource", 1), `&`, ``, 1) + `,`,
- `ContinueToken:` + fmt.Sprintf("%v", this.ContinueToken) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *StorageVersionMigrationStatus) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForConditions := "[]MigrationCondition{"
- for _, f := range this.Conditions {
- repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "MigrationCondition", "MigrationCondition", 1), `&`, ``, 1) + ","
- }
- repeatedStringForConditions += "}"
- s := strings.Join([]string{`&StorageVersionMigrationStatus{`,
- `Conditions:` + repeatedStringForConditions + `,`,
- `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringGenerated(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
- }
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
-}
-func (m *GroupVersionResource) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Group = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Resource = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MigrationCondition) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MigrationCondition: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MigrationCondition: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Type = MigrationConditionType(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Reason = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Message = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StorageVersionMigration) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StorageVersionMigration: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StorageVersionMigration: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StorageVersionMigrationList) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StorageVersionMigrationList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StorageVersionMigrationList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Items = append(m.Items, StorageVersionMigration{})
- if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StorageVersionMigrationSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StorageVersionMigrationSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StorageVersionMigrationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ContinueToken", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ContinueToken = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StorageVersionMigrationStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StorageVersionMigrationStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StorageVersionMigrationStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Conditions = append(m.Conditions, MigrationCondition{})
- if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ResourceVersion = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipGenerated(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- depth := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- case 1:
- iNdEx += 8
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthGenerated
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupGenerated
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthGenerated
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/doc.go
similarity index 89%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/doc.go
index df8f3a65..a0809939 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/doc.go
@@ -18,6 +18,8 @@ limitations under the License.
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
+// +k8s:openapi-model-package=io.k8s.api.storagemigration.v1beta1
+
// +groupName=storagemigration.k8s.io
-package v1alpha1
+package v1beta1
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.pb.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.pb.go
new file mode 100644
index 00000000..309d61c9
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.pb.go
@@ -0,0 +1,904 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/storagemigration/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+func (m *StorageVersionMigration) Reset() { *m = StorageVersionMigration{} }
+
+func (m *StorageVersionMigrationList) Reset() { *m = StorageVersionMigrationList{} }
+
+func (m *StorageVersionMigrationSpec) Reset() { *m = StorageVersionMigrationSpec{} }
+
+func (m *StorageVersionMigrationStatus) Reset() { *m = StorageVersionMigrationStatus{} }
+
+func (m *StorageVersionMigration) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StorageVersionMigration) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageVersionMigration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StorageVersionMigrationList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StorageVersionMigrationList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageVersionMigrationList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StorageVersionMigrationSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StorageVersionMigrationSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageVersionMigrationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *StorageVersionMigrationStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StorageVersionMigrationStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageVersionMigrationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.ResourceVersion)
+ copy(dAtA[i:], m.ResourceVersion)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Conditions) > 0 {
+ for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *StorageVersionMigration) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StorageVersionMigrationList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *StorageVersionMigrationSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.Resource.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *StorageVersionMigrationStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Conditions) > 0 {
+ for _, e := range m.Conditions {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.ResourceVersion)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *StorageVersionMigration) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StorageVersionMigration{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StorageVersionMigrationSpec", "StorageVersionMigrationSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StorageVersionMigrationStatus", "StorageVersionMigrationStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StorageVersionMigrationList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]StorageVersionMigration{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageVersionMigration", "StorageVersionMigration", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&StorageVersionMigrationList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StorageVersionMigrationSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StorageVersionMigrationSpec{`,
+ `Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupResource", "v1.GroupResource", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StorageVersionMigrationStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForConditions := "[]Condition{"
+ for _, f := range this.Conditions {
+ repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForConditions += "}"
+ s := strings.Join([]string{`&StorageVersionMigrationStatus{`,
+ `Conditions:` + repeatedStringForConditions + `,`,
+ `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *StorageVersionMigration) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StorageVersionMigration: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StorageVersionMigration: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StorageVersionMigrationList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StorageVersionMigrationList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StorageVersionMigrationList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, StorageVersionMigration{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StorageVersionMigrationSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StorageVersionMigrationSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StorageVersionMigrationSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StorageVersionMigrationStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StorageVersionMigrationStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StorageVersionMigrationStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Conditions = append(m.Conditions, v1.Condition{})
+ if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
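
For reference, every Unmarshal method above uses the same protobuf varint decoding loop (the `wire |= uint64(b&0x7F) << shift` pattern, with the field number and wire type then split out of the decoded tag). A minimal standalone sketch of that pattern, illustrative only and not part of the vendored code, looks like this:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a protobuf varint from data, mirroring the loop used by
// the generated Unmarshal methods: 7 bits per byte, least-significant group
// first, with the high bit of each byte marking continuation.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: (0xAC & 0x7F) | (0x02 << 7) = 44 + 256.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}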
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.proto
similarity index 64%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.proto
index 341e0bc5..d7d08d88 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.proto
@@ -19,48 +19,14 @@ limitations under the License.
syntax = "proto2";
-package k8s.io.api.storagemigration.v1alpha1;
+package k8s.io.api.storagemigration.v1beta1;
-import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
-option go_package = "k8s.io/api/storagemigration/v1alpha1";
-
-// The names of the group, the version, and the resource.
-message GroupVersionResource {
- // The name of the group.
- optional string group = 1;
-
- // The name of the version.
- optional string version = 2;
-
- // The name of the resource.
- optional string resource = 3;
-}
-
-// Describes the state of a migration at a certain point.
-message MigrationCondition {
- // Type of the condition.
- optional string type = 1;
-
- // Status of the condition, one of True, False, Unknown.
- optional string status = 2;
-
- // The last time this condition was updated.
- // +optional
- optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3;
-
- // The reason for the condition's last transition.
- // +optional
- optional string reason = 4;
-
- // A human readable message indicating details about the transition.
- // +optional
- optional string message = 5;
-}
+option go_package = "k8s.io/api/storagemigration/v1beta1";
// StorageVersionMigration represents a migration of stored data to the latest
// storage version.
@@ -87,10 +53,6 @@ message StorageVersionMigrationList {
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of StorageVersionMigration
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
repeated StorageVersionMigration items = 2;
}
@@ -99,14 +61,7 @@ message StorageVersionMigrationSpec {
// The resource that is being migrated. The migrator sends requests to
// the endpoint serving the resource.
// Immutable.
- optional GroupVersionResource resource = 1;
-
- // The token used in the list options to get the next chunk of objects
- // to migrate. When the .status.conditions indicates the migration is
- // "Running", users can use this token to check the progress of the
- // migration.
- // +optional
- optional string continueToken = 2;
+ optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource resource = 1;
}
// Status of the storage version migration.
@@ -117,7 +72,7 @@ message StorageVersionMigrationStatus {
// +listType=map
// +listMapKey=type
// +optional
- repeated MigrationCondition conditions = 1;
+ repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
// ResourceVersion to compare with the GC cache for performing the migration.
// This is the current resource version of given group, version and resource when
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..39cafd8a
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,30 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*StorageVersionMigration) ProtoMessage() {}
+
+func (*StorageVersionMigrationList) ProtoMessage() {}
+
+func (*StorageVersionMigrationSpec) ProtoMessage() {}
+
+func (*StorageVersionMigrationStatus) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/register.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/register.go
similarity index 97%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/register.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/register.go
index c9706050..dcc8dee3 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/register.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/register.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
@@ -27,7 +27,7 @@ import (
const GroupName = "storagemigration.k8s.io"
// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/types.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/types.go
similarity index 57%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/types.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/types.go
index 0f343d1e..9655f737 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/types.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/types.go
@@ -14,17 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1beta1
import (
- corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.30
+// +k8s:prerelease-lifecycle-gen:introduced=1.35
// StorageVersionMigration represents a migration of stored data to the latest
// storage version.
@@ -47,25 +46,7 @@ type StorageVersionMigrationSpec struct {
// The resource that is being migrated. The migrator sends requests to
// the endpoint serving the resource.
// Immutable.
- Resource GroupVersionResource `json:"resource" protobuf:"bytes,1,opt,name=resource"`
- // The token used in the list options to get the next chunk of objects
- // to migrate. When the .status.conditions indicates the migration is
- // "Running", users can use this token to check the progress of the
- // migration.
- // +optional
- ContinueToken string `json:"continueToken,omitempty" protobuf:"bytes,2,opt,name=continueToken"`
- // TODO: consider recording the storage version hash when the migration
- // is created. It can avoid races.
-}
-
-// The names of the group, the version, and the resource.
-type GroupVersionResource struct {
- // The name of the group.
- Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
- // The name of the version.
- Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"`
- // The name of the resource.
- Resource string `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"`
+ Resource metav1.GroupResource `json:"resource" protobuf:"bytes,1,opt,name=resource"`
}
type MigrationConditionType string
@@ -79,23 +60,6 @@ const (
MigrationFailed MigrationConditionType = "Failed"
)
-// Describes the state of a migration at a certain point.
-type MigrationCondition struct {
- // Type of the condition.
- Type MigrationConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=MigrationConditionType"`
- // Status of the condition, one of True, False, Unknown.
- Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
- // The last time this condition was updated.
- // +optional
- LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"`
- // The reason for the condition's last transition.
- // +optional
- Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
- // A human readable message indicating details about the transition.
- // +optional
- Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
-}
-
// Status of the storage version migration.
type StorageVersionMigrationStatus struct {
// The latest available observations of the migration's current state.
@@ -104,7 +68,7 @@ type StorageVersionMigrationStatus struct {
// +listType=map
// +listMapKey=type
// +optional
- Conditions []MigrationCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
// ResourceVersion to compare with the GC cache for performing the migration.
// This is the current resource version of given group, version and resource when
// kube-controller-manager first observes this StorageVersionMigration resource.
@@ -112,7 +76,7 @@ type StorageVersionMigrationStatus struct {
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:prerelease-lifecycle-gen:introduced=1.30
+// +k8s:prerelease-lifecycle-gen:introduced=1.35
// StorageVersionMigrationList is a collection of storage version migrations.
type StorageVersionMigrationList struct {
@@ -123,9 +87,5 @@ type StorageVersionMigrationList struct {
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of StorageVersionMigration
- // +patchMergeKey=type
- // +patchStrategy=merge
- // +listType=map
- // +listMapKey=type
- Items []StorageVersionMigration `json:"items" listType:"map" listMapKey:"type" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=items"`
+ Items []StorageVersionMigration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
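
With the move to v1beta1, the spec's resource field becomes a metav1.GroupResource and status conditions become standard metav1.Condition values. A minimal sketch of constructing the new type in Go, assuming the vendored import path shown in this diff and using placeholder group/resource names, might look like:

package main

import (
	"fmt"

	svmv1beta1 "k8s.io/api/storagemigration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// v1alpha1's GroupVersionResource is gone; v1beta1 names only the group and
	// resource via metav1.GroupResource. The group/resource below are illustrative.
	migration := svmv1beta1.StorageVersionMigration{
		ObjectMeta: metav1.ObjectMeta{Name: "example-migration"},
		Spec: svmv1beta1.StorageVersionMigrationSpec{
			Resource: metav1.GroupResource{Group: "skyhook.nvidia.com", Resource: "skyhooks"},
		},
	}
	fmt.Printf("%s/%s\n", migration.Spec.Resource.Group, migration.Spec.Resource.Resource)
}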
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/types_swagger_doc_generated.go
similarity index 68%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/types_swagger_doc_generated.go
index 257d72a2..90e4ccc3 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/types_swagger_doc_generated.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package v1alpha1
+package v1beta1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
@@ -27,30 +27,6 @@ package v1alpha1
// Those methods can be generated by using hack/update-codegen.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_GroupVersionResource = map[string]string{
- "": "The names of the group, the version, and the resource.",
- "group": "The name of the group.",
- "version": "The name of the version.",
- "resource": "The name of the resource.",
-}
-
-func (GroupVersionResource) SwaggerDoc() map[string]string {
- return map_GroupVersionResource
-}
-
-var map_MigrationCondition = map[string]string{
- "": "Describes the state of a migration at a certain point.",
- "type": "Type of the condition.",
- "status": "Status of the condition, one of True, False, Unknown.",
- "lastUpdateTime": "The last time this condition was updated.",
- "reason": "The reason for the condition's last transition.",
- "message": "A human readable message indicating details about the transition.",
-}
-
-func (MigrationCondition) SwaggerDoc() map[string]string {
- return map_MigrationCondition
-}
-
var map_StorageVersionMigration = map[string]string{
"": "StorageVersionMigration represents a migration of stored data to the latest storage version.",
"metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -73,9 +49,8 @@ func (StorageVersionMigrationList) SwaggerDoc() map[string]string {
}
var map_StorageVersionMigrationSpec = map[string]string{
- "": "Spec of the storage version migration.",
- "resource": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable.",
- "continueToken": "The token used in the list options to get the next chunk of objects to migrate. When the .status.conditions indicates the migration is \"Running\", users can use this token to check the progress of the migration.",
+ "": "Spec of the storage version migration.",
+ "resource": "The resource that is being migrated. The migrator sends requests to the endpoint serving the resource. Immutable.",
}
func (StorageVersionMigrationSpec) SwaggerDoc() map[string]string {
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.deepcopy.go
similarity index 78%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.deepcopy.go
index 9d35011d..2da553da 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.deepcopy.go
@@ -19,45 +19,13 @@ limitations under the License.
// Code generated by deepcopy-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource.
-func (in *GroupVersionResource) DeepCopy() *GroupVersionResource {
- if in == nil {
- return nil
- }
- out := new(GroupVersionResource)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) {
- *out = *in
- in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition.
-func (in *MigrationCondition) DeepCopy() *MigrationCondition {
- if in == nil {
- return nil
- }
- out := new(MigrationCondition)
- in.DeepCopyInto(out)
- return out
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) {
*out = *in
@@ -141,7 +109,7 @@ func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrati
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
- *out = make([]MigrationCondition, len(*in))
+ *out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..5a6701c3
--- /dev/null
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionMigration) OpenAPIModelName() string {
+ return "io.k8s.api.storagemigration.v1beta1.StorageVersionMigration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionMigrationList) OpenAPIModelName() string {
+ return "io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionMigrationSpec) OpenAPIModelName() string {
+ return "io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StorageVersionMigrationStatus) OpenAPIModelName() string {
+ return "io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationStatus"
+}
diff --git a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.prerelease-lifecycle.go
similarity index 96%
rename from operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go
rename to operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.prerelease-lifecycle.go
index acdb5743..f52614f7 100644
--- a/operator/vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go
+++ b/operator/vendor/k8s.io/api/storagemigration/v1beta1/zz_generated.prerelease-lifecycle.go
@@ -19,40 +19,40 @@ limitations under the License.
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *StorageVersionMigration) APILifecycleIntroduced() (major, minor int) {
- return 1, 30
+ return 1, 35
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *StorageVersionMigration) APILifecycleDeprecated() (major, minor int) {
- return 1, 33
+ return 1, 38
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *StorageVersionMigration) APILifecycleRemoved() (major, minor int) {
- return 1, 36
+ return 1, 41
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *StorageVersionMigrationList) APILifecycleIntroduced() (major, minor int) {
- return 1, 30
+ return 1, 35
}
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *StorageVersionMigrationList) APILifecycleDeprecated() (major, minor int) {
- return 1, 33
+ return 1, 38
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *StorageVersionMigrationList) APILifecycleRemoved() (major, minor int) {
- return 1, 36
+ return 1, 41
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/operator/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 2c101ab3..7b57a9eb 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -258,7 +258,8 @@ func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError
}
// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known.
-// DEPRECATED: Please use NewResourceExpired instead.
+//
+// Deprecated: Please use NewResourceExpired instead.
func NewGone(message string) *StatusError {
return &StatusError{metav1.Status{
Status: metav1.StatusFailure,
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
index c3a27216..9e1a5c0e 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
@@ -19,95 +19,6 @@ limitations under the License.
package resource
-import (
- fmt "fmt"
- math "math"
-
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *Quantity) Reset() { *m = Quantity{} }
-func (*Quantity) ProtoMessage() {}
-func (*Quantity) Descriptor() ([]byte, []int) {
- return fileDescriptor_7288c78ff45111e9, []int{0}
-}
-func (m *Quantity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantity.Unmarshal(m, b)
-}
-func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantity.Marshal(b, m, deterministic)
-}
-func (m *Quantity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantity.Merge(m, src)
-}
-func (m *Quantity) XXX_Size() int {
- return xxx_messageInfo_Quantity.Size(m)
-}
-func (m *Quantity) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Quantity proto.InternalMessageInfo
-
-func (m *QuantityValue) Reset() { *m = QuantityValue{} }
-func (*QuantityValue) ProtoMessage() {}
-func (*QuantityValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_7288c78ff45111e9, []int{1}
-}
-func (m *QuantityValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QuantityValue.Unmarshal(m, b)
-}
-func (m *QuantityValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QuantityValue.Marshal(b, m, deterministic)
-}
-func (m *QuantityValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QuantityValue.Merge(m, src)
-}
-func (m *QuantityValue) XXX_Size() int {
- return xxx_messageInfo_QuantityValue.Size(m)
-}
-func (m *QuantityValue) XXX_DiscardUnknown() {
- xxx_messageInfo_QuantityValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QuantityValue proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity")
- proto.RegisterType((*QuantityValue)(nil), "k8s.io.apimachinery.pkg.api.resource.QuantityValue")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_7288c78ff45111e9)
-}
-
-var fileDescriptor_7288c78ff45111e9 = []byte{
- // 234 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xc9, 0xb6, 0x28, 0xd6,
- 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4,
- 0x2f, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0x17, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0xea, 0xa7,
- 0xa7, 0xe6, 0xa5, 0x16, 0x25, 0x96, 0xa4, 0xa6, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xa9,
- 0x40, 0x74, 0xe9, 0x21, 0xeb, 0xd2, 0x2b, 0xc8, 0x4e, 0x07, 0x09, 0xe8, 0xc1, 0x74, 0x49, 0xe9,
- 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb,
- 0x83, 0x35, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x54, 0xc9, 0x82, 0x8b,
- 0x23, 0xb0, 0x34, 0x31, 0xaf, 0x24, 0xb3, 0xa4, 0x52, 0x48, 0x8c, 0x8b, 0xad, 0xb8, 0xa4, 0x28,
- 0x33, 0x2f, 0x5d, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xca, 0xb3, 0x12, 0x99, 0xb1, 0x40,
- 0x9e, 0xa1, 0x63, 0xa1, 0x3c, 0xc3, 0x84, 0x85, 0xf2, 0x0c, 0x0b, 0x16, 0xca, 0x33, 0x34, 0xdc,
- 0x51, 0x60, 0x50, 0xb2, 0xe5, 0xe2, 0x85, 0xe9, 0x0c, 0x4b, 0xcc, 0x29, 0x4d, 0x25, 0x4d, 0xbb,
- 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0,
- 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x6f, 0x3c, 0x92, 0x63, 0x7c,
- 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x15, 0x62, 0x42, 0x0a, 0x10, 0x00, 0x00,
- 0xff, 0xff, 0x50, 0x91, 0xd0, 0x9c, 0x50, 0x01, 0x00, 0x00,
-}
+func (m *Quantity) Reset()      { *m = Quantity{} }
+func (m *QuantityValue) Reset() { *m = QuantityValue{} }
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
index ddd0db8f..875ad857 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
@@ -93,6 +93,7 @@ option go_package = "k8s.io/apimachinery/pkg/api/resource";
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
message Quantity {
optional string string = 1;
}
@@ -105,6 +106,7 @@ message Quantity {
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
message QuantityValue {
optional string string = 1;
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go
new file mode 100644
index 00000000..712e155c
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.protomessage.pb.go
@@ -0,0 +1,26 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package resource
+
+func (*Quantity) ProtoMessage() {}
+
+func (*QuantityValue) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index d0aada9d..f3cd6006 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -99,6 +99,7 @@ import (
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
type Quantity struct {
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
@@ -858,6 +859,7 @@ func (q *Quantity) SetScaled(value int64, scale Scale) {
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:deepcopy-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.api.resource
type QuantityValue struct {
Quantity
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
index 3e0cdb10..364ec80d 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
@@ -20,12 +20,8 @@ import (
"fmt"
"io"
"math/bits"
-
- "github.com/gogo/protobuf/proto"
)
-var _ proto.Sizer = &Quantity{}
-
func (m *Quantity) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go
new file mode 100644
index 00000000..2575a2e8
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.model_name.go
@@ -0,0 +1,32 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package resource
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Quantity) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.api.resource.Quantity"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in QuantityValue) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.api.resource.QuantityValue"
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go
new file mode 100644
index 00000000..5622ca15
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/decimal_int.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+const decimalIntegerErrMsg string = "must be a valid decimal integer in canonical form"
+
+// IsDecimalInteger validates that a string represents a decimal integer in strict canonical form.
+// This means the string must be formatted exactly as a human would naturally write an integer,
+// without any programming language conventions like leading zeros, plus signs, or alternate bases.
+//
+// Valid values: "0", or non-zero integers (e.g., "123", "-456") whose first digit is 1-9,
+// followed by any digits 0-9.
+//
+// This validator is stricter than strconv.ParseInt, which accepts values with leading zeros (e.g., "0700")
+// and interprets them as decimal 700, potentially causing confusion with octal notation.
+func IsDecimalInteger(value string) []string {
+ n := len(value)
+ if n == 0 {
+ return []string{EmptyError()}
+ }
+
+ i := 0
+ if value[0] == '-' {
+ if n == 1 {
+ return []string{decimalIntegerErrMsg}
+ }
+ i = 1
+ }
+
+ if value[i] == '0' {
+ if n == 1 && i == 0 {
+ return nil
+ }
+ return []string{decimalIntegerErrMsg}
+ }
+
+ if value[i] < '1' || value[i] > '9' {
+ return []string{decimalIntegerErrMsg}
+ }
+
+ for i++; i < n; i++ {
+ if value[i] < '0' || value[i] > '9' {
+ return []string{decimalIntegerErrMsg}
+ }
+ }
+
+ return nil
+}
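
A short usage sketch for the new IsDecimalInteger helper, assuming the vendored import path in this diff: canonical integers return nil, while values such as "0700", "-0", "+1", or the empty string return error strings.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate/content"
)

func main() {
	for _, v := range []string{"0", "123", "-456", "0700", "-0", "+1", ""} {
		// nil means the value is a decimal integer in canonical form.
		fmt.Printf("%-8q -> %v\n", v, content.IsDecimalInteger(v))
	}
}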
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go
new file mode 100644
index 00000000..bd207207
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/dns.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+)
+
+const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+
+const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
+
+// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
+const DNS1123LabelMaxLength int = 63
+
+var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
+
+// IsDNS1123Label tests for a string that conforms to the definition of a label in
+// DNS (RFC 1123).
+func IsDNS1123Label(value string) []string {
+ var errs []string
+ if len(value) > DNS1123LabelMaxLength {
+ errs = append(errs, MaxLenError(DNS1123LabelMaxLength))
+ }
+ if !dns1123LabelRegexp.MatchString(value) {
+ if dns1123SubdomainRegexp.MatchString(value) {
+ // It was a valid subdomain and not a valid label. Since we
+ // already checked length, it must be dots.
+ errs = append(errs, "must not contain dots")
+ } else {
+ errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc"))
+ }
+ }
+ return errs
+}
+
+const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
+const dns1123SubdomainFmtCaseless string = "(?i)" + dns1123SubdomainFmt
+const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+const dns1123SubdomainCaselessErrorMsg string = "an RFC 1123 subdomain must consist of alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+
+// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
+const DNS1123SubdomainMaxLength int = 253
+
+var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainCaselessRegexp = regexp.MustCompile("^" + dns1123SubdomainFmtCaseless + "$")
+
+// IsDNS1123Subdomain tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123) lowercase.
+func IsDNS1123Subdomain(value string) []string {
+ return isDNS1123Subdomain(value, false)
+}
+
+// IsDNS1123SubdomainCaseless tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123).
+//
+// Deprecated: API validation should never be caseless. Caseless validation is a vector
+// for bugs and failed uniqueness assumptions. For example, names like "foo.com" and
+// "FOO.COM" are both accepted as valid, but they are typically not treated as equal by
+// consumers (e.g. CSI and DRA driver names). This fails the "least surprise" principle and
+// can cause inconsistent behaviors.
+//
+// Note: This allows uppercase names but is not caseless — uppercase and lowercase are
+// treated as different values. Use IsDNS1123Subdomain for strict, lowercase validation
+// instead.
+func IsDNS1123SubdomainCaseless(value string) []string {
+ return isDNS1123Subdomain(value, true)
+}
+
+func isDNS1123Subdomain(value string, caseless bool) []string {
+ var errs []string
+ if len(value) > DNS1123SubdomainMaxLength {
+ errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+ }
+ errorMsg := dns1123SubdomainErrorMsg
+ example := "example.com"
+ regexp := dns1123SubdomainRegexp
+ if caseless {
+ errorMsg = dns1123SubdomainCaselessErrorMsg
+ example = "Example.com"
+ regexp = dns1123SubdomainCaselessRegexp
+ }
+ if !regexp.MatchString(value) {
+ errs = append(errs, RegexError(errorMsg, dns1123SubdomainFmt, example))
+ }
+ return errs
+}
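
As a quick illustration of how the label and subdomain validators differ (again assuming the vendored content package), a dotted lowercase name passes IsDNS1123Subdomain but fails IsDNS1123Label with the dedicated "must not contain dots" message:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate/content"
)

func main() {
	name := "skyhook.example.com" // valid subdomain, invalid label (contains dots)

	fmt.Println(content.IsDNS1123Subdomain(name)) // []
	fmt.Println(content.IsDNS1123Label(name))     // [must not contain dots]
}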
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go
index 3370df48..a4a1b557 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/errors.go
@@ -29,6 +29,33 @@ func MinError[T constraints.Integer](min T) string {
return fmt.Sprintf("must be greater than or equal to %d", min)
}
+// MaxLenError returns a string explanation of a "string too long" validation
+// failure.
+func MaxLenError(length int) string {
+ return fmt.Sprintf("must be no more than %d bytes", length)
+}
+
+// EmptyError returns a string explanation of an "empty string" validation.
+func EmptyError() string {
+ return "must be non-empty"
+}
+
+// RegexError returns a string explanation of a regex validation failure.
+func RegexError(msg string, re string, examples ...string) string {
+ if len(examples) == 0 {
+ return msg + " (regex used for validation is '" + re + "')"
+ }
+ msg += " (e.g. "
+ for i := range examples {
+ if i > 0 {
+ msg += " or "
+ }
+ msg += "'" + examples[i] + "', "
+ }
+ msg += "regex used for validation is '" + re + "')"
+ return msg
+}
+
// NEQError returns a string explanation of a "must not be equal to" validation failure.
func NEQError[T any](disallowed T) string {
format := "%v"
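
The message helpers added to errors.go above are what the validators in this package compose their error strings from. A minimal sketch of calling them directly (illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate/content"
)

func main() {
	fmt.Println(content.MaxLenError(63)) // must be no more than 63 bytes
	fmt.Println(content.EmptyError())    // must be non-empty
	// RegexError appends the examples and the regex itself to the base message.
	fmt.Println(content.RegexError("a valid name", "[a-z]+", "abc"))
}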
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go
new file mode 100644
index 00000000..3913ec99
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/identifier.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+)
+
+const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
+const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
+
+var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
+
+// IsCIdentifier tests for a string that conforms the definition of an identifier
+// in C. This checks the format, but not the length.
+func IsCIdentifier(value string) []string {
+ if !cIdentifierRegexp.MatchString(value) {
+ return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
+ }
+ return nil
+}
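
A brief sketch of the C-identifier check (same vendored import path assumed): underscores and mixed case are accepted, hyphens are not.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate/content"
)

func main() {
	fmt.Println(content.IsCIdentifier("MY_NAME")) // [] (valid)
	fmt.Println(content.IsCIdentifier("my-name")) // one RegexError entry ('-' is not allowed)
}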
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go
new file mode 100644
index 00000000..44e82eef
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/content/kube.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package content
+
+import (
+ "regexp"
+ "strings"
+)
+
+const labelKeyCharFmt string = "[A-Za-z0-9]"
+const labelKeyExtCharFmt string = "[-A-Za-z0-9_.]"
+const labelKeyFmt string = "(" + labelKeyCharFmt + labelKeyExtCharFmt + "*)?" + labelKeyCharFmt
+const labelKeyErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const labelKeyMaxLength int = 63
+
+var labelKeyRegexp = regexp.MustCompile("^" + labelKeyFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name", which is the same as a label key.
+//
+// Deprecated: use IsLabelKey instead.
+var IsQualifiedName = IsLabelKey
+
+// IsLabelKey tests whether the value passed is a valid label key. This format
+// is used to validate many fields in the Kubernetes API.
+// Label keys consist of an optional prefix and a name, separated by a '/'.
+// If the value is not valid, a list of error strings is returned. Otherwise, an
+// empty list (or nil) is returned.
+func IsLabelKey(value string) []string {
+ var errs []string
+ parts := strings.Split(value, "/")
+ var name string
+ switch len(parts) {
+ case 1:
+ name = parts[0]
+ case 2:
+ var prefix string
+ prefix, name = parts[0], parts[1]
+ if len(prefix) == 0 {
+ errs = append(errs, "prefix part "+EmptyError())
+ } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
+ errs = append(errs, prefixEach(msgs, "prefix part ")...)
+ }
+ default:
+ return append(errs, "a valid label key "+RegexError(labelKeyErrMsg, labelKeyFmt, "MyName", "my.name", "123-abc")+
+ " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
+ }
+
+ if len(name) == 0 {
+ errs = append(errs, "name part "+EmptyError())
+ } else if len(name) > labelKeyMaxLength {
+ errs = append(errs, "name part "+MaxLenError(labelKeyMaxLength))
+ }
+ if !labelKeyRegexp.MatchString(name) {
+ errs = append(errs, "name part "+RegexError(labelKeyErrMsg, labelKeyFmt, "MyName", "my.name", "123-abc"))
+ }
+ return errs
+}
+
+const labelValueFmt string = "(" + labelKeyFmt + ")?"
+const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+
+// LabelValueMaxLength is a label's max length
+const LabelValueMaxLength int = 63
+
+var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+
+// IsLabelValue tests whether the value passed is a valid label value. If
+// the value is not valid, a list of error strings is returned. Otherwise an
+// empty list (or nil) is returned.
+func IsLabelValue(value string) []string {
+ var errs []string
+ if len(value) > LabelValueMaxLength {
+ errs = append(errs, MaxLenError(LabelValueMaxLength))
+ }
+ if !labelValueRegexp.MatchString(value) {
+ errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
+ }
+ return errs
+}
+
+func prefixEach(msgs []string, prefix string) []string {
+ for i := range msgs {
+ msgs[i] = prefix + msgs[i]
+ }
+ return msgs
+}
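
A small sketch of the label-key and label-value checks introduced above (hypothetical values; import path as vendored in this diff). Keys may carry a DNS-subdomain prefix separated by '/', and values may be empty:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate/content"
)

func main() {
	fmt.Println(content.IsLabelKey("skyhook.nvidia.com/package")) // [] (prefixed key is valid)
	fmt.Println(content.IsLabelKey("-bad"))                       // name part must start with an alphanumeric
	fmt.Println(content.IsLabelValue(""))                         // [] (empty values are allowed)
	fmt.Println(content.IsLabelValue("too/bad"))                  // '/' is not allowed in values
}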
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/each.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/each.go
index 3d5bd3c1..c815d6d9 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/each.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/each.go
@@ -169,3 +169,18 @@ func SemanticDeepEqual[T any](a, b T) bool {
func DirectEqual[T comparable](a, b T) bool {
return a == b
}
+
+// DirectEqualPtr is a MatchFunc that dereferences two pointers and uses the ==
+// operator to compare the values. If both pointers are nil, it returns true.
+// If one pointer is nil and the other is not, it returns false.
+// It can be used by any other function that needs to compare two pointees
+// directly.
+func DirectEqualPtr[T comparable](a, b *T) bool {
+ if a == b {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ return *a == *b
+}
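
DirectEqualPtr compares pointees rather than pointer values; a quick sketch of how it differs from DirectEqual when applied to pointer types:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validate"
)

func main() {
	a, b := "x", "x"
	var nilStr *string

	fmt.Println(validate.DirectEqual(&a, &b))            // false: different pointer values
	fmt.Println(validate.DirectEqualPtr(&a, &b))         // true: equal pointees
	fmt.Println(validate.DirectEqualPtr(nilStr, nilStr)) // true: both nil
	fmt.Println(validate.DirectEqualPtr(&a, nilStr))     // false: one side nil
}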
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/enum.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/enum.go
index fc2167a4..6e5bcf37 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/enum.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/enum.go
@@ -25,16 +25,50 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
-// Enum verifies that the specified value is one of the valid symbols.
-// This is for string enums only.
-func Enum[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T, symbols sets.Set[T]) field.ErrorList {
+// Enum verifies that a given value is a member of a set of enum values.
+// Exclusion rules that apply when options are enabled or disabled are also considered.
+// If ANY exclude rule matches for a value, that value is excluded from the enum when validating.
+func Enum[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T, validValues sets.Set[T], exclusions []EnumExclusion[T]) field.ErrorList {
if value == nil {
return nil
}
- if !symbols.Has(*value) {
- symbolList := symbols.UnsortedList()
- slices.Sort(symbolList)
- return field.ErrorList{field.NotSupported[T](fldPath, *value, symbolList)}
+ if !validValues.Has(*value) || isExcluded(op, exclusions, *value) {
+ return field.ErrorList{field.NotSupported[T](fldPath, *value, supportedValues(op, validValues, exclusions))}
}
return nil
}
+
+// supportedValues returns a sorted list of supported values.
+// Excluded enum values are not included in the list.
+func supportedValues[T ~string](op operation.Operation, values sets.Set[T], exclusions []EnumExclusion[T]) []T {
+ res := make([]T, 0, len(values))
+ for key := range values {
+ if isExcluded(op, exclusions, key) {
+ continue
+ }
+ res = append(res, key)
+ }
+ slices.Sort(res)
+ return res
+}
+
+// EnumExclusion represents a single enum exclusion rule.
+type EnumExclusion[T ~string] struct {
+ // Value specifies the enum value to be conditionally excluded.
+ Value T
+ // ExcludeWhen determines the condition for exclusion.
+ // If true, the value is excluded if the option is present.
+ // If false, the value is excluded if the option is NOT present.
+ ExcludeWhen bool
+ // Option is the name of the feature option that controls the exclusion.
+ Option string
+}
+
+func isExcluded[T ~string](op operation.Operation, exclusions []EnumExclusion[T], value T) bool {
+ for _, rule := range exclusions {
+ if rule.Value == value && rule.ExcludeWhen == op.HasOption(rule.Option) {
+ return true
+ }
+ }
+ return false
+}
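
To make the new exclusion-aware signature concrete, a minimal usage sketch (not part of the patch); the Mode type, its values, and the "BetaFeature" option name are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/api/validate"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// Mode is an illustrative string enum type; it is not defined by the patch.
type Mode string

func main() {
	validValues := sets.New[Mode]("Standard", "Strict", "Experimental")

	// "Experimental" is excluded unless the hypothetical "BetaFeature" option
	// is present on the operation (ExcludeWhen=false means: exclude when the
	// option is NOT present).
	exclusions := []validate.EnumExclusion[Mode]{
		{Value: "Experimental", ExcludeWhen: false, Option: "BetaFeature"},
	}

	op := operation.Operation{Type: operation.Update} // no options set
	val := Mode("Experimental")

	errs := validate.Enum(context.Background(), op, field.NewPath("spec", "mode"),
		&val, nil, validValues, exclusions)
	fmt.Println(errs) // "Experimental" is reported as unsupported without the option
}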
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/immutable.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/immutable.go
index 5a9d3da2..01a879c9 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/immutable.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/immutable.go
@@ -19,46 +19,22 @@ package validate
import (
"context"
- "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/operation"
"k8s.io/apimachinery/pkg/util/validation/field"
)
-// ImmutableByCompare verifies that the specified value has not changed in the
-// course of an update operation. It does nothing if the old value is not
-// provided. If the caller needs to compare types that are not trivially
-// comparable, they should use ImmutableByReflect instead.
+// Immutable verifies that the specified value has not changed in the course of
+// an update operation. It does nothing if the old value is not provided.
//
-// Caution: structs with pointer fields satisfy comparable, but this function
-// will only compare pointer values. It does not compare the pointed-to
-// values.
-func ImmutableByCompare[T comparable](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T) field.ErrorList {
+// This function unconditionally returns a validation error as it
+// relies on the default ratcheting mechanism to only be called when a
+// change to the field has already been detected. This avoids a redundant
+// equivalence check across ratcheting and this function.
+func Immutable[T any](_ context.Context, op operation.Operation, fldPath *field.Path, _, _ T) field.ErrorList {
if op.Type != operation.Update {
return nil
}
- if value == nil && oldValue == nil {
- return nil
- }
- if value == nil || oldValue == nil || *value != *oldValue {
- return field.ErrorList{
- field.Forbidden(fldPath, "field is immutable"),
- }
- }
- return nil
-}
-
-// ImmutableByReflect verifies that the specified value has not changed in
-// the course of an update operation. It does nothing if the old value is not
-// provided. Unlike ImmutableByCompare, this function can be used with types that are
-// not directly comparable, at the cost of performance.
-func ImmutableByReflect[T any](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue T) field.ErrorList {
- if op.Type != operation.Update {
- return nil
- }
- if !equality.Semantic.DeepEqual(value, oldValue) {
- return field.ErrorList{
- field.Forbidden(fldPath, "field is immutable"),
- }
+ return field.ErrorList{
+ field.Invalid(fldPath, nil, "field is immutable").WithOrigin("immutable"),
}
- return nil
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/item.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/item.go
index 9cc61dd3..aba417fa 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/item.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/item.go
@@ -26,16 +26,21 @@ import (
// MatchItemFn takes a pointer to an item and returns true if it matches the criteria.
type MatchItemFn[T any] func(*T) bool
-// SliceItem finds the first item in newList that satisfies the 'matches' predicate,
-// and if found, also looks for a matching item in oldList. It then invokes
-// 'itemValidator' on these items.
-// The fldPath passed to itemValidator is indexed to the matched item's position in newList.
-// This function processes only the *first* matching item found in newList.
-// It assumes that the 'matches' predicate targets a unique identifier (primary key) and
-// will match at most one element per list.
-// If this assumption is violated, changes in list order can lead this function
-// to have inconsistent behavior.
-// This function does not validate items that were removed (present in oldList but not in newList).
+// SliceItem finds the first item in newList that satisfies the match function,
+// and if found, also looks for a matching item in oldList. If the value of the
+// item is the same as the previous value, as per the equiv function, then no
+// validation is performed. Otherwise, it invokes 'itemValidator' on these items.
+//
+// This function processes only the *first* matching item found in newList. It
+// assumes that the match function targets a unique identifier (primary key)
+// and will match at most one element per list. If this assumption is violated,
+// changes in list order can lead this function to have inconsistent behavior.
+//
+// The fldPath passed to itemValidator is indexed to the matched item's
+// position in newList.
+//
+// This function does not validate items that were removed (present in oldList
+// but not in newList).
func SliceItem[TList ~[]TItem, TItem any](
ctx context.Context, op operation.Operation, fldPath *field.Path,
newList, oldList TList,
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/limits.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/limits.go
index 5f5fe83a..b6db5e08 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/limits.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/limits.go
@@ -25,6 +25,26 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
)
+// MaxLength verifies that the specified value is not longer than max
+// characters.
+func MaxLength[T ~string](_ context.Context, _ operation.Operation, fldPath *field.Path, value, _ *T, max int) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ if len(*value) > max {
+ return field.ErrorList{field.TooLong(fldPath, *value, max).WithOrigin("maxLength")}
+ }
+ return nil
+}
+
+// MaxItems verifies that the specified slice is not longer than max items.
+func MaxItems[T any](_ context.Context, _ operation.Operation, fldPath *field.Path, value, _ []T, max int) field.ErrorList {
+ if len(value) > max {
+ return field.ErrorList{field.TooMany(fldPath, len(value), max).WithOrigin("maxItems")}
+ }
+ return nil
+}
+
// Minimum verifies that the specified value is greater than or equal to min.
func Minimum[T constraints.Integer](_ context.Context, _ operation.Operation, fldPath *field.Path, value, _ *T, min T) field.ErrorList {
if value == nil {
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/options.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/options.go
new file mode 100644
index 00000000..44236550
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/options.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validate
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/operation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// IfOption conditionally evaluates a validation function. The validator is
+// called when the option's presence on the operation matches enabled: when
+// the option is set and enabled is true, or when the option is not set and
+// enabled is false. Otherwise, the validator is not called.
+func IfOption[T any](ctx context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T,
+ optionName string, enabled bool, validator func(context.Context, operation.Operation, *field.Path, *T, *T) field.ErrorList,
+) field.ErrorList {
+ if op.HasOption(optionName) == enabled {
+ return validator(ctx, op, fldPath, value, oldValue)
+ }
+ return nil
+}
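
A minimal sketch (not part of the patch) of how IfOption gates another validator; the "RelaxedLimits" option name and the length limit are illustrative, and MaxLength is the helper added to limits.go above.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/api/validate"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	val := "a-rather-long-value"
	op := operation.Operation{Type: operation.Update} // the "RelaxedLimits" option is not set

	// With enabled=false, the wrapped validator runs only when the option is
	// NOT present, so the 8-character limit applies here.
	errs := validate.IfOption(context.Background(), op, field.NewPath("spec", "id"),
		&val, nil, "RelaxedLimits", false,
		func(ctx context.Context, op operation.Operation, p *field.Path, v, old *string) field.ErrorList {
			return validate.MaxLength(ctx, op, p, v, old, 8)
		})
	fmt.Println(errs) // reports the value as too long
}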
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/strfmt.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/strfmt.go
new file mode 100644
index 00000000..9a214730
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/strfmt.go
@@ -0,0 +1,290 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validate
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/operation"
+ "k8s.io/apimachinery/pkg/api/validate/content"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const (
+ uuidErrorMessage = "must be a lowercase UUID in 8-4-4-4-12 format"
+ defaultResourceRequestsPrefix = "requests."
+ // Default namespace prefix.
+ resourceDefaultNamespacePrefix = "kubernetes.io/"
+ resourceDeviceMaxLength = 32
+)
+
+// ShortName verifies that the specified value is a valid "short name"
+// (sometimes known as a "DNS label").
+// - must not be empty
+// - must be less than 64 characters long
+// - must start and end with lower-case alphanumeric characters
+// - must contain only lower-case alphanumeric characters or dashes
+//
+// All errors returned by this function will be "invalid" type errors. If the
+// caller wants better errors, it must take responsibility for checking things
+// like required/optional and max-length.
+func ShortName[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ for _, msg := range content.IsDNS1123Label((string)(*value)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, *value, msg).WithOrigin("format=k8s-short-name"))
+ }
+ return allErrs
+}
+
+// LongName verifies that the specified value is a valid "long name"
+// (sometimes known as a "DNS subdomain").
+// - must not be empty
+// - must be less than 254 characters long
+// - each element must start and end with lower-case alphanumeric characters
+// - each element must contain only lower-case alphanumeric characters or dashes
+//
+// All errors returned by this function will be "invalid" type errors. If the
+// caller wants better errors, it must take responsibility for checking things
+// like required/optional and max-length.
+func LongName[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ for _, msg := range content.IsDNS1123Subdomain((string)(*value)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, *value, msg).WithOrigin("format=k8s-long-name"))
+ }
+ return allErrs
+}
+
+// LabelKey verifies that the specified value is a valid label key.
+// A label key is composed of an optional prefix and a name, separated by a '/'.
+// The name part is required and must:
+// - be 63 characters or less
+// - begin and end with an alphanumeric character ([a-z0-9A-Z])
+// - contain only alphanumeric characters, dashes (-), underscores (_), or dots (.)
+//
+// The prefix is optional and must:
+// - be a DNS subdomain
+// - be no more than 253 characters
+func LabelKey[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ for _, msg := range content.IsLabelKey((string)(*value)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, *value, msg).WithOrigin("format=k8s-label-key"))
+ }
+ return allErrs
+}
+
+// LongNameCaseless verifies that the specified value is a valid "long name"
+// (sometimes known as a "DNS subdomain"), but is case-insensitive.
+// - must not be empty
+// - must be less than 254 characters long
+// - each element must start and end with alphanumeric characters
+// - each element must contain only alphanumeric characters or dashes
+//
+// Deprecated: Case-insensitive names are not recommended as they can lead to ambiguity
+// (e.g., 'Foo', 'FOO', and 'foo' would be allowed names for foo). Use LongName for strict, lowercase validation.
+func LongNameCaseless[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ for _, msg := range content.IsDNS1123SubdomainCaseless((string)(*value)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, *value, msg).WithOrigin("format=k8s-long-name-caseless"))
+ }
+ return allErrs
+}
+
+// LabelValue verifies that the specified value is a valid label value.
+// - can be empty
+// - must be no more than 63 characters
+// - must start and end with alphanumeric characters
+// - must contain only alphanumeric characters, dashes, underscores, or dots
+func LabelValue[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ for _, msg := range content.IsLabelValue((string)(*value)) {
+ allErrs = append(allErrs, field.Invalid(fldPath, *value, msg).WithOrigin("format=k8s-label-value"))
+ }
+ return allErrs
+}
+
+// UUID verifies that the specified value is a valid UUID (RFC 4122).
+// - must be 36 characters long
+// - must be in the normalized form `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
+// - must use only lowercase hexadecimal characters
+func UUID[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ val := (string)(*value)
+ if len(val) != 36 {
+ return field.ErrorList{field.Invalid(fldPath, val, uuidErrorMessage).WithOrigin("format=k8s-uuid")}
+ }
+ for idx := 0; idx < len(val); idx++ {
+ character := val[idx]
+ switch idx {
+ case 8, 13, 18, 23:
+ if character != '-' {
+ return field.ErrorList{field.Invalid(fldPath, val, uuidErrorMessage).WithOrigin("format=k8s-uuid")}
+ }
+ default:
+ // should be lower case hexadecimal.
+ if (character < '0' || character > '9') && (character < 'a' || character > 'f') {
+ return field.ErrorList{field.Invalid(fldPath, val, uuidErrorMessage).WithOrigin("format=k8s-uuid")}
+ }
+ }
+ }
+ return nil
+}
+
+// ResourcePoolName verifies that the specified value is one or more valid "long name"
+// parts separated by a '/' and no longer than 253 characters.
+func ResourcePoolName[T ~string](ctx context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ val := (string)(*value)
+ var allErrs field.ErrorList
+ if len(val) > 253 {
+ allErrs = append(allErrs, field.TooLong(fldPath, val, 253))
+ }
+ parts := strings.Split(val, "/")
+ for i, part := range parts {
+ if len(part) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, val, fmt.Sprintf("segment %d: must not be empty", i)))
+ continue
+ }
+ // Note that we are overwriting the origin from the underlying LongName validation.
+ allErrs = append(allErrs, LongName(ctx, op, fldPath, &part, nil).PrefixDetail(fmt.Sprintf("segment %d: ", i))...)
+ }
+ return allErrs.WithOrigin("format=k8s-resource-pool-name")
+}
+
+// ExtendedResourceName verifies that the specified value is a valid extended resource name.
+// An extended resource name is a domain-prefixed name that does not use the "kubernetes.io"
+// or "requests." prefixes. Must be a valid label key when appended to "requests.", as in quota.
+//
+// - must have slash domain and name.
+// - must not have the "kubernetes.io" domain
+// - must not have the "requests." prefix
+// - name must be 63 characters or less
+// - must be a valid label key when appended to "requests.", as in quota
+// -- must contain only alphanumeric characters, dashes, underscores, or dots
+// -- must end with an alphanumeric character
+func ExtendedResourceName[T ~string](_ context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ val := string(*value)
+ allErrs := field.ErrorList{}
+ if !strings.Contains(val, "/") {
+ allErrs = append(allErrs, field.Invalid(fldPath, val, "a name must be a domain-prefixed path, such as 'example.com/my-prop'"))
+ } else if strings.Contains(val, resourceDefaultNamespacePrefix) {
+ allErrs = append(allErrs, field.Invalid(fldPath, val, fmt.Sprintf("must not have %q domain", resourceDefaultNamespacePrefix)))
+ }
+ // Ensure extended resource is not type of quota.
+ if strings.HasPrefix(val, defaultResourceRequestsPrefix) {
+ allErrs = append(allErrs, field.Invalid(fldPath, val, fmt.Sprintf("must not have %q prefix", defaultResourceRequestsPrefix)))
+ }
+
+ // Ensure it satisfies the rules in IsLabelKey() after converted into quota resource name
+ nameForQuota := fmt.Sprintf("%s%s", defaultResourceRequestsPrefix, val)
+ for _, msg := range content.IsLabelKey(nameForQuota) {
+ allErrs = append(allErrs, field.Invalid(fldPath, val, msg))
+ }
+ return allErrs.WithOrigin("format=k8s-extended-resource-name")
+}
+
+// resourcesQualifiedName verifies that the specified value is a valid Kubernetes resources
+// qualified name.
+// - must not be empty
+// - must be composed of an optional prefix and a name, separated by a slash (e.g., "prefix/name")
+// - the prefix, if specified, must be a DNS subdomain
+// - the name part must be a C identifier
+// - the name part must be no more than 32 characters
+func resourcesQualifiedName[T ~string](ctx context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ s := string(*value)
+ parts := strings.Split(s, "/")
+ // TODO: This validation and the corresponding handwritten validation validateQualifiedName in
+	// pkg/apis/resource/validation/validation.go are not validating whether there is more than one
+ // slash. This should be fixed in both places.
+ switch len(parts) {
+ case 1:
+ allErrs = append(allErrs, validateCIdentifier(parts[0], resourceDeviceMaxLength, fldPath)...)
+ case 2:
+ if len(parts[0]) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "prefix must not be empty"))
+ } else {
+ if len(parts[0]) > 63 {
+ allErrs = append(allErrs, field.TooLong(fldPath, parts[0], 63))
+ }
+ allErrs = append(allErrs, LongName(ctx, op, fldPath, &parts[0], nil).PrefixDetail("prefix: ")...)
+ }
+ if len(parts[1]) == 0 {
+ allErrs = append(allErrs, field.Invalid(fldPath, "", "name must not be empty"))
+ } else {
+ allErrs = append(allErrs, validateCIdentifier(parts[1], resourceDeviceMaxLength, fldPath)...)
+ }
+ }
+ return allErrs
+}
+
+// ResourceFullyQualifiedName verifies that the specified value is a valid Kubernetes
+// fully qualified name.
+// - must not be empty
+// - must be composed of a prefix and a name, separated by a slash (e.g., "prefix/name")
+// - the prefix must be a DNS subdomain
+// - the name part must be a C identifier
+// - the name part must be no more than 32 characters
+func ResourceFullyQualifiedName[T ~string](ctx context.Context, op operation.Operation, fldPath *field.Path, value, _ *T) field.ErrorList {
+ if value == nil {
+ return nil
+ }
+ var allErrs field.ErrorList
+ s := string(*value)
+ allErrs = append(allErrs, resourcesQualifiedName(ctx, op, fldPath, &s, nil)...)
+ if !strings.Contains(s, "/") {
+ allErrs = append(allErrs, field.Invalid(fldPath, s, "a fully qualified name must be a domain and a name separated by a slash"))
+ }
+ return allErrs.WithOrigin("format=k8s-resource-fully-qualified-name")
+}
+
+func validateCIdentifier(id string, length int, fldPath *field.Path) field.ErrorList {
+ var allErrs field.ErrorList
+ if len(id) > length {
+ allErrs = append(allErrs, field.TooLong(fldPath, id, length))
+ }
+ for _, msg := range content.IsCIdentifier(id) {
+ allErrs = append(allErrs, field.Invalid(fldPath, id, msg))
+ }
+ return allErrs
+}
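
A small sketch (not part of the patch) exercising two of the new format validators; the field paths and values are illustrative.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/api/validate"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	ctx := context.Background()
	op := operation.Operation{Type: operation.Update}

	name := "Not_A_DNS_Label"                     // mixed case and underscores violate the short-name rules
	uid := "123e4567-e89b-12d3-a456-426614174000" // lowercase 8-4-4-4-12, so valid

	fmt.Println(validate.ShortName(ctx, op, field.NewPath("metadata", "name"), &name, nil))
	fmt.Println(validate.UUID(ctx, op, field.NewPath("spec", "uid"), &uid, nil)) // prints []
}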
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/subfield.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/subfield.go
index 844a2890..896f3c3f 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/subfield.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/subfield.go
@@ -27,18 +27,25 @@ import (
// nilable value.
type GetFieldFunc[Tstruct any, Tfield any] func(*Tstruct) Tfield
-// Subfield validates a subfield of a struct against a validator function.
-func Subfield[Tstruct any, Tfield any](ctx context.Context, op operation.Operation, fldPath *field.Path, newStruct, oldStruct *Tstruct,
- fldName string, getField GetFieldFunc[Tstruct, Tfield], validator ValidateFunc[Tfield]) field.ErrorList {
+// Subfield validates a subfield of a struct against a validator function. If
+// the value of the subfield is the same as the previous value, as per the
+// equiv function, then no validation is performed.
+//
+// The fldPath passed to the validator includes the subfield name.
+func Subfield[Tstruct any, Tfield any](
+ ctx context.Context, op operation.Operation, fldPath *field.Path,
+ newStruct, oldStruct *Tstruct,
+ fldName string, getField GetFieldFunc[Tstruct, Tfield],
+ equiv MatchFunc[Tfield],
+ validator ValidateFunc[Tfield],
+) field.ErrorList {
var errs field.ErrorList
newVal := getField(newStruct)
var oldVal Tfield
if oldStruct != nil {
oldVal = getField(oldStruct)
}
- // TODO: passing an equiv function to Subfield for direct comparison instead of
- // SemanticDeepEqual if fields can be compared directly, to improve performance.
- if op.Type == operation.Update && SemanticDeepEqual(newVal, oldVal) {
+ if op.Type == operation.Update && oldStruct != nil && equiv(newVal, oldVal) {
return nil
}
errs = append(errs, validator(ctx, op, fldPath.Child(fldName), newVal, oldVal)...)
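
A sketch (not part of the patch) of the new Subfield signature, using DirectEqualPtr from each.go above as the equiv function; the Spec struct and the replica limit are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/api/validate"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// Spec is an illustrative struct; it is not part of the patch.
type Spec struct {
	Replicas *int32
}

func ptrTo[T any](v T) *T { return &v }

func main() {
	oldSpec := &Spec{Replicas: ptrTo(int32(3))}
	newSpec := &Spec{Replicas: ptrTo(int32(500))}

	// DirectEqualPtr is the equiv function: on update, validation is skipped
	// when the pointed-to replica counts are unchanged.
	errs := validate.Subfield[Spec, *int32](context.Background(),
		operation.Operation{Type: operation.Update}, field.NewPath("spec"),
		newSpec, oldSpec, "replicas",
		func(s *Spec) *int32 { return s.Replicas },
		validate.DirectEqualPtr[int32],
		func(ctx context.Context, op operation.Operation, p *field.Path, v, _ *int32) field.ErrorList {
			if v != nil && *v > 100 {
				return field.ErrorList{field.Invalid(p, *v, "must be 100 or fewer")}
			}
			return nil
		})
	fmt.Println(errs)
}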
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/union.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/union.go
index af5e933e..03f45f86 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/union.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/union.go
@@ -47,11 +47,15 @@ type UnionValidationOptions struct {
//
// For example:
//
-// var UnionMembershipForABC := validate.NewUnionMembership([2]string{"a", "A"}, [2]string{"b", "B"}, [2]string{"c", "C"})
-// func ValidateABC(ctx context.Context, op operation.Operation, fldPath *field.Path, in *ABC) (errs fields.ErrorList) {
+// var UnionMembershipForABC := validate.NewUnionMembership(
+// validate.NewUnionMember("a"),
+// validate.NewUnionMember("b"),
+// validate.NewUnionMember("c"),
+// )
+// func ValidateABC(ctx context.Context, op operation.Operation, fldPath *field.Path, in *ABC) (errs field.ErrorList) {
// errs = append(errs, Union(ctx, op, fldPath, in, oldIn, UnionMembershipForABC,
// func(in *ABC) bool { return in.A != nil },
-// func(in *ABC) bool { return in.B != ""},
+// func(in *ABC) bool { return in.B != "" },
// func(in *ABC) bool { return in.C != 0 },
// )...)
// return errs
@@ -77,12 +81,16 @@ func Union[T any](_ context.Context, op operation.Operation, fldPath *field.Path
//
// For example:
//
-// var UnionMembershipForABC = validate.NewDiscriminatedUnionMembership("type", [2]string{"a", "A"}, [2]string{"b", "B"}, [2]string{"c", "C"})
+// var UnionMembershipForABC = validate.NewDiscriminatedUnionMembership("type",
+// validate.NewDiscriminatedUnionMember("a", "A"),
+// validate.NewDiscriminatedUnionMember("b", "B"),
+// validate.NewDiscriminatedUnionMember("c", "C"),
+// )
// func ValidateABC(ctx context.Context, op operation.Operation, fldPath *field.Path, in *ABC) (errs field.ErrorList) {
// errs = append(errs, DiscriminatedUnion(ctx, op, fldPath, in, oldIn, UnionMembershipForABC,
// func(in *ABC) string { return string(in.Type) },
// func(in *ABC) bool { return in.A != nil },
-// func(in *ABC) bool { return in.B != ""},
+// func(in *ABC) bool { return in.B != "" },
// func(in *ABC) bool { return in.C != 0 },
// )...)
// return errs
@@ -129,35 +137,42 @@ func DiscriminatedUnion[T any, D ~string](_ context.Context, op operation.Operat
return errs
}
-type member struct {
- fieldName, discriminatorValue string
+// UnionMember represents a member of a union.
+type UnionMember struct {
+ fieldName string
+ discriminatorValue string
+}
+
+// NewUnionMember returns a new UnionMember for the given field name.
+func NewUnionMember(fieldName string) UnionMember {
+ return UnionMember{fieldName: fieldName}
+}
+
+// NewDiscriminatedUnionMember returns a new UnionMember for the given field
+// name and discriminator value.
+func NewDiscriminatedUnionMember(fieldName, discriminatorValue string) UnionMember {
+ return UnionMember{fieldName: fieldName, discriminatorValue: discriminatorValue}
}
// UnionMembership represents an ordered list of field union memberships.
type UnionMembership struct {
discriminatorName string
- members []member
+ members []UnionMember
}
// NewUnionMembership returns a new UnionMembership for the given list of members.
-//
-// Each member is a [2]string to provide a fieldName and discriminatorValue pair, where
-// [0] identifies the field name and [1] identifies the union member Name.
-//
-// Field names must be unique.
-func NewUnionMembership(member ...[2]string) *UnionMembership {
+// Member names must be unique.
+func NewUnionMembership(member ...UnionMember) *UnionMembership {
return NewDiscriminatedUnionMembership("", member...)
}
// NewDiscriminatedUnionMembership returns a new UnionMembership for the given discriminator field and list of members.
// members are provided in the same way as for NewUnionMembership.
-func NewDiscriminatedUnionMembership(discriminatorFieldName string, members ...[2]string) *UnionMembership {
- u := &UnionMembership{}
- u.discriminatorName = discriminatorFieldName
- for _, fieldName := range members {
- u.members = append(u.members, member{fieldName: fieldName[0], discriminatorValue: fieldName[1]})
+func NewDiscriminatedUnionMembership(discriminatorFieldName string, members ...UnionMember) *UnionMembership {
+ return &UnionMembership{
+ discriminatorName: discriminatorFieldName,
+ members: members,
}
- return u
}
// allFields returns a string listing all the field names of the member of a union for use in error reporting.
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/update.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/update.go
new file mode 100644
index 00000000..e67ee28d
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/update.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validate
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "k8s.io/apimachinery/pkg/api/operation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// UpdateConstraint represents a constraint on update operations
+type UpdateConstraint int
+
+const (
+ // NoSet prevents unset->set transitions
+ NoSet UpdateConstraint = iota
+ // NoUnset prevents set->unset transitions
+ NoUnset
+ // NoModify prevents value changes but allows set/unset transitions
+ NoModify
+)
+
+// UpdateValueByCompare verifies update constraints for comparable value types.
+func UpdateValueByCompare[T comparable](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T, constraints ...UpdateConstraint) field.ErrorList {
+ if op.Type != operation.Update {
+ return nil
+ }
+
+ var errs field.ErrorList
+ var zero T
+
+ for _, constraint := range constraints {
+ switch constraint {
+ case NoSet:
+ if *oldValue == zero && *value != zero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be set once created").WithOrigin("update"))
+ }
+ case NoUnset:
+ if *oldValue != zero && *value == zero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be cleared once set").WithOrigin("update"))
+ }
+ case NoModify:
+ // Rely on validation ratcheting to detect that the value has changed.
+ // This check only verifies that the field was set in both the old and
+ // new objects, confirming it was a modification, not a set/unset.
+ if *oldValue != zero && *value != zero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be modified once set").WithOrigin("update"))
+ }
+ }
+ }
+
+ return errs
+}
+
+// UpdatePointer verifies update constraints for pointer types.
+func UpdatePointer[T any](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T, constraints ...UpdateConstraint) field.ErrorList {
+ if op.Type != operation.Update {
+ return nil
+ }
+
+ var errs field.ErrorList
+
+ for _, constraint := range constraints {
+ switch constraint {
+ case NoSet:
+ if oldValue == nil && value != nil {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be set once created").WithOrigin("update"))
+ }
+ case NoUnset:
+ if oldValue != nil && value == nil {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be cleared once set").WithOrigin("update"))
+ }
+ case NoModify:
+ // Rely on validation ratcheting to detect that the value has changed.
+ // This check only verifies that the field was non-nil in both the old
+ // and new objects, confirming it was a modification, not a set/unset.
+ if oldValue != nil && value != nil {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be modified once set").WithOrigin("update"))
+ }
+ }
+ }
+
+ return errs
+}
+
+// UpdateValueByReflect verifies update constraints for non-comparable value types using reflection.
+func UpdateValueByReflect[T any](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T, constraints ...UpdateConstraint) field.ErrorList {
+ if op.Type != operation.Update {
+ return nil
+ }
+
+ var errs field.ErrorList
+ var zero T
+ valueIsZero := equality.Semantic.DeepEqual(*value, zero)
+ oldValueIsZero := equality.Semantic.DeepEqual(*oldValue, zero)
+
+ for _, constraint := range constraints {
+ switch constraint {
+ case NoSet:
+ if oldValueIsZero && !valueIsZero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be set once created").WithOrigin("update"))
+ }
+ case NoUnset:
+ if !oldValueIsZero && valueIsZero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be cleared once set").WithOrigin("update"))
+ }
+ case NoModify:
+ // Rely on validation ratcheting to detect that the value has changed.
+ // This check only verifies that the field was set in both the old and
+ // new objects, confirming it was a modification, not a set/unset.
+ if !oldValueIsZero && !valueIsZero {
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be modified once set").WithOrigin("update"))
+ }
+ }
+ }
+
+ return errs
+}
+
+// UpdateStruct verifies update constraints for non-pointer struct types.
+// Non-pointer structs are always considered "set" and never "unset".
+func UpdateStruct[T any](_ context.Context, op operation.Operation, fldPath *field.Path, value, oldValue *T, constraints ...UpdateConstraint) field.ErrorList {
+ if op.Type != operation.Update {
+ return nil
+ }
+
+ var errs field.ErrorList
+
+ for _, constraint := range constraints {
+ switch constraint {
+ case NoSet, NoUnset:
+ // These constraints don't apply to non-pointer structs
+ // as they can't be unset. This should be caught at generation time.
+ continue
+ case NoModify:
+ // Non-pointer structs are always considered "set". Therefore, any
+ // change detected by validation ratcheting is a modification.
+ // The deep equality check is redundant and has been removed.
+ errs = append(errs, field.Invalid(fldPath, nil, "field cannot be modified once set").WithOrigin("update"))
+ }
+ }
+
+ return errs
+}
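
A minimal sketch (not part of the patch) of the update-constraint helpers; the field path and values are illustrative.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/operation"
	"k8s.io/apimachinery/pkg/api/validate"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	ctx := context.Background()
	op := operation.Operation{Type: operation.Update}
	path := field.NewPath("spec", "storageClassName")

	oldVal := ""         // previously unset
	newVal := "fast-ssd" // being set during the update

	// NoSet forbids the unset->set transition, so this reports an error.
	fmt.Println(validate.UpdateValueByCompare(ctx, op, path, &newVal, &oldVal, validate.NoSet))

	// NoUnset only forbids clearing an already-set value, so this returns no errors.
	fmt.Println(validate.UpdateValueByCompare(ctx, op, path, &newVal, &oldVal, validate.NoUnset))
}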
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/zeroorone.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/zeroorone.go
index 81cef54a..6a5df4ca 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validate/zeroorone.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validate/zeroorone.go
@@ -31,9 +31,14 @@ import (
//
// For example:
//
-// var ZeroOrOneOfMembershipForABC = validate.NewUnionMembership([2]string{"a", "A"}, [2]string{"b", "B"}, [2]string{"c", "C"})
+// var ZeroOrOneOfMembershipForABC = validate.NewUnionMembership(
+// validate.NewUnionMember("a"),
+// validate.NewUnionMember("b"),
+// validate.NewUnionMember("c"),
+// )
// func ValidateABC(ctx context.Context, op operation.Operation, fldPath *field.Path, in *ABC) (errs field.ErrorList) {
-// errs = append(errs, ZeroOrOneOfUnion(ctx, op, fldPath, in, oldIn, UnionMembershipForABC,
+// errs = append(errs, validate.ZeroOrOneOfUnion(ctx, op, fldPath, in, oldIn,
+// ZeroOrOneOfMembershipForABC,
// func(in *ABC) bool { return in.A != nil },
// func(in *ABC) bool { return in.B != ""},
// func(in *ABC) bool { return in.C != 0 },
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
index f9cada1f..35ea723a 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -33,6 +33,12 @@ const IsNegativeErrorMsg string = `must be greater than or equal to 0`
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc func(name string, prefix bool) []string
+// ValidateNameFuncWithErrors validates that the provided name is valid for a
+// given resource type.
+//
+// This is similar to ValidateNameFunc, except that it produces an ErrorList.
+type ValidateNameFuncWithErrors func(fldPath *field.Path, name string) field.ErrorList
+
// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
func NameIsDNSSubdomain(name string, prefix bool) []string {
if prefix {
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/operator/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
index 7e891fdf..839fcbc2 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -46,7 +46,7 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
for k := range annotations {
// The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking.
for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
- allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+				allErrs = append(allErrs, field.Invalid(fldPath, k, msg).WithOrigin("format=k8s-label-key"))
}
}
if err := ValidateAnnotationsSize(annotations); err != nil {
@@ -138,7 +138,6 @@ func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) fie
// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
// been performed.
-// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
metadata, err := meta.Accessor(objMeta)
if err != nil {
@@ -149,9 +148,37 @@ func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, name
return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)
}
+// objectMetaValidationOptions defines behavioral modifications for validating
+// an ObjectMeta.
+type objectMetaValidationOptions struct {
+ /* nothing here yet */
+}
+
+// ObjectMetaValidationOption specifies a behavioral modifier for
+// ValidateObjectMetaWithOpts and ValidateObjectMetaAccessorWithOpts.
+type ObjectMetaValidationOption func(opts *objectMetaValidationOptions)
+
+// ValidateObjectMetaWithOpts validates an object's metadata on creation. It
+// expects that name generation has already been performed, so name validation
+// is always executed.
+//
+// This is similar to ValidateObjectMeta, but takes options for future
+// extensibility and uses a different signature for the name validation
+// function. It also does not directly validate the generateName field,
+// because name generation should have already been performed and it is the
+// result of that generation that must conform to the nameFn.
+func ValidateObjectMetaWithOpts(objMeta *metav1.ObjectMeta, isNamespaced bool, nameFn ValidateNameFuncWithErrors, fldPath *field.Path, options ...ObjectMetaValidationOption) field.ErrorList {
+ metadata, err := meta.Accessor(objMeta)
+ if err != nil {
+ var allErrs field.ErrorList
+ allErrs = append(allErrs, field.InternalError(fldPath, err))
+ return allErrs
+ }
+ return ValidateObjectMetaAccessorWithOpts(metadata, isNamespaced, nameFn, fldPath, options...)
+}
+
// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already
// been performed.
-// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
@@ -170,7 +197,57 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg))
}
}
- if requiresNamespace {
+
+ return append(allErrs, validateObjectMetaAccessorWithOptsCommon(meta, requiresNamespace, fldPath, nil)...)
+}
+
+// ValidateObjectMetaAccessorWithOpts validates an object's metadata on
+// creation. It expects that name generation has already been performed, so
+// name validation is always executed.
+//
+// This is similar to ValidateObjectMetaAccessor, but takes options for future
+// extensibility and uses a different signature for the name validation
+// function. It also does not directly validate the generateName field,
+// because name generation should have already been performed and it is the
+// result of that generation that must conform to the nameFn.
+func ValidateObjectMetaAccessorWithOpts(meta metav1.Object, isNamespaced bool, nameFn ValidateNameFuncWithErrors, fldPath *field.Path, options ...ObjectMetaValidationOption) field.ErrorList {
+ opts := objectMetaValidationOptions{}
+ for _, opt := range options {
+ opt(&opts)
+ }
+
+ var allErrs field.ErrorList
+
+ // generateName is not directly validated here. Types can have
+ // different rules for name generation, and the nameFn is for validating
+ // the post-generation data, not the input. In the past we assumed that
+ // name generation was always "append 5 random characters", but that's not
+ // NECESSARILY true. Also, the nameFn should always be considering the max
+ // length of the name, and it doesn't know enough about the name generation
+ // to do that. Also, given a bad generateName, the user will get errors
+ // for both the generateName and name fields. We will focus validation on
+ // the name field, which should give a better UX overall.
+ // TODO(thockin): should we do a max-length check here? e.g. 1K or 4K?
+
+ if len(meta.GetGenerateName()) != 0 && len(meta.GetName()) == 0 {
+ allErrs = append(allErrs,
+ field.InternalError(fldPath.Child("name"), fmt.Errorf("generateName was specified (%q), but no name was generated", meta.GetGenerateName())))
+ }
+ if len(meta.GetName()) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+ } else {
+ allErrs = append(allErrs, nameFn(fldPath.Child("name"), meta.GetName())...)
+ }
+
+ return append(allErrs, validateObjectMetaAccessorWithOptsCommon(meta, isNamespaced, fldPath, &opts)...)
+}
+
+// validateObjectMetaAccessorWithOptsCommon is a shared function for validating
+// the parts of an ObjectMeta that are handled the same in both paths.
+func validateObjectMetaAccessorWithOptsCommon(meta metav1.Object, isNamespaced bool, fldPath *field.Path, _ *objectMetaValidationOptions) field.ErrorList {
+ var allErrs field.ErrorList
+
+ if isNamespaced {
if len(meta.GetNamespace()) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
} else {
@@ -180,6 +257,7 @@ func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, name
}
} else {
if len(meta.GetNamespace()) != 0 {
+ // TODO(thockin): change to "may not be specified on this type" or something
allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
}
}
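
A sketch (not part of the patch) of the new *WithOpts entry point, adapting the existing NameIsDNSSubdomain helper to the ErrorList-returning name-function signature; the object values are illustrative.

package main

import (
	"fmt"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	objMeta := &metav1.ObjectMeta{Name: "My_Invalid_Name", Namespace: "default"}

	// Wrap the message-based NameIsDNSSubdomain helper into the new
	// ValidateNameFuncWithErrors shape.
	nameFn := func(p *field.Path, name string) field.ErrorList {
		var errs field.ErrorList
		for _, msg := range apivalidation.NameIsDNSSubdomain(name, false) {
			errs = append(errs, field.Invalid(p, name, msg))
		}
		return errs
	}

	errs := apivalidation.ValidateObjectMetaWithOpts(objMeta, true, nameFn, field.NewPath("metadata"))
	fmt.Println(errs) // the underscored, mixed-case name is rejected
}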
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go
new file mode 100644
index 00000000..2734a8f3
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// ValidateListOptions returns all validation errors found while validating the ListOptions.
+func ValidateListOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList {
+ if options.Watch {
+ return validateWatchOptions(options, isWatchListFeatureEnabled)
+ }
+ allErrs := field.ErrorList{}
+ if match := options.ResourceVersionMatch; len(match) > 0 {
+ if len(options.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden unless resourceVersion is provided"))
+ }
+ if len(options.Continue) > 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided"))
+ }
+ if match != metav1.ResourceVersionMatchExact && match != metav1.ResourceVersionMatchNotOlderThan {
+ allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchExact), string(metav1.ResourceVersionMatchNotOlderThan), ""}))
+ }
+ if match == metav1.ResourceVersionMatchExact && options.ResourceVersion == "0" {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch \"exact\" is forbidden for resourceVersion \"0\""))
+ }
+ }
+ if options.SendInitialEvents != nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for list"))
+ }
+ return allErrs
+}
+
+func validateWatchOptions(options *internalversion.ListOptions, isWatchListFeatureEnabled bool) field.ErrorList {
+ allErrs := field.ErrorList{}
+ match := options.ResourceVersionMatch
+ if options.SendInitialEvents != nil {
+ if match != metav1.ResourceVersionMatchNotOlderThan {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), fmt.Sprintf("sendInitialEvents requires setting resourceVersionMatch to %s", metav1.ResourceVersionMatchNotOlderThan)))
+ }
+ if !isWatchListFeatureEnabled {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("sendInitialEvents"), "sendInitialEvents is forbidden for watch unless the WatchList feature gate is enabled"))
+ }
+ }
+ if len(match) > 0 {
+ if options.SendInitialEvents == nil {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden for watch unless sendInitialEvents is provided"))
+ }
+ if match != metav1.ResourceVersionMatchNotOlderThan {
+ allErrs = append(allErrs, field.NotSupported(field.NewPath("resourceVersionMatch"), match, []string{string(metav1.ResourceVersionMatchNotOlderThan)}))
+ }
+ if len(options.Continue) > 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("resourceVersionMatch"), "resourceVersionMatch is forbidden when continue is provided"))
+ }
+ }
+ return allErrs
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
index 617b9a5d..31c87361 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.apis.meta.v1
// +groupName=meta.k8s.io
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 9ee6c059..f6b1a6a4 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -23,12 +23,10 @@ import (
fmt "fmt"
io "io"
+ "sort"
- proto "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
runtime "k8s.io/apimachinery/pkg/runtime"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
@@ -37,1509 +35,95 @@ import (
k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-var _ = time.Kitchen
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *APIGroup) Reset() { *m = APIGroup{} }
-func (*APIGroup) ProtoMessage() {}
-func (*APIGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{0}
-}
-func (m *APIGroup) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroup) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroup.Merge(m, src)
-}
-func (m *APIGroup) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroup) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroup proto.InternalMessageInfo
-
-func (m *APIGroupList) Reset() { *m = APIGroupList{} }
-func (*APIGroupList) ProtoMessage() {}
-func (*APIGroupList) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{1}
-}
-func (m *APIGroupList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIGroupList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIGroupList.Merge(m, src)
-}
-func (m *APIGroupList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIGroupList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIGroupList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIGroupList proto.InternalMessageInfo
-
-func (m *APIResource) Reset() { *m = APIResource{} }
-func (*APIResource) ProtoMessage() {}
-func (*APIResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{2}
-}
-func (m *APIResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResource.Merge(m, src)
-}
-func (m *APIResource) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResource) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIResource proto.InternalMessageInfo
-
-func (m *APIResourceList) Reset() { *m = APIResourceList{} }
-func (*APIResourceList) ProtoMessage() {}
-func (*APIResourceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{3}
-}
-func (m *APIResourceList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIResourceList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIResourceList.Merge(m, src)
-}
-func (m *APIResourceList) XXX_Size() int {
- return m.Size()
-}
-func (m *APIResourceList) XXX_DiscardUnknown() {
- xxx_messageInfo_APIResourceList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIResourceList proto.InternalMessageInfo
-
-func (m *APIVersions) Reset() { *m = APIVersions{} }
-func (*APIVersions) ProtoMessage() {}
-func (*APIVersions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{4}
-}
-func (m *APIVersions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *APIVersions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_APIVersions.Merge(m, src)
-}
-func (m *APIVersions) XXX_Size() int {
- return m.Size()
-}
-func (m *APIVersions) XXX_DiscardUnknown() {
- xxx_messageInfo_APIVersions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_APIVersions proto.InternalMessageInfo
-
-func (m *ApplyOptions) Reset() { *m = ApplyOptions{} }
-func (*ApplyOptions) ProtoMessage() {}
-func (*ApplyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{5}
-}
-func (m *ApplyOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ApplyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ApplyOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplyOptions.Merge(m, src)
-}
-func (m *ApplyOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *ApplyOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplyOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplyOptions proto.InternalMessageInfo
-
-func (m *Condition) Reset() { *m = Condition{} }
-func (*Condition) ProtoMessage() {}
-func (*Condition) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{6}
-}
-func (m *Condition) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Condition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Condition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Condition.Merge(m, src)
-}
-func (m *Condition) XXX_Size() int {
- return m.Size()
-}
-func (m *Condition) XXX_DiscardUnknown() {
- xxx_messageInfo_Condition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Condition proto.InternalMessageInfo
-
-func (m *CreateOptions) Reset() { *m = CreateOptions{} }
-func (*CreateOptions) ProtoMessage() {}
-func (*CreateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{7}
-}
-func (m *CreateOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CreateOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CreateOptions.Merge(m, src)
-}
-func (m *CreateOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *CreateOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_CreateOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CreateOptions proto.InternalMessageInfo
-
-func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
-func (*DeleteOptions) ProtoMessage() {}
-func (*DeleteOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{8}
-}
-func (m *DeleteOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *DeleteOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteOptions.Merge(m, src)
-}
-func (m *DeleteOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo
-
-func (m *Duration) Reset() { *m = Duration{} }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{9}
-}
-func (m *Duration) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Duration) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Duration.Merge(m, src)
-}
-func (m *Duration) XXX_Size() int {
- return m.Size()
-}
-func (m *Duration) XXX_DiscardUnknown() {
- xxx_messageInfo_Duration.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Duration proto.InternalMessageInfo
-
-func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
-func (*FieldSelectorRequirement) ProtoMessage() {}
-func (*FieldSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{10}
-}
-func (m *FieldSelectorRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FieldSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FieldSelectorRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldSelectorRequirement.Merge(m, src)
-}
-func (m *FieldSelectorRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *FieldSelectorRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldSelectorRequirement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldSelectorRequirement proto.InternalMessageInfo
-
-func (m *FieldsV1) Reset() { *m = FieldsV1{} }
-func (*FieldsV1) ProtoMessage() {}
-func (*FieldsV1) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{11}
-}
-func (m *FieldsV1) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *FieldsV1) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FieldsV1.Merge(m, src)
-}
-func (m *FieldsV1) XXX_Size() int {
- return m.Size()
-}
-func (m *FieldsV1) XXX_DiscardUnknown() {
- xxx_messageInfo_FieldsV1.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo
-
-func (m *GetOptions) Reset() { *m = GetOptions{} }
-func (*GetOptions) ProtoMessage() {}
-func (*GetOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{12}
-}
-func (m *GetOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GetOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetOptions.Merge(m, src)
-}
-func (m *GetOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *GetOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_GetOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetOptions proto.InternalMessageInfo
-
-func (m *GroupKind) Reset() { *m = GroupKind{} }
-func (*GroupKind) ProtoMessage() {}
-func (*GroupKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{13}
-}
-func (m *GroupKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupKind.Merge(m, src)
-}
-func (m *GroupKind) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupKind) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupKind.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupKind proto.InternalMessageInfo
-
-func (m *GroupResource) Reset() { *m = GroupResource{} }
-func (*GroupResource) ProtoMessage() {}
-func (*GroupResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{14}
-}
-func (m *GroupResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupResource.Merge(m, src)
-}
-func (m *GroupResource) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupResource) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupResource proto.InternalMessageInfo
-
-func (m *GroupVersion) Reset() { *m = GroupVersion{} }
-func (*GroupVersion) ProtoMessage() {}
-func (*GroupVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{15}
-}
-func (m *GroupVersion) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersion.Merge(m, src)
-}
-func (m *GroupVersion) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupVersion proto.InternalMessageInfo
-
-func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
-func (*GroupVersionForDiscovery) ProtoMessage() {}
-func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{16}
-}
-func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src)
-}
-func (m *GroupVersionForDiscovery) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo
-
-func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
-func (*GroupVersionKind) ProtoMessage() {}
-func (*GroupVersionKind) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{17}
-}
-func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionKind) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionKind.Merge(m, src)
-}
-func (m *GroupVersionKind) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionKind) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionKind.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo
-
-func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
-func (*GroupVersionResource) ProtoMessage() {}
-func (*GroupVersionResource) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{18}
-}
-func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *GroupVersionResource) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GroupVersionResource.Merge(m, src)
-}
-func (m *GroupVersionResource) XXX_Size() int {
- return m.Size()
-}
-func (m *GroupVersionResource) XXX_DiscardUnknown() {
- xxx_messageInfo_GroupVersionResource.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo
-
-func (m *LabelSelector) Reset() { *m = LabelSelector{} }
-func (*LabelSelector) ProtoMessage() {}
-func (*LabelSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{19}
-}
-func (m *LabelSelector) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LabelSelector) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelSelector.Merge(m, src)
-}
-func (m *LabelSelector) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelSelector) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelSelector.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelSelector proto.InternalMessageInfo
-
-func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
-func (*LabelSelectorRequirement) ProtoMessage() {}
-func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{20}
-}
-func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelSelectorRequirement.Merge(m, src)
-}
-func (m *LabelSelectorRequirement) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelSelectorRequirement) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m)
-}
+func (m *APIGroup) Reset() { *m = APIGroup{} }
-var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo
+func (m *APIGroupList) Reset() { *m = APIGroupList{} }
-func (m *List) Reset() { *m = List{} }
-func (*List) ProtoMessage() {}
-func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{21}
-}
-func (m *List) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *List) XXX_Merge(src proto.Message) {
- xxx_messageInfo_List.Merge(m, src)
-}
-func (m *List) XXX_Size() int {
- return m.Size()
-}
-func (m *List) XXX_DiscardUnknown() {
- xxx_messageInfo_List.DiscardUnknown(m)
-}
+func (m *APIResource) Reset() { *m = APIResource{} }
-var xxx_messageInfo_List proto.InternalMessageInfo
+func (m *APIResourceList) Reset() { *m = APIResourceList{} }
-func (m *ListMeta) Reset() { *m = ListMeta{} }
-func (*ListMeta) ProtoMessage() {}
-func (*ListMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{22}
-}
-func (m *ListMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ListMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListMeta.Merge(m, src)
-}
-func (m *ListMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *ListMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_ListMeta.DiscardUnknown(m)
-}
+func (m *APIVersions) Reset() { *m = APIVersions{} }
-var xxx_messageInfo_ListMeta proto.InternalMessageInfo
+func (m *ApplyOptions) Reset() { *m = ApplyOptions{} }
-func (m *ListOptions) Reset() { *m = ListOptions{} }
-func (*ListOptions) ProtoMessage() {}
-func (*ListOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{23}
-}
-func (m *ListOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ListOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListOptions.Merge(m, src)
-}
-func (m *ListOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *ListOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_ListOptions.DiscardUnknown(m)
-}
+func (m *Condition) Reset() { *m = Condition{} }
-var xxx_messageInfo_ListOptions proto.InternalMessageInfo
+func (m *CreateOptions) Reset() { *m = CreateOptions{} }
-func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
-func (*ManagedFieldsEntry) ProtoMessage() {}
-func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{24}
-}
-func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ManagedFieldsEntry.Merge(m, src)
-}
-func (m *ManagedFieldsEntry) XXX_Size() int {
- return m.Size()
-}
-func (m *ManagedFieldsEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m)
-}
+func (m *DeleteOptions) Reset() { *m = DeleteOptions{} }
-var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo
+func (m *Duration) Reset() { *m = Duration{} }
-func (m *MicroTime) Reset() { *m = MicroTime{} }
-func (*MicroTime) ProtoMessage() {}
-func (*MicroTime) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{25}
-}
-func (m *MicroTime) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MicroTime.Unmarshal(m, b)
-}
-func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic)
-}
-func (m *MicroTime) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MicroTime.Merge(m, src)
-}
-func (m *MicroTime) XXX_Size() int {
- return xxx_messageInfo_MicroTime.Size(m)
-}
-func (m *MicroTime) XXX_DiscardUnknown() {
- xxx_messageInfo_MicroTime.DiscardUnknown(m)
-}
+func (m *FieldSelectorRequirement) Reset() { *m = FieldSelectorRequirement{} }
-var xxx_messageInfo_MicroTime proto.InternalMessageInfo
+func (m *FieldsV1) Reset() { *m = FieldsV1{} }
-func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
-func (*ObjectMeta) ProtoMessage() {}
-func (*ObjectMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{26}
-}
-func (m *ObjectMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ObjectMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ObjectMeta.Merge(m, src)
-}
-func (m *ObjectMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *ObjectMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_ObjectMeta.DiscardUnknown(m)
-}
+func (m *GetOptions) Reset() { *m = GetOptions{} }
-var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo
+func (m *GroupKind) Reset() { *m = GroupKind{} }
-func (m *OwnerReference) Reset() { *m = OwnerReference{} }
-func (*OwnerReference) ProtoMessage() {}
-func (*OwnerReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{27}
-}
-func (m *OwnerReference) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *OwnerReference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_OwnerReference.Merge(m, src)
-}
-func (m *OwnerReference) XXX_Size() int {
- return m.Size()
-}
-func (m *OwnerReference) XXX_DiscardUnknown() {
- xxx_messageInfo_OwnerReference.DiscardUnknown(m)
-}
+func (m *GroupResource) Reset() { *m = GroupResource{} }
-var xxx_messageInfo_OwnerReference proto.InternalMessageInfo
+func (m *GroupVersion) Reset() { *m = GroupVersion{} }
-func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
-func (*PartialObjectMetadata) ProtoMessage() {}
-func (*PartialObjectMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{28}
-}
-func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadata.Merge(m, src)
-}
-func (m *PartialObjectMetadata) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m)
-}
+func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} }
-var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo
+func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} }
-func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-func (*PartialObjectMetadataList) ProtoMessage() {}
-func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{29}
-}
-func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
-}
-func (m *PartialObjectMetadataList) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
-}
+func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} }
-var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
+func (m *LabelSelector) Reset() { *m = LabelSelector{} }
-func (m *Patch) Reset() { *m = Patch{} }
-func (*Patch) ProtoMessage() {}
-func (*Patch) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{30}
-}
-func (m *Patch) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Patch) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Patch.Merge(m, src)
-}
-func (m *Patch) XXX_Size() int {
- return m.Size()
-}
-func (m *Patch) XXX_DiscardUnknown() {
- xxx_messageInfo_Patch.DiscardUnknown(m)
-}
+func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} }
-var xxx_messageInfo_Patch proto.InternalMessageInfo
+func (m *List) Reset() { *m = List{} }
-func (m *PatchOptions) Reset() { *m = PatchOptions{} }
-func (*PatchOptions) ProtoMessage() {}
-func (*PatchOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{31}
-}
-func (m *PatchOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PatchOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PatchOptions.Merge(m, src)
-}
-func (m *PatchOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *PatchOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_PatchOptions.DiscardUnknown(m)
-}
+func (m *ListMeta) Reset() { *m = ListMeta{} }
-var xxx_messageInfo_PatchOptions proto.InternalMessageInfo
+func (m *ListOptions) Reset() { *m = ListOptions{} }
-func (m *Preconditions) Reset() { *m = Preconditions{} }
-func (*Preconditions) ProtoMessage() {}
-func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{32}
-}
-func (m *Preconditions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Preconditions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Preconditions.Merge(m, src)
-}
-func (m *Preconditions) XXX_Size() int {
- return m.Size()
-}
-func (m *Preconditions) XXX_DiscardUnknown() {
- xxx_messageInfo_Preconditions.DiscardUnknown(m)
-}
+func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} }
-var xxx_messageInfo_Preconditions proto.InternalMessageInfo
+func (m *MicroTime) Reset() { *m = MicroTime{} }
-func (m *RootPaths) Reset() { *m = RootPaths{} }
-func (*RootPaths) ProtoMessage() {}
-func (*RootPaths) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{33}
-}
-func (m *RootPaths) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RootPaths) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RootPaths.Merge(m, src)
-}
-func (m *RootPaths) XXX_Size() int {
- return m.Size()
-}
-func (m *RootPaths) XXX_DiscardUnknown() {
- xxx_messageInfo_RootPaths.DiscardUnknown(m)
-}
+func (m *ObjectMeta) Reset() { *m = ObjectMeta{} }
-var xxx_messageInfo_RootPaths proto.InternalMessageInfo
+func (m *OwnerReference) Reset() { *m = OwnerReference{} }
-func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
-func (*ServerAddressByClientCIDR) ProtoMessage() {}
-func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{34}
-}
-func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src)
-}
-func (m *ServerAddressByClientCIDR) XXX_Size() int {
- return m.Size()
-}
-func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m)
-}
+func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} }
-var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-func (m *Status) Reset() { *m = Status{} }
-func (*Status) ProtoMessage() {}
-func (*Status) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{35}
-}
-func (m *Status) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Status) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Status.Merge(m, src)
-}
-func (m *Status) XXX_Size() int {
- return m.Size()
-}
-func (m *Status) XXX_DiscardUnknown() {
- xxx_messageInfo_Status.DiscardUnknown(m)
-}
+func (m *Patch) Reset() { *m = Patch{} }
-var xxx_messageInfo_Status proto.InternalMessageInfo
+func (m *PatchOptions) Reset() { *m = PatchOptions{} }
-func (m *StatusCause) Reset() { *m = StatusCause{} }
-func (*StatusCause) ProtoMessage() {}
-func (*StatusCause) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{36}
-}
-func (m *StatusCause) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatusCause) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusCause.Merge(m, src)
-}
-func (m *StatusCause) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusCause) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusCause.DiscardUnknown(m)
-}
+func (m *Preconditions) Reset() { *m = Preconditions{} }
-var xxx_messageInfo_StatusCause proto.InternalMessageInfo
+func (m *RootPaths) Reset() { *m = RootPaths{} }
-func (m *StatusDetails) Reset() { *m = StatusDetails{} }
-func (*StatusDetails) ProtoMessage() {}
-func (*StatusDetails) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{37}
-}
-func (m *StatusDetails) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *StatusDetails) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusDetails.Merge(m, src)
-}
-func (m *StatusDetails) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusDetails) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusDetails.DiscardUnknown(m)
-}
+func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} }
-var xxx_messageInfo_StatusDetails proto.InternalMessageInfo
+func (m *Status) Reset() { *m = Status{} }
-func (m *TableOptions) Reset() { *m = TableOptions{} }
-func (*TableOptions) ProtoMessage() {}
-func (*TableOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{38}
-}
-func (m *TableOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TableOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TableOptions.Merge(m, src)
-}
-func (m *TableOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *TableOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_TableOptions.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TableOptions proto.InternalMessageInfo
-
-func (m *Time) Reset() { *m = Time{} }
-func (*Time) ProtoMessage() {}
-func (*Time) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{39}
-}
-func (m *Time) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Time.Unmarshal(m, b)
-}
-func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Time.Marshal(b, m, deterministic)
-}
-func (m *Time) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Time.Merge(m, src)
-}
-func (m *Time) XXX_Size() int {
- return xxx_messageInfo_Time.Size(m)
-}
-func (m *Time) XXX_DiscardUnknown() {
- xxx_messageInfo_Time.DiscardUnknown(m)
-}
+func (m *StatusCause) Reset() { *m = StatusCause{} }
-var xxx_messageInfo_Time proto.InternalMessageInfo
+func (m *StatusDetails) Reset() { *m = StatusDetails{} }
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{40}
-}
-func (m *Timestamp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Timestamp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Timestamp.Merge(m, src)
-}
-func (m *Timestamp) XXX_Size() int {
- return m.Size()
-}
-func (m *Timestamp) XXX_DiscardUnknown() {
- xxx_messageInfo_Timestamp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Timestamp proto.InternalMessageInfo
-
-func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (*TypeMeta) ProtoMessage() {}
-func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{41}
-}
-func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeMeta.Merge(m, src)
-}
-func (m *TypeMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeMeta.DiscardUnknown(m)
-}
+func (m *TableOptions) Reset() { *m = TableOptions{} }
-var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
+func (m *Time) Reset() { *m = Time{} }
-func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
-func (*UpdateOptions) ProtoMessage() {}
-func (*UpdateOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{42}
-}
-func (m *UpdateOptions) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *UpdateOptions) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UpdateOptions.Merge(m, src)
-}
-func (m *UpdateOptions) XXX_Size() int {
- return m.Size()
-}
-func (m *UpdateOptions) XXX_DiscardUnknown() {
- xxx_messageInfo_UpdateOptions.DiscardUnknown(m)
-}
+func (m *Timestamp) Reset() { *m = Timestamp{} }
-var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (m *Verbs) Reset() { *m = Verbs{} }
-func (*Verbs) ProtoMessage() {}
-func (*Verbs) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{43}
-}
-func (m *Verbs) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Verbs) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Verbs.Merge(m, src)
-}
-func (m *Verbs) XXX_Size() int {
- return m.Size()
-}
-func (m *Verbs) XXX_DiscardUnknown() {
- xxx_messageInfo_Verbs.DiscardUnknown(m)
-}
+func (m *UpdateOptions) Reset() { *m = UpdateOptions{} }
-var xxx_messageInfo_Verbs proto.InternalMessageInfo
+func (m *Verbs) Reset() { *m = Verbs{} }
-func (m *WatchEvent) Reset() { *m = WatchEvent{} }
-func (*WatchEvent) ProtoMessage() {}
-func (*WatchEvent) Descriptor() ([]byte, []int) {
- return fileDescriptor_a8431b6e0aeeb761, []int{44}
-}
-func (m *WatchEvent) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *WatchEvent) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchEvent.Merge(m, src)
-}
-func (m *WatchEvent) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchEvent) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchEvent.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchEvent proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup")
- proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList")
- proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource")
- proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList")
- proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions")
- proto.RegisterType((*ApplyOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ApplyOptions")
- proto.RegisterType((*Condition)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Condition")
- proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions")
- proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions")
- proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration")
- proto.RegisterType((*FieldSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement")
- proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1")
- proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions")
- proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind")
- proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource")
- proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion")
- proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery")
- proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind")
- proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource")
- proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry")
- proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement")
- proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List")
- proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta")
- proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions")
- proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry")
- proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime")
- proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry")
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry")
- proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference")
- proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata")
- proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList")
- proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch")
- proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions")
- proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions")
- proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths")
- proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR")
- proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status")
- proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause")
- proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails")
- proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions")
- proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time")
- proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp")
- proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta")
- proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions")
- proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs")
- proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_a8431b6e0aeeb761)
-}
-
-var fileDescriptor_a8431b6e0aeeb761 = []byte{
- // 2928 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47,
- 0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e,
- 0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c,
- 0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 0xdd, 0x93,
- 0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c,
- 0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44,
- 0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb,
- 0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2,
- 0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d,
- 0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3,
- 0x3d, 0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa,
- 0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc,
- 0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda,
- 0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3,
- 0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb,
- 0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf,
- 0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c,
- 0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93,
- 0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4,
- 0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c,
- 0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f,
- 0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc,
- 0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f,
- 0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61,
- 0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62,
- 0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d,
- 0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3,
- 0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6,
- 0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55,
- 0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49,
- 0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33,
- 0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6,
- 0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05,
- 0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea,
- 0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a,
- 0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f,
- 0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0,
- 0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75,
- 0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17,
- 0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c,
- 0xa0, 0x44, 0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69,
- 0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5,
- 0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb,
- 0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef,
- 0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42,
- 0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b,
- 0xe3, 0x67, 0x53, 0x00, 0xb1, 0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4,
- 0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc,
- 0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c,
- 0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5,
- 0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f,
- 0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f,
- 0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9,
- 0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80,
- 0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1,
- 0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38,
- 0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e,
- 0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc,
- 0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79,
- 0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b,
- 0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77,
- 0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5,
- 0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c,
- 0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45,
- 0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc,
- 0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69,
- 0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e,
- 0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30,
- 0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76,
- 0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a,
- 0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0,
- 0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7,
- 0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d,
- 0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e,
- 0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa,
- 0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f,
- 0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12,
- 0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b,
- 0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d,
- 0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05,
- 0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19,
- 0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10,
- 0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d,
- 0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe,
- 0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2,
- 0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f,
- 0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27,
- 0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb,
- 0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52,
- 0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd,
- 0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb,
- 0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9,
- 0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d,
- 0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c,
- 0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70,
- 0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53,
- 0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a,
- 0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde,
- 0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14,
- 0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7,
- 0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97,
- 0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e,
- 0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60,
- 0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e,
- 0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38,
- 0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe,
- 0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed,
- 0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c,
- 0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf,
- 0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec,
- 0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b,
- 0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2,
- 0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94,
- 0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40,
- 0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8,
- 0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba,
- 0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0,
- 0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e,
- 0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 0x85, 0xbe, 0x09,
- 0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f,
- 0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd,
- 0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03,
- 0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53,
- 0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8,
- 0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d,
- 0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03,
- 0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0,
- 0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7,
- 0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b,
- 0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13,
- 0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c,
- 0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b,
- 0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86,
- 0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e,
- 0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18,
- 0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5,
- 0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77,
- 0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f,
- 0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86,
- 0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47,
- 0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32,
- 0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95,
- 0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e,
- 0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30,
- 0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a,
- 0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac,
- 0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63,
- 0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8,
- 0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 0x39, 0x51,
- 0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b,
- 0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64,
- 0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b,
- 0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99,
- 0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71,
- 0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e,
- 0xe7, 0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c,
- 0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b,
- 0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84,
- 0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e,
- 0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4,
- 0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9,
- 0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2,
- 0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa,
- 0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2,
- 0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf,
- 0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03,
- 0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e,
- 0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9,
- 0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c,
- 0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde,
- 0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc,
- 0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2,
- 0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8,
- 0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5,
- 0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed,
- 0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72,
- 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb,
- 0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a,
- 0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92,
- 0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e,
- 0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1,
- 0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd,
- 0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00,
-}
+func (m *WatchEvent) Reset() { *m = WatchEvent{} }
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
size := m.Size()
@@ -2415,7 +999,7 @@ func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+ sort.Strings(keysForMatchLabels)
for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.MatchLabels[string(keysForMatchLabels[iNdEx])]
baseI := i
@@ -2787,7 +1371,7 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Annotations {
keysForAnnotations = append(keysForAnnotations, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
v := m.Annotations[string(keysForAnnotations[iNdEx])]
baseI := i
@@ -2811,7 +1395,7 @@ func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
for k := range m.Labels {
keysForLabels = append(keysForLabels, string(k))
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ sort.Strings(keysForLabels)
for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- {
v := m.Labels[string(keysForLabels[iNdEx])]
baseI := i
@@ -4583,7 +3167,7 @@ func (this *LabelSelector) String() string {
for k := range this.MatchLabels {
keysForMatchLabels = append(keysForMatchLabels, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels)
+ sort.Strings(keysForMatchLabels)
mapStringForMatchLabels := "map[string]string{"
for _, k := range keysForMatchLabels {
mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, this.MatchLabels[k])
@@ -4690,7 +3274,7 @@ func (this *ObjectMeta) String() string {
for k := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+ sort.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
@@ -4700,7 +3284,7 @@ func (this *ObjectMeta) String() string {
for k := range this.Annotations {
keysForAnnotations = append(keysForAnnotations, k)
}
- github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+ sort.Strings(keysForAnnotations)
mapStringForAnnotations := "map[string]string{"
for _, k := range keysForAnnotations {
mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 865d3e7c..fb21b723 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -1038,7 +1038,6 @@ message Status {
// is not guaranteed to conform to any schema except that defined by
// the reason type.
// +optional
- // +listType=atomic
optional StatusDetails details = 5;
// Suggested HTTP return code for this status, 0 if not set.
@@ -1114,6 +1113,7 @@ message StatusDetails {
}
// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
message TableOptions {
// includeObject decides whether to include each object along with its columnar information.
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go
new file mode 100644
index 00000000..459ae1ad
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.protomessage.pb.go
@@ -0,0 +1,112 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1
+
+func (*APIGroup) ProtoMessage() {}
+
+func (*APIGroupList) ProtoMessage() {}
+
+func (*APIResource) ProtoMessage() {}
+
+func (*APIResourceList) ProtoMessage() {}
+
+func (*APIVersions) ProtoMessage() {}
+
+func (*ApplyOptions) ProtoMessage() {}
+
+func (*Condition) ProtoMessage() {}
+
+func (*CreateOptions) ProtoMessage() {}
+
+func (*DeleteOptions) ProtoMessage() {}
+
+func (*Duration) ProtoMessage() {}
+
+func (*FieldSelectorRequirement) ProtoMessage() {}
+
+func (*FieldsV1) ProtoMessage() {}
+
+func (*GetOptions) ProtoMessage() {}
+
+func (*GroupKind) ProtoMessage() {}
+
+func (*GroupResource) ProtoMessage() {}
+
+func (*GroupVersion) ProtoMessage() {}
+
+func (*GroupVersionForDiscovery) ProtoMessage() {}
+
+func (*GroupVersionKind) ProtoMessage() {}
+
+func (*GroupVersionResource) ProtoMessage() {}
+
+func (*LabelSelector) ProtoMessage() {}
+
+func (*LabelSelectorRequirement) ProtoMessage() {}
+
+func (*List) ProtoMessage() {}
+
+func (*ListMeta) ProtoMessage() {}
+
+func (*ListOptions) ProtoMessage() {}
+
+func (*ManagedFieldsEntry) ProtoMessage() {}
+
+func (*MicroTime) ProtoMessage() {}
+
+func (*ObjectMeta) ProtoMessage() {}
+
+func (*OwnerReference) ProtoMessage() {}
+
+func (*PartialObjectMetadata) ProtoMessage() {}
+
+func (*PartialObjectMetadataList) ProtoMessage() {}
+
+func (*Patch) ProtoMessage() {}
+
+func (*PatchOptions) ProtoMessage() {}
+
+func (*Preconditions) ProtoMessage() {}
+
+func (*RootPaths) ProtoMessage() {}
+
+func (*ServerAddressByClientCIDR) ProtoMessage() {}
+
+func (*Status) ProtoMessage() {}
+
+func (*StatusCause) ProtoMessage() {}
+
+func (*StatusDetails) ProtoMessage() {}
+
+func (*TableOptions) ProtoMessage() {}
+
+func (*Time) ProtoMessage() {}
+
+func (*Timestamp) ProtoMessage() {}
+
+func (*TypeMeta) ProtoMessage() {}
+
+func (*UpdateOptions) ProtoMessage() {}
+
+func (*Verbs) ProtoMessage() {}
+
+func (*WatchEvent) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 2a669062..9970e877 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -784,7 +784,6 @@ type Status struct {
// is not guaranteed to conform to any schema except that defined by
// the reason type.
// +optional
- // +listType=atomic
Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"`
// Suggested HTTP return code for this status, 0 if not set.
// +optional
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
index 4dedec4b..d0ca2001 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -104,7 +104,7 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for _, msg := range validation.IsQualifiedName(labelName) {
- allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg).WithOrigin("labelKey"))
+ allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg).WithOrigin("format=k8s-label-key"))
}
return allErrs
}
@@ -115,7 +115,7 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi
for k, v := range labels {
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
for _, msg := range validation.IsValidLabelValue(v) {
- allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+ allErrs = append(allErrs, field.Invalid(fldPath, v, msg).WithOrigin("format=k8s-label-value"))
}
}
return allErrs
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..fd6e876e
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.model_name.go
@@ -0,0 +1,267 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroup) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIGroupList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIResourceList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in APIVersions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ApplyOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ApplyOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Condition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Condition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in CreateOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.CreateOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in DeleteOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Duration) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Duration"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FieldSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.FieldSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in FieldsV1) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GetOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GetOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupKind) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersion) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersion"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionForDiscovery) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionKind) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionKind"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in GroupVersionResource) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionResource"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in InternalEvent) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.InternalEvent"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LabelSelector) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in LabelSelectorRequirement) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in List) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.List"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ListMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ListOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ListOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ManagedFieldsEntry) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in MicroTime) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ObjectMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in OwnerReference) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadata) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadataList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Patch) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PatchOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.PatchOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Preconditions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RootPaths) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.RootPaths"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ServerAddressByClientCIDR) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Status) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatusCause) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StatusDetails) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Table) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Table"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableColumnDefinition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableColumnDefinition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableRow) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableRow"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TableRowCondition) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TableRowCondition"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Time) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Timestamp) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.Timestamp"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.TypeMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in UpdateOptions) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.UpdateOptions"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WatchEvent) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
index 46b0e133..159ca057 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.apis.meta.v1beta1
// +groupName=meta.k8s.io
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
index 819d936f..3c763898 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
@@ -24,84 +24,14 @@ import (
io "io"
- proto "github.com/gogo/protobuf/proto"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
-func (*PartialObjectMetadataList) ProtoMessage() {}
-func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
- return fileDescriptor_39237a8d8061b52f, []int{0}
-}
-func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
-}
-func (m *PartialObjectMetadataList) XXX_Size() int {
- return m.Size()
-}
-func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
- xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_39237a8d8061b52f)
-}
-
-var fileDescriptor_39237a8d8061b52f = []byte{
- // 303 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30,
- 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1,
- 0x84, 0x0d, 0x11, 0xc5, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x31, 0x8b, 0x35,
- 0x4d, 0x69, 0xfe, 0x15, 0xbc, 0xf9, 0x11, 0xfc, 0x58, 0x3d, 0xee, 0x38, 0x10, 0x86, 0x8d, 0x5f,
- 0x44, 0xd2, 0x56, 0x91, 0xa1, 0xd0, 0x5b, 0x9e, 0x07, 0x7e, 0xbf, 0x3c, 0x81, 0xf8, 0x67, 0xd1,
- 0xa9, 0x21, 0x52, 0x53, 0x96, 0x48, 0xc5, 0xc2, 0x95, 0x8c, 0x79, 0xfa, 0x4c, 0x93, 0x48, 0xb8,
- 0xc2, 0x50, 0xc5, 0x81, 0xd1, 0xa7, 0x49, 0xc0, 0x81, 0x4d, 0xa8, 0xe0, 0x31, 0x4f, 0x19, 0xf0,
- 0x25, 0x49, 0x52, 0x0d, 0xba, 0x3b, 0xae, 0x50, 0xf2, 0x13, 0x25, 0x49, 0x24, 0x5c, 0x61, 0x88,
- 0x43, 0x49, 0x8d, 0xf6, 0x8f, 0x84, 0x84, 0x55, 0x16, 0x90, 0x50, 0x2b, 0x2a, 0xb4, 0xd0, 0xb4,
- 0x34, 0x04, 0xd9, 0x7d, 0x99, 0xca, 0x50, 0x9e, 0x2a, 0x73, 0xff, 0xb8, 0xc9, 0xa8, 0xdd, 0x3d,
- 0xfd, 0x93, 0xbf, 0xa8, 0x34, 0x8b, 0x41, 0x2a, 0x4e, 0x4d, 0xb8, 0xe2, 0x8a, 0xed, 0x72, 0x87,
- 0x6f, 0xc8, 0x3f, 0xb8, 0x66, 0x29, 0x48, 0xf6, 0x38, 0x0f, 0x1e, 0x78, 0x08, 0x57, 0x1c, 0xd8,
- 0x92, 0x01, 0xbb, 0x94, 0x06, 0xba, 0xb7, 0x7e, 0x5b, 0xd5, 0xb9, 0xf7, 0x6f, 0x88, 0x46, 0x9d,
- 0x29, 0x21, 0x4d, 0x1e, 0x4e, 0x1c, 0xed, 0x4c, 0xb3, 0xfd, 0x7c, 0x3b, 0xf0, 0xec, 0x76, 0xd0,
- 0xfe, 0x6a, 0x16, 0xdf, 0xc6, 0xee, 0x9d, 0xdf, 0x92, 0xc0, 0x95, 0xe9, 0xa1, 0xe1, 0xff, 0x51,
- 0x67, 0x7a, 0xde, 0x4c, 0xfd, 0xeb, 0xda, 0xd9, 0x5e, 0x7d, 0x4f, 0xeb, 0xc2, 0x19, 0x17, 0x95,
- 0x78, 0x36, 0xcf, 0x0b, 0xec, 0xad, 0x0b, 0xec, 0x6d, 0x0a, 0xec, 0xbd, 0x58, 0x8c, 0x72, 0x8b,
- 0xd1, 0xda, 0x62, 0xb4, 0xb1, 0x18, 0xbd, 0x5b, 0x8c, 0x5e, 0x3f, 0xb0, 0x77, 0x33, 0x6e, 0xfc,
- 0x0d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x0f, 0xd7, 0x36, 0x32, 0x02, 0x00, 0x00,
-}
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go
new file mode 100644
index 00000000..a782b1d8
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.protomessage.pb.go
@@ -0,0 +1,24 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package v1beta1
+
+func (*PartialObjectMetadataList) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
index f16170a3..68f261cc 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
@@ -46,6 +46,7 @@ type ConditionStatus = v1.ConditionStatus
type IncludeObjectPolicy = v1.IncludeObjectPolicy
// TableOptions are used when a Table is requested by the caller.
+// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type TableOptions = v1.TableOptions
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..9c360011
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in PartialObjectMetadataList) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList"
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/operator/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 067bcac0..031dcd21 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -25,6 +25,7 @@ import (
"k8s.io/klog/v2"
+ "k8s.io/apimachinery/pkg/api/validate/content"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
@@ -848,7 +849,6 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) {
return s, nil
}
if tok2 == CommaToken {
- p.consume(Values)
s.Insert("") // to handle ,, Double "" removed by StringSet
}
default: // it can be operator
@@ -927,7 +927,7 @@ func parse(selector string, path *field.Path) (internalSelector, error) {
}
func validateLabelKey(k string, path *field.Path) *field.Error {
- if errs := validation.IsQualifiedName(k); len(errs) != 0 {
+ if errs := content.IsLabelKey(k); len(errs) != 0 {
return field.Invalid(path, k, strings.Join(errs, "; "))
}
return nil
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
index b54429bd..fd012dbc 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.runtime
+
// Package runtime includes helper functions for working with API objects
// that follow the kubernetes API object conventions, which are:
//
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
index 2e40e140..f5e78d4b 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
@@ -23,145 +23,16 @@ import (
fmt "fmt"
io "io"
- math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
-
- proto "github.com/gogo/protobuf/proto"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *RawExtension) Reset() { *m = RawExtension{} }
-func (*RawExtension) ProtoMessage() {}
-func (*RawExtension) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e0e4b920403a48c, []int{0}
-}
-func (m *RawExtension) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *RawExtension) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RawExtension.Merge(m, src)
-}
-func (m *RawExtension) XXX_Size() int {
- return m.Size()
-}
-func (m *RawExtension) XXX_DiscardUnknown() {
- xxx_messageInfo_RawExtension.DiscardUnknown(m)
-}
+func (m *RawExtension) Reset() { *m = RawExtension{} }
-var xxx_messageInfo_RawExtension proto.InternalMessageInfo
+func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (m *TypeMeta) Reset() { *m = TypeMeta{} }
-func (*TypeMeta) ProtoMessage() {}
-func (*TypeMeta) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e0e4b920403a48c, []int{1}
-}
-func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *TypeMeta) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TypeMeta.Merge(m, src)
-}
-func (m *TypeMeta) XXX_Size() int {
- return m.Size()
-}
-func (m *TypeMeta) XXX_DiscardUnknown() {
- xxx_messageInfo_TypeMeta.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
-
-func (m *Unknown) Reset() { *m = Unknown{} }
-func (*Unknown) ProtoMessage() {}
-func (*Unknown) Descriptor() ([]byte, []int) {
- return fileDescriptor_2e0e4b920403a48c, []int{2}
-}
-func (m *Unknown) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *Unknown) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Unknown.Merge(m, src)
-}
-func (m *Unknown) XXX_Size() int {
- return m.Size()
-}
-func (m *Unknown) XXX_DiscardUnknown() {
- xxx_messageInfo_Unknown.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Unknown proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension")
- proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta")
- proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_2e0e4b920403a48c)
-}
-
-var fileDescriptor_2e0e4b920403a48c = []byte{
- // 365 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x4f, 0x6b, 0x22, 0x31,
- 0x18, 0xc6, 0x27, 0x2a, 0xe8, 0x46, 0xc1, 0x25, 0x7b, 0xd8, 0xd9, 0x3d, 0x64, 0xc4, 0xd3, 0x7a,
- 0xd8, 0x0c, 0x08, 0x85, 0x5e, 0x1d, 0xf1, 0x50, 0x4a, 0xa1, 0x84, 0xfe, 0x81, 0x9e, 0x1a, 0x67,
- 0xd2, 0x31, 0x0c, 0x26, 0xc3, 0x18, 0x99, 0x7a, 0xeb, 0x47, 0xe8, 0xc7, 0xf2, 0xe8, 0xd1, 0x93,
- 0xd4, 0xe9, 0x87, 0xe8, 0xb5, 0x18, 0xa3, 0xb5, 0xed, 0xc1, 0x5b, 0xde, 0xf7, 0x79, 0x7e, 0xcf,
- 0xfb, 0xbe, 0x10, 0xe8, 0x27, 0xa7, 0x13, 0x22, 0x94, 0xcf, 0x52, 0x31, 0x66, 0xe1, 0x48, 0x48,
- 0x9e, 0xcd, 0xfc, 0x34, 0x89, 0xfd, 0x6c, 0x2a, 0xb5, 0x18, 0x73, 0x3f, 0xe6, 0x92, 0x67, 0x4c,
- 0xf3, 0x88, 0xa4, 0x99, 0xd2, 0x0a, 0x79, 0x5b, 0x80, 0x1c, 0x02, 0x24, 0x4d, 0x62, 0x62, 0x81,
- 0xbf, 0xff, 0x63, 0xa1, 0x47, 0xd3, 0x21, 0x09, 0xd5, 0xd8, 0x8f, 0x55, 0xac, 0x7c, 0xc3, 0x0d,
- 0xa7, 0x0f, 0xa6, 0x32, 0x85, 0x79, 0x6d, 0xf3, 0xda, 0x1d, 0xd8, 0xa0, 0x2c, 0x1f, 0x3c, 0x6a,
- 0x2e, 0x27, 0x42, 0x49, 0xf4, 0x07, 0x96, 0x33, 0x96, 0xbb, 0xa0, 0x05, 0xfe, 0x35, 0x82, 0x6a,
- 0xb1, 0xf2, 0xca, 0x94, 0xe5, 0x74, 0xd3, 0x6b, 0xdf, 0xc3, 0xda, 0xd5, 0x2c, 0xe5, 0x17, 0x5c,
- 0x33, 0xd4, 0x85, 0x90, 0xa5, 0xe2, 0x86, 0x67, 0x1b, 0xc8, 0xb8, 0x7f, 0x04, 0x68, 0xbe, 0xf2,
- 0x9c, 0x62, 0xe5, 0xc1, 0xde, 0xe5, 0x99, 0x55, 0xe8, 0x81, 0x0b, 0xb5, 0x60, 0x25, 0x11, 0x32,
- 0x72, 0x4b, 0xc6, 0xdd, 0xb0, 0xee, 0xca, 0xb9, 0x90, 0x11, 0x35, 0x4a, 0xfb, 0x0d, 0xc0, 0xea,
- 0xb5, 0x4c, 0xa4, 0xca, 0x25, 0xba, 0x85, 0x35, 0x6d, 0xa7, 0x99, 0xfc, 0x7a, 0xb7, 0x43, 0x8e,
- 0xdc, 0x4e, 0x76, 0xeb, 0x05, 0x3f, 0x6d, 0xf8, 0x7e, 0x61, 0xba, 0x0f, 0xdb, 0x5d, 0x58, 0xfa,
- 0x7e, 0x21, 0xea, 0xc1, 0x66, 0xa8, 0xa4, 0xe6, 0x52, 0x0f, 0x64, 0xa8, 0x22, 0x21, 0x63, 0xb7,
- 0x6c, 0x96, 0xfd, 0x6d, 0xf3, 0x9a, 0xfd, 0xcf, 0x32, 0xfd, 0xea, 0x47, 0x27, 0xb0, 0x6e, 0x5b,
- 0x9b, 0xd1, 0x6e, 0xc5, 0xe0, 0xbf, 0x2c, 0x5e, 0xef, 0x7f, 0x48, 0xf4, 0xd0, 0x17, 0x0c, 0xe6,
- 0x6b, 0xec, 0x2c, 0xd6, 0xd8, 0x59, 0xae, 0xb1, 0xf3, 0x54, 0x60, 0x30, 0x2f, 0x30, 0x58, 0x14,
- 0x18, 0x2c, 0x0b, 0x0c, 0x5e, 0x0a, 0x0c, 0x9e, 0x5f, 0xb1, 0x73, 0xe7, 0x1d, 0xf9, 0x2d, 0xef,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x9b, 0x09, 0xb3, 0x4f, 0x02, 0x00, 0x00,
-}
+func (m *Unknown) Reset() { *m = Unknown{} }
func (m *RawExtension) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go
new file mode 100644
index 00000000..1716853f
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.protomessage.pb.go
@@ -0,0 +1,28 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package runtime
+
+func (*RawExtension) ProtoMessage() {}
+
+func (*TypeMeta) ProtoMessage() {}
+
+func (*Unknown) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
index 7a26d279..ed57e08a 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
@@ -18,41 +18,3 @@ limitations under the License.
// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto
package schema
-
-import (
- fmt "fmt"
-
- math "math"
-
- proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_25f8f0eed21c6089)
-}
-
-var fileDescriptor_25f8f0eed21c6089 = []byte{
- // 170 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xce, 0xa1, 0x0e, 0xc2, 0x30,
- 0x10, 0xc6, 0xf1, 0xd6, 0x22, 0x91, 0x88, 0x93, 0x73, 0xdc, 0x39, 0x82, 0x46, 0xf3, 0x04, 0xb8,
- 0x6e, 0x94, 0xae, 0x59, 0xba, 0x6b, 0xba, 0x4e, 0xe0, 0x78, 0x04, 0x1e, 0x6b, 0x72, 0x72, 0x92,
- 0x95, 0x17, 0x21, 0x69, 0x11, 0x48, 0xdc, 0xfd, 0xc5, 0xef, 0xf2, 0x6d, 0x0e, 0xdd, 0x71, 0x40,
- 0xcb, 0xa4, 0xbc, 0x75, 0xaa, 0x69, 0x6d, 0xaf, 0xc3, 0x9d, 0x7c, 0x67, 0x28, 0x8c, 0x7d, 0xb4,
- 0x4e, 0xd3, 0xd0, 0xb4, 0xda, 0x29, 0x32, 0xba, 0xd7, 0x41, 0x45, 0x7d, 0x45, 0x1f, 0x38, 0xf2,
- 0xb6, 0x2a, 0x0e, 0x7f, 0x1d, 0xfa, 0xce, 0xe0, 0xd7, 0x61, 0x71, 0xbb, 0xbd, 0xb1, 0xb1, 0x1d,
- 0x6b, 0x6c, 0xd8, 0x91, 0x61, 0xc3, 0x94, 0x79, 0x3d, 0xde, 0x72, 0xe5, 0xc8, 0x57, 0x79, 0x7b,
- 0x3a, 0x4f, 0x2b, 0x88, 0x79, 0x05, 0xb1, 0xac, 0x20, 0x1e, 0x09, 0xe4, 0x94, 0x40, 0xce, 0x09,
- 0xe4, 0x92, 0x40, 0xbe, 0x12, 0xc8, 0xe7, 0x1b, 0xc4, 0xa5, 0xfa, 0x6f, 0xf4, 0x27, 0x00, 0x00,
- 0xff, 0xff, 0x97, 0xb8, 0x4d, 0x1f, 0xdd, 0x00, 0x00, 0x00,
-}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go
new file mode 100644
index 00000000..04743737
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.protomessage.pb.go
@@ -0,0 +1,22 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package schema
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index b0e22c5e..e2fbeabd 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -29,6 +29,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
+ "k8s.io/kube-openapi/pkg/util"
)
// Scheme defines methods for serializing and deserializing API objects, a type
@@ -752,6 +753,9 @@ var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
// The OpenAPI definition name is the canonical name of the type, with the group and version removed.
// For example, the OpenAPI definition name of Pod is `io.k8s.api.core.v1.Pod`.
//
+// This respects the util.OpenAPIModelNamer interface and will return the name returned by
+// OpenAPIModelName() if it is defined on the type.
+//
// A known type that is registered as an unstructured.Unstructured type is treated as a custom resource and
// which has an OpenAPI definition name of the form `.`.
// For example, the OpenAPI definition name of `group: stable.example.com, version: v1, kind: Pod` is
@@ -764,6 +768,12 @@ func (s *Scheme) ToOpenAPIDefinitionName(groupVersionKind schema.GroupVersionKin
if err != nil {
return "", err
}
+
+ // Use a namer if provided
+ if namer, ok := example.(util.OpenAPIModelNamer); ok {
+ return namer.OpenAPIModelName(), nil
+ }
+
if _, ok := example.(Unstructured); ok {
if groupVersionKind.Group == "" || groupVersionKind.Kind == "" {
return "", fmt.Errorf("unable to convert GroupVersionKind with empty fields to unstructured type to an OpenAPI definition name: %v", groupVersionKind)
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
index 754a8082..afac03e9 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go
@@ -21,8 +21,6 @@ import (
"io"
"math/bits"
- "github.com/gogo/protobuf/proto"
-
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
@@ -98,6 +96,10 @@ type streamingListData struct {
items []runtime.Object
}
+type sizer interface {
+ Size() int
+}
+
// listSize return size of ListMeta and items to be later used for preallocations.
// listMetaSize and itemSizes do not include header bytes (field identifier, size).
func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) {
@@ -107,7 +109,7 @@ func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, list
// Items
itemSizes = make([]int, len(items))
for i, item := range items {
- sizer, ok := item.(proto.Sizer)
+ sizer, ok := item.(sizer)
if !ok {
return totalSize, listMetaSize, nil, errItemsSizer
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
index c66c49ac..67a2d124 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
@@ -23,8 +23,6 @@ import (
"net/http"
"reflect"
- "github.com/gogo/protobuf/proto"
-
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -148,11 +146,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
types, _, err := s.typer.ObjectKinds(into)
switch {
case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
+ unmarshaler, ok := into.(unmarshaler)
if !ok {
return nil, &actual, errNotMarshalable{reflect.TypeOf(into)}
}
- if err := proto.Unmarshal(unk.Raw, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(unk.Raw); err != nil {
return nil, &actual, err
}
return into, &actual, nil
@@ -251,7 +251,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.
_, err = w.Write(data[:prefixSize+uint64(i)])
return err
- case proto.Marshaler:
+ case unbufferedMarshaller:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
@@ -306,16 +306,27 @@ func copyKindDefaults(dst, src *schema.GroupVersionKind) {
// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple
// byte buffers by pre-calculating the size of the final buffer needed.
type bufferedMarshaller interface {
- proto.Sizer
runtime.ProtobufMarshaller
}
// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
type bufferedReverseMarshaller interface {
- proto.Sizer
runtime.ProtobufReverseMarshaller
}
+type unbufferedMarshaller interface {
+ Marshal() ([]byte, error)
+}
+
+// unmarshaler is the subset of gogo Message and Unmarshaler used by unmarshal
+type unmarshaler interface {
+ // Reset() is called on the top-level message before unmarshaling,
+ // and clears all existing data from the message instance.
+ Reset()
+ // Unmarshal decodes from the start of the data into the message.
+ Unmarshal([]byte) error
+}
+
// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
// object with a nil RawJSON struct and the expected size of the provided buffer. The
// returned size will not be correct if RawJSON is set on unk.
@@ -381,11 +392,13 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
types, _, err := s.typer.ObjectKinds(into)
switch {
case runtime.IsNotRegisteredError(err):
- pb, ok := into.(proto.Message)
+ unmarshaler, ok := into.(unmarshaler)
if !ok {
return nil, actual, errNotMarshalable{reflect.TypeOf(into)}
}
- if err := proto.Unmarshal(data, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(data); err != nil {
return nil, actual, err
}
return into, actual, nil
@@ -419,11 +432,13 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
return nil, actual, err
}
- pb, ok := obj.(proto.Message)
+ unmarshaler, ok := obj.(unmarshaler)
if !ok {
return nil, actual, errNotMarshalable{reflect.TypeOf(obj)}
}
- if err := proto.Unmarshal(data, pb); err != nil {
+ // top-level unmarshal resets before delegating unmarshaling to the object
+ unmarshaler.Reset()
+ if err := unmarshaler.Unmarshal(data); err != nil {
return nil, actual, err
}
if actual != nil {
@@ -519,7 +534,7 @@ func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.Me
}
return w.Write(data[:n])
- case proto.Marshaler:
+ case unbufferedMarshaller:
// this path performs extra allocations
data, err := t.Marshal()
if err != nil {
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
index 27a2064c..70c4ea8c 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
@@ -21,11 +21,21 @@ import (
"io"
)
+// ProtobufMarshaller can precompute size, and marshals to the start of the provided data buffer.
type ProtobufMarshaller interface {
+ // Size returns the number of bytes a call to MarshalTo would consume.
+ Size() int
+ // MarshalTo marshals to the start of the data buffer, which must be at least as big as Size(),
+ // and returns the number of bytes written, which must be identical to the return value of Size().
MarshalTo(data []byte) (int, error)
}
+// ProtobufReverseMarshaller can precompute size, and marshals to the end of the provided data buffer.
type ProtobufReverseMarshaller interface {
+ // Size returns the number of bytes a call to MarshalToSizedBuffer would consume.
+ Size() int
+ // MarshalToSizedBuffer marshals to the end of the data buffer, which must be at least as big as Size(),
+ // and returns the number of bytes written, which must be identical to the return value of Size().
MarshalToSizedBuffer(data []byte) (int, error)
}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go
new file mode 100644
index 00000000..cf3ec4dc
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.model_name.go
@@ -0,0 +1,92 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package runtime
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Allocator) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Allocator"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NegotiateError) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NegotiateError"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NoopDecoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NoopDecoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in NoopEncoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.NoopEncoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Pair) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Pair"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in RawExtension) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.RawExtension"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Scheme) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Scheme"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SerializerInfo) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.SerializerInfo"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in SimpleAllocator) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.SimpleAllocator"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in StreamSerializerInfo) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.StreamSerializerInfo"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in TypeMeta) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.TypeMeta"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Unknown) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.Unknown"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WithVersionEncoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.WithVersionEncoder"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in WithoutVersionDecoder) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.runtime.WithoutVersionDecoder"
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
index 1f287739..5be552e1 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -23,80 +23,10 @@ import (
fmt "fmt"
io "io"
- math "math"
math_bits "math/bits"
-
- proto "github.com/gogo/protobuf/proto"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *IntOrString) Reset() { *m = IntOrString{} }
-func (*IntOrString) ProtoMessage() {}
-func (*IntOrString) Descriptor() ([]byte, []int) {
- return fileDescriptor_771bacc35a5ec189, []int{0}
-}
-func (m *IntOrString) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *IntOrString) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IntOrString.Merge(m, src)
-}
-func (m *IntOrString) XXX_Size() int {
- return m.Size()
-}
-func (m *IntOrString) XXX_DiscardUnknown() {
- xxx_messageInfo_IntOrString.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IntOrString proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString")
-}
-
-func init() {
- proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_771bacc35a5ec189)
-}
-
-var fileDescriptor_771bacc35a5ec189 = []byte{
- // 277 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6,
- 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4,
- 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2,
- 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
- 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1,
- 0x24, 0xa5, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f,
- 0x9e, 0xaf, 0x0f, 0xd6, 0x9b, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x4c, 0xa5,
- 0x89, 0x8c, 0x5c, 0xdc, 0x9e, 0x79, 0x25, 0xfe, 0x45, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x42,
- 0x1a, 0x5c, 0x2c, 0x25, 0x95, 0x05, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x4e, 0x22, 0x27,
- 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0xcf, 0x12, 0x52, 0x59, 0x90, 0xfa, 0x0b, 0x4a, 0x07, 0x81,
- 0x55, 0x08, 0xa9, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a,
- 0xb0, 0x3a, 0xf1, 0x41, 0xd5, 0xb2, 0x79, 0x82, 0x45, 0x83, 0xa0, 0xb2, 0x20, 0x75, 0xc5, 0x25,
- 0x45, 0x20, 0x75, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x08, 0x75, 0xc1, 0x60, 0xd1, 0x20, 0xa8, 0xac,
- 0x15, 0xc7, 0x8c, 0x05, 0xf2, 0x0c, 0x0d, 0x77, 0x14, 0x18, 0x9c, 0x3c, 0x4f, 0x3c, 0x94, 0x63,
- 0xb8, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9,
- 0x31, 0x5e, 0x78, 0x24, 0xc7, 0x78, 0xe3, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e,
- 0xcb, 0x31, 0x44, 0x29, 0x13, 0x11, 0x84, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0xa1, 0x0b,
- 0x1e, 0x68, 0x01, 0x00, 0x00,
-}
+func (m *IntOrString) Reset() { *m = IntOrString{} }
func (m *IntOrString) Marshal() (dAtA []byte, err error) {
size := m.Size()
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
index 7c63c5e4..e3d26a59 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
@@ -33,6 +33,7 @@ option go_package = "k8s.io/apimachinery/pkg/util/intstr";
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.util.intstr
message IntOrString {
optional int64 type = 1;
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go
new file mode 100644
index 00000000..2853a018
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.protomessage.pb.go
@@ -0,0 +1,24 @@
+//go:build kubernetes_protomessage_one_more_release
+// +build kubernetes_protomessage_one_more_release
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by go-to-protobuf. DO NOT EDIT.
+
+package intstr
+
+func (*IntOrString) ProtoMessage() {}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 5fd2e16c..f372ae58 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -38,6 +38,7 @@ import (
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.util.intstr
type IntOrString struct {
Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"`
IntVal int32 `protobuf:"varint,2,opt,name=intVal"`
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go
new file mode 100644
index 00000000..b2d6e0ae
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/intstr/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package intstr
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in IntOrString) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.util.intstr.IntOrString"
+}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/sets/set.go b/operator/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
index cd961c8c..ae3d15eb 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/sets/set.go
@@ -18,7 +18,7 @@ package sets
import (
"cmp"
- "sort"
+ "slices"
)
// Set is a set of the same type elements, implemented via map[comparable]struct{} for minimal memory consumption.
@@ -188,22 +188,13 @@ func (s1 Set[T]) Equal(s2 Set[T]) bool {
return len(s1) == len(s2) && s1.IsSuperset(s2)
}
-type sortableSliceOfGeneric[T cmp.Ordered] []T
-
-func (g sortableSliceOfGeneric[T]) Len() int { return len(g) }
-func (g sortableSliceOfGeneric[T]) Less(i, j int) bool { return less[T](g[i], g[j]) }
-func (g sortableSliceOfGeneric[T]) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
-
// List returns the contents as a sorted T slice.
//
// This is a separate function and not a method because not all types supported
// by Generic are ordered and only those can be sorted.
func List[T cmp.Ordered](s Set[T]) []T {
- res := make(sortableSliceOfGeneric[T], 0, len(s))
- for key := range s {
- res = append(res, key)
- }
- sort.Sort(res)
+ res := s.UnsortedList()
+ slices.Sort(res)
return res
}
@@ -230,7 +221,3 @@ func (s Set[T]) PopAny() (T, bool) {
func (s Set[T]) Len() int {
return len(s)
}
-
-func less[T cmp.Ordered](lhs, rhs T) bool {
- return lhs < rhs
-}
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
index 85b0cfc0..1bfed1c2 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
@@ -249,7 +249,7 @@ var _ LookupPatchMeta = PatchMetaFromOpenAPI{}
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
if s.Schema == nil {
- return nil, PatchMeta{}, nil
+ return &PatchMetaFromOpenAPI{}, PatchMeta{}, nil
}
kindItem := NewKindItem(key, s.Schema.GetPath())
s.Schema.Accept(kindItem)
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
index afb5f1b0..f0264e50 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
@@ -23,6 +23,13 @@ import (
"strings"
)
+// NormalizationRule holds a pre-compiled regular expression and its replacement string
+// for normalizing field paths.
+type NormalizationRule struct {
+ Regexp *regexp.Regexp
+ Replacement string
+}
+
// ErrorMatcher is a helper for comparing Error objects.
type ErrorMatcher struct {
// TODO(thockin): consider whether type is ever NOT required, maybe just
@@ -32,22 +39,37 @@ type ErrorMatcher struct {
// "want" error has a nil field, don't match on field.
matchField bool
// TODO(thockin): consider whether value could be assumed - if the
- // "want" error has a nil value, don't match on field.
+ // "want" error has a nil value, don't match on value.
matchValue bool
matchOrigin bool
matchDetail func(want, got string) bool
requireOriginWhenInvalid bool
+ // normalizationRules holds the pre-compiled regex patterns for path normalization.
+ normalizationRules []NormalizationRule
}
// Matches returns true if the two Error objects match according to the
-// configured criteria.
+// configured criteria. When field normalization is configured, only the
+// "got" error's field path is normalized (to bring older API versions up
+// to the internal/latest format), while "want" is assumed to already be
+// in the canonical internal API format.
func (m ErrorMatcher) Matches(want, got *Error) bool {
if m.matchType && want.Type != got.Type {
return false
}
- if m.matchField && want.Field != got.Field {
- return false
+ if m.matchField {
+ // Try direct match first (common case)
+ if want.Field != got.Field {
+ // Fields don't match, try normalization if rules are configured.
+ // Only normalize "got" - it may be from an older API version that
+ // needs to be brought up to the internal/latest format that "want"
+ // is already in.
+ if want.Field != m.normalizePath(got.Field) {
+ return false
+ }
+ }
}
+
if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
return false
}
@@ -67,6 +89,18 @@ func (m ErrorMatcher) Matches(want, got *Error) bool {
return true
}
+// normalizePath applies configured path normalization rules.
+func (m ErrorMatcher) normalizePath(path string) string {
+ for _, rule := range m.normalizationRules {
+ normalized := rule.Regexp.ReplaceAllString(path, rule.Replacement)
+ if normalized != path {
+ // Only apply the first matching rule.
+ return normalized
+ }
+ }
+ return path
+}
+
// Render returns a string representation of the specified Error object,
// according to the criteria configured in the ErrorMatcher.
func (m ErrorMatcher) Render(e *Error) string {
@@ -84,7 +118,11 @@ func (m ErrorMatcher) Render(e *Error) string {
}
if m.matchField {
comma()
- buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
+ if normalized := m.normalizePath(e.Field); normalized != e.Field {
+ buf.WriteString(fmt.Sprintf("Field=%q (aka %q)", normalized, e.Field))
+ } else {
+ buf.WriteString(fmt.Sprintf("Field=%q", e.Field))
+ }
}
if m.matchValue {
comma()
@@ -125,11 +163,39 @@ func (m ErrorMatcher) ByType() ErrorMatcher {
}
// ByField returns a derived ErrorMatcher which also matches by field path.
+// If you need to mutate the field path (e.g. to normalize across versions),
+// see ByFieldNormalized.
func (m ErrorMatcher) ByField() ErrorMatcher {
m.matchField = true
return m
}
+// ByFieldNormalized returns a derived ErrorMatcher which also matches by field path
+// after applying normalization rules to the actual (got) error's field path.
+// This allows matching field paths from older API versions against the canonical
+// internal API format.
+//
+// The normalization rules are applied ONLY to the "got" error's field path, bringing
+// older API version field paths up to the latest/internal format. The "want" error
+// is assumed to always be in the internal API format (latest).
+//
+// The rules slice holds pre-compiled regular expressions and their replacement strings.
+//
+// Example:
+//
+// rules := []NormalizationRule{
+// {
+// Regexp: regexp.MustCompile(`spec\.devices\.requests\[(\d+)\]\.allocationMode`),
+// Replacement: "spec.devices.requests[$1].exactly.allocationMode",
+// },
+// }
+// matcher := ErrorMatcher{}.ByFieldNormalized(rules)
+func (m ErrorMatcher) ByFieldNormalized(rules []NormalizationRule) ErrorMatcher {
+ m.matchField = true
+ m.normalizationRules = rules
+ return m
+}
+
// ByValue returns a derived ErrorMatcher which also matches by the errant
// value.
func (m ErrorMatcher) ByValue() ErrorMatcher {
@@ -138,6 +204,13 @@ func (m ErrorMatcher) ByValue() ErrorMatcher {
}
// ByOrigin returns a derived ErrorMatcher which also matches by the origin.
+// When this is used and an origin is set in the error, the matcher will
+// consider all expected errors with the same origin to be a match. The only
+// exception to this is when it finds two errors which are exactly identical,
+// which is too suspicious to ignore. This multi-matching allows tests to
+// express a single expectation ("I set the X field to an invalid value, and I
+// expect an error from origin Y") without having to know exactly how many
+// errors might be returned, or in what order, or with what wording.
func (m ErrorMatcher) ByOrigin() ErrorMatcher {
m.matchOrigin = true
return m
@@ -184,40 +257,64 @@ func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher {
type TestIntf interface {
Helper()
Errorf(format string, args ...any)
- Logf(format string, args ...any)
}
// Test compares two ErrorLists by the criteria configured in this matcher, and
-// fails the test if they don't match. If a given "want" error matches multiple
-// "got" errors, they will all be consumed. This might be OK (e.g. if there are
-// multiple errors on the same field from the same origin) or it might be an
-// insufficiently specific matcher, so these will be logged.
+// fails the test if they don't match. The "want" errors are expected to be in
+// the internal API format (latest), while "got" errors may be from any API version
+// and will be normalized if field normalization rules are configured.
+//
+// If matching by origin is enabled and the error has a non-empty origin, a given
+// "want" error can match multiple "got" errors, and they will all be consumed.
+// The only exception to this is if the matcher got multiple identical (in every way,
+// even those not being matched on) errors, which is likely to indicate a bug.
func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
tb.Helper()
+ exactly := m.Exactly() // makes a copy
+
+ // If we ever find an EXACT duplicate error, it's almost certainly a bug
+ // worth reporting. If we ever find a use-case where this is not a bug, we
+ // can revisit this assumption.
+ seen := map[string]bool{}
+ for _, g := range got {
+ key := exactly.Render(g)
+ if seen[key] {
+ tb.Errorf("exact duplicate error:\n%s", key)
+ }
+ seen[key] = true
+ }
+
remaining := got
for _, w := range want {
tmp := make(ErrorList, 0, len(remaining))
- n := 0
- for _, g := range remaining {
+ matched := false
+ for i, g := range remaining {
if m.Matches(w, g) {
- n++
+ matched = true
+ if m.matchOrigin && w.Origin != "" {
+ // When origin is included in the match, we allow multiple
+ // matches against the same wanted error, so that tests
+ // can be insulated from the exact number, order, and
+ // wording of cases that might return more than one error.
+ continue
+ } else {
+ // Single-match, save the rest of the "got" errors and move
+ // on to the next "want" error.
+ tmp = append(tmp, remaining[i+1:]...)
+ break
+ }
} else {
tmp = append(tmp, g)
}
}
- if n == 0 {
+ if !matched {
tb.Errorf("expected an error matching:\n%s", m.Render(w))
- } else if n > 1 {
- // This is not necessarily and error, but it's worth logging in
- // case it's not what the test author intended.
- tb.Logf("multiple errors matched:\n%s", m.Render(w))
}
remaining = tmp
}
if len(remaining) > 0 {
for _, e := range remaining {
- exactly := m.Exactly() // makes a copy
tb.Errorf("unmatched error:\n%s", exactly.Render(e))
}
}
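
A sketch of how a test might use the new ByFieldNormalized matcher; the field paths and the normalization rule below are illustrative, not taken from this repository:

```go
package mypkg_test

import (
	"regexp"
	"testing"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func TestNormalizedFieldMatch(t *testing.T) {
	// Hypothetical rule: an older API version exposes the field as "oldName".
	rules := []field.NormalizationRule{{
		Regexp:      regexp.MustCompile(`^spec\.oldName`),
		Replacement: "spec.newName",
	}}

	want := field.ErrorList{field.Required(field.NewPath("spec", "newName"), "")}
	got := field.ErrorList{field.Required(field.NewPath("spec", "oldName"), "")}

	// Only the "got" paths are normalized before comparison, as documented above.
	field.ErrorMatcher{}.ByType().ByFieldNormalized(rules).Test(t, want, got)
}
```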
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index f2a983ae..950d8386 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -341,6 +341,14 @@ func (list ErrorList) MarkCoveredByDeclarative() ErrorList {
return list
}
+// PrefixDetail adds a prefix to the Detail for all errors in the list and returns the updated list.
+func (list ErrorList) PrefixDetail(prefix string) ErrorList {
+ for _, err := range list {
+ err.Detail = prefix + err.Detail
+ }
+ return list
+}
+
// ToAggregate converts the ErrorList into an errors.Aggregate.
func (list ErrorList) ToAggregate() utilerrors.Aggregate {
if len(list) == 0 {
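
PrefixDetail mutates the errors in place and returns the same list, which keeps it chainable; a small hypothetical usage:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	errs := field.ErrorList{
		field.Invalid(field.NewPath("spec", "replicas"), -1, "must be non-negative"),
	}
	// Every Detail gains the prefix; the list itself is returned for chaining.
	errs = errs.PrefixDetail("while validating pod template: ")
	fmt.Println(errs.ToAggregate())
}
```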
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index bc4521c3..352ff19a 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -23,51 +23,17 @@ import (
"strings"
"unicode"
+ "k8s.io/apimachinery/pkg/api/validate/content"
+
"k8s.io/apimachinery/pkg/util/validation/field"
)
-const qnameCharFmt string = "[A-Za-z0-9]"
-const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
-const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
-const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
-const qualifiedNameMaxLength int = 63
-
-var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
-
// IsQualifiedName tests whether the value passed is what Kubernetes calls a
// "qualified name". This is a format used in various places throughout the
// system. If the value is not valid, a list of error strings is returned.
// Otherwise an empty list (or nil) is returned.
-func IsQualifiedName(value string) []string {
- var errs []string
- parts := strings.Split(value, "/")
- var name string
- switch len(parts) {
- case 1:
- name = parts[0]
- case 2:
- var prefix string
- prefix, name = parts[0], parts[1]
- if len(prefix) == 0 {
- errs = append(errs, "prefix part "+EmptyError())
- } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 {
- errs = append(errs, prefixEach(msgs, "prefix part ")...)
- }
- default:
- return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+
- " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')")
- }
-
- if len(name) == 0 {
- errs = append(errs, "name part "+EmptyError())
- } else if len(name) > qualifiedNameMaxLength {
- errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength))
- }
- if !qualifiedNameRegexp.MatchString(name) {
- errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc"))
- }
- return errs
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsQualifiedName instead.
+var IsQualifiedName = content.IsLabelKey
// IsFullyQualifiedName checks if the name is fully qualified. This is similar
// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of
@@ -151,27 +117,40 @@ func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList {
return allErrs
}
-const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
-const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+// IsDomainPrefixedKey checks if the given key string is a domain-prefixed key
+// (e.g. acme.io/foo). All characters before the first "/" must be a valid
+// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+// be non-empty and match the regex ^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$.
+func IsDomainPrefixedKey(fldPath *field.Path, key string) field.ErrorList {
+ var allErrs field.ErrorList
+ if len(key) == 0 {
+ return append(allErrs, field.Required(fldPath, ""))
+ }
+ for _, errMessages := range content.IsLabelKey(key) {
+ allErrs = append(allErrs, field.Invalid(fldPath, key, errMessages))
+ }
-// LabelValueMaxLength is a label's max length
-const LabelValueMaxLength int = 63
+ if len(allErrs) > 0 {
+ return allErrs
+ }
-var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$")
+ segments := strings.Split(key, "/")
+ if len(segments) != 2 {
+ return append(allErrs, field.Invalid(fldPath, key, "must be a domain-prefixed key (such as \"acme.io/foo\")"))
+ }
+
+ return allErrs
+}
+
+// LabelValueMaxLength is a label's max length
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.LabelValueMaxLength instead.
+const LabelValueMaxLength int = content.LabelValueMaxLength
// IsValidLabelValue tests whether the value passed is a valid label value. If
// the value is not valid, a list of error strings is returned. Otherwise an
// empty list (or nil) is returned.
-func IsValidLabelValue(value string) []string {
- var errs []string
- if len(value) > LabelValueMaxLength {
- errs = append(errs, MaxLenError(LabelValueMaxLength))
- }
- if !labelValueRegexp.MatchString(value) {
- errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345"))
- }
- return errs
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsLabelValue instead.
+var IsValidLabelValue = content.IsLabelValue
const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
@@ -283,19 +262,10 @@ func IsWildcardDNS1123Subdomain(value string) []string {
return errs
}
-const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*"
-const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'"
-
-var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$")
-
// IsCIdentifier tests for a string that conforms the definition of an identifier
// in C. This checks the format, but not the length.
-func IsCIdentifier(value string) []string {
- if !cIdentifierRegexp.MatchString(value) {
- return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")}
- }
- return nil
-}
+// Deprecated: Use k8s.io/apimachinery/pkg/api/validate/content.IsCIdentifier instead.
+var IsCIdentifier = content.IsCIdentifier
// IsValidPortNum tests that the argument is a valid, non-zero port number.
func IsValidPortNum(port int) []string {
@@ -478,13 +448,6 @@ func EmptyError() string {
return "must be non-empty"
}
-func prefixEach(msgs []string, prefix string) []string {
- for i := range msgs {
- msgs[i] = prefix + msgs[i]
- }
- return msgs
-}
-
// InclusiveRangeError returns a string explanation of a numeric "must be
// between" validation failure.
func InclusiveRangeError(lo, hi int) string {
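
The deprecated helpers remain callable as aliases of the new content package, and IsDomainPrefixedKey layers the "domain/" prefix requirement on top of the label-key rules; a hedged sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Valid as a qualified name / label key (prefix is optional here).
	fmt.Println(validation.IsQualifiedName("acme.io/foo")) // []

	// IsDomainPrefixedKey additionally requires the "domain/" prefix,
	// so a bare "foo" is rejected.
	errs := validation.IsDomainPrefixedKey(field.NewPath("metadata", "labels"), "foo")
	fmt.Println(errs.ToAggregate())
}
```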
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/operator/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
new file mode 100644
index 00000000..da88813d
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version provides utilities for version number comparisons
+package version
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/operator/vendor/k8s.io/apimachinery/pkg/util/version/version.go
new file mode 100644
index 00000000..72c0769e
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/util/version/version.go
@@ -0,0 +1,484 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Version is an opaque representation of a version number
+type Version struct {
+ components []uint
+ semver bool
+ preRelease string
+ buildMetadata string
+}
+
+var (
+ // versionMatchRE splits a version string into numeric and "extra" parts
+ versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`)
+ // extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release
+ extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`)
+)
+
+func parse(str string, semver bool) (*Version, error) {
+ parts := versionMatchRE.FindStringSubmatch(str)
+ if parts == nil {
+ return nil, fmt.Errorf("could not parse %q as version", str)
+ }
+ numbers, extra := parts[1], parts[2]
+
+ components := strings.Split(numbers, ".")
+ if (semver && len(components) != 3) || (!semver && len(components) < 2) {
+ return nil, fmt.Errorf("illegal version string %q", str)
+ }
+
+ v := &Version{
+ components: make([]uint, len(components)),
+ semver: semver,
+ }
+ for i, comp := range components {
+ if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" {
+ return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
+ }
+ num, err := strconv.ParseUint(comp, 10, 0)
+ if err != nil {
+ return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err)
+ }
+ v.components[i] = uint(num)
+ }
+
+ if semver && extra != "" {
+ extraParts := extraMatchRE.FindStringSubmatch(extra)
+ if extraParts == nil {
+ return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str)
+ }
+ v.preRelease, v.buildMetadata = extraParts[1], extraParts[2]
+
+ for _, comp := range strings.Split(v.preRelease, ".") {
+ if _, err := strconv.ParseUint(comp, 10, 0); err == nil {
+ if strings.HasPrefix(comp, "0") && comp != "0" {
+ return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
+ }
+ }
+ }
+ }
+
+ return v, nil
+}
+
+// HighestSupportedVersion returns the highest supported version
+// This function assumes that the highest supported version must be v1.x.
+func HighestSupportedVersion(versions []string) (*Version, error) {
+ if len(versions) == 0 {
+ return nil, errors.New("empty array for supported versions")
+ }
+
+ var (
+ highestSupportedVersion *Version
+ theErr error
+ )
+
+ for i := len(versions) - 1; i >= 0; i-- {
+ currentHighestVer, err := ParseGeneric(versions[i])
+ if err != nil {
+ theErr = err
+ continue
+ }
+
+ if currentHighestVer.Major() > 1 {
+ continue
+ }
+
+ if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) {
+ highestSupportedVersion = currentHighestVer
+ }
+ }
+
+ if highestSupportedVersion == nil {
+ return nil, fmt.Errorf(
+ "could not find a highest supported version from versions (%v) reported: %+v",
+ versions, theErr)
+ }
+
+ if highestSupportedVersion.Major() != 1 {
+ return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion)
+ }
+
+ return highestSupportedVersion, nil
+}
+
+// ParseGeneric parses a "generic" version string. The version string must consist of two
+// or more dot-separated numeric fields (the first of which can't have leading zeroes),
+// followed by arbitrary uninterpreted data (which need not be separated from the final
+// numeric field by punctuation). For convenience, leading and trailing whitespace is
+// ignored, and the version can be preceded by the letter "v". See also ParseSemantic.
+func ParseGeneric(str string) (*Version, error) {
+ return parse(str, false)
+}
+
+// MustParseGeneric is like ParseGeneric except that it panics on error
+func MustParseGeneric(str string) *Version {
+ v, err := ParseGeneric(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Parse tries ParseSemantic first to keep more information.
+// If ParseSemantic fails, it falls back to ParseGeneric.
+func Parse(str string) (*Version, error) {
+ v, err := parse(str, true)
+ if err != nil {
+ return parse(str, false)
+ }
+ return v, err
+}
+
+// MustParse is like Parse except that it panics on error
+func MustParse(str string) *Version {
+ v, err := Parse(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version.
+func ParseMajorMinor(str string) (*Version, error) {
+ v, err := ParseGeneric(str)
+ if err != nil {
+ return nil, err
+ }
+ return MajorMinor(v.Major(), v.Minor()), nil
+}
+
+// MustParseMajorMinor is like ParseMajorMinor except that it panics on error
+func MustParseMajorMinor(str string) *Version {
+ v, err := ParseMajorMinor(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// ParseSemantic parses a version string that exactly obeys the syntax and semantics of
+// the "Semantic Versioning" specification (http://semver.org/) (although it ignores
+// leading and trailing whitespace, and allows the version to be preceded by "v"). For
+// version strings that are not guaranteed to obey the Semantic Versioning syntax, use
+// ParseGeneric.
+func ParseSemantic(str string) (*Version, error) {
+ return parse(str, true)
+}
+
+// MustParseSemantic is like ParseSemantic except that it panics on error
+func MustParseSemantic(str string) *Version {
+ v, err := ParseSemantic(str)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// MajorMinor returns a version with the provided major and minor version.
+func MajorMinor(major, minor uint) *Version {
+ return &Version{components: []uint{major, minor}}
+}
+
+// Major returns the major release number
+func (v *Version) Major() uint {
+ return v.components[0]
+}
+
+// Minor returns the minor release number
+func (v *Version) Minor() uint {
+ return v.components[1]
+}
+
+// Patch returns the patch release number if v is a Semantic Version, or 0
+func (v *Version) Patch() uint {
+ if len(v.components) < 3 {
+ return 0
+ }
+ return v.components[2]
+}
+
+// BuildMetadata returns the build metadata, if v is a Semantic Version, or ""
+func (v *Version) BuildMetadata() string {
+ return v.buildMetadata
+}
+
+// PreRelease returns the prerelease metadata, if v is a Semantic Version, or ""
+func (v *Version) PreRelease() string {
+ return v.preRelease
+}
+
+// Components returns the version number components
+func (v *Version) Components() []uint {
+ return v.components
+}
+
+// WithMajor returns copy of the version object with requested major number
+func (v *Version) WithMajor(major uint) *Version {
+ result := *v
+ result.components = []uint{major, v.Minor(), v.Patch()}
+ return &result
+}
+
+// WithMinor returns copy of the version object with requested minor number
+func (v *Version) WithMinor(minor uint) *Version {
+ result := *v
+ result.components = []uint{v.Major(), minor, v.Patch()}
+ return &result
+}
+
+// OffsetMinor returns the version with the given offset applied to the original minor, with the same major and no patch.
+// If -offset >= current minor, the minor will be 0.
+func (v *Version) OffsetMinor(offset int) *Version {
+ var minor uint
+ if offset >= 0 {
+ minor = v.Minor() + uint(offset)
+ } else {
+ diff := uint(-offset)
+ if diff < v.Minor() {
+ minor = v.Minor() - diff
+ }
+ }
+ return MajorMinor(v.Major(), minor)
+}
+
+// SubtractMinor returns the version diff minor versions back, with the same major and no patch.
+// If diff >= current minor, the minor would be 0.
+func (v *Version) SubtractMinor(diff uint) *Version {
+ return v.OffsetMinor(-int(diff))
+}
+
+// AddMinor returns the version diff minor versions forward, with the same major and no patch.
+func (v *Version) AddMinor(diff uint) *Version {
+ return v.OffsetMinor(int(diff))
+}
+
+// WithPatch returns copy of the version object with requested patch number
+func (v *Version) WithPatch(patch uint) *Version {
+ result := *v
+ result.components = []uint{v.Major(), v.Minor(), patch}
+ return &result
+}
+
+// WithPreRelease returns copy of the version object with requested prerelease
+func (v *Version) WithPreRelease(preRelease string) *Version {
+ if len(preRelease) == 0 {
+ return v
+ }
+ result := *v
+ result.components = []uint{v.Major(), v.Minor(), v.Patch()}
+ result.preRelease = preRelease
+ return &result
+}
+
+// WithBuildMetadata returns copy of the version object with requested buildMetadata
+func (v *Version) WithBuildMetadata(buildMetadata string) *Version {
+ result := *v
+ result.components = []uint{v.Major(), v.Minor(), v.Patch()}
+ result.buildMetadata = buildMetadata
+ return &result
+}
+
+// String converts a Version back to a string; note that for versions parsed with
+// ParseGeneric, this will not include the trailing uninterpreted portion of the version
+// number.
+func (v *Version) String() string {
+ if v == nil {
+ return ""
+ }
+ var buffer bytes.Buffer
+
+ for i, comp := range v.components {
+ if i > 0 {
+ buffer.WriteString(".")
+ }
+ buffer.WriteString(fmt.Sprintf("%d", comp))
+ }
+ if v.preRelease != "" {
+ buffer.WriteString("-")
+ buffer.WriteString(v.preRelease)
+ }
+ if v.buildMetadata != "" {
+ buffer.WriteString("+")
+ buffer.WriteString(v.buildMetadata)
+ }
+
+ return buffer.String()
+}
+
+// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0
+// if they are equal
+func (v *Version) compareInternal(other *Version) int {
+
+ vLen := len(v.components)
+ oLen := len(other.components)
+ for i := 0; i < vLen && i < oLen; i++ {
+ switch {
+ case other.components[i] < v.components[i]:
+ return 1
+ case other.components[i] > v.components[i]:
+ return -1
+ }
+ }
+
+ // If components are common but one has more items and they are not zeros, it is bigger
+ switch {
+ case oLen < vLen && !onlyZeros(v.components[oLen:]):
+ return 1
+ case oLen > vLen && !onlyZeros(other.components[vLen:]):
+ return -1
+ }
+
+ if !v.semver || !other.semver {
+ return 0
+ }
+
+ switch {
+ case v.preRelease == "" && other.preRelease != "":
+ return 1
+ case v.preRelease != "" && other.preRelease == "":
+ return -1
+ case v.preRelease == other.preRelease: // includes case where both are ""
+ return 0
+ }
+
+ vPR := strings.Split(v.preRelease, ".")
+ oPR := strings.Split(other.preRelease, ".")
+ for i := 0; i < len(vPR) && i < len(oPR); i++ {
+ vNum, err := strconv.ParseUint(vPR[i], 10, 0)
+ if err == nil {
+ oNum, err := strconv.ParseUint(oPR[i], 10, 0)
+ if err == nil {
+ switch {
+ case oNum < vNum:
+ return 1
+ case oNum > vNum:
+ return -1
+ default:
+ continue
+ }
+ }
+ }
+ if oPR[i] < vPR[i] {
+ return 1
+ } else if oPR[i] > vPR[i] {
+ return -1
+ }
+ }
+
+ switch {
+ case len(oPR) < len(vPR):
+ return 1
+ case len(oPR) > len(vPR):
+ return -1
+ }
+
+ return 0
+}
+
+// returns false if the array contains any non-zero element
+func onlyZeros(array []uint) bool {
+ for _, num := range array {
+ if num != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualTo tests if a version is equal to a given version.
+func (v *Version) EqualTo(other *Version) bool {
+ if v == nil {
+ return other == nil
+ }
+ if other == nil {
+ return false
+ }
+ return v.compareInternal(other) == 0
+}
+
+// AtLeast tests if a version is at least equal to a given minimum version. If both
+// Versions are Semantic Versions, this will use the Semantic Version comparison
+// algorithm. Otherwise, it will compare only the numeric components, with non-present
+// components being considered "0" (ie, "1.4" is equal to "1.4.0").
+func (v *Version) AtLeast(min *Version) bool {
+ return v.compareInternal(min) != -1
+}
+
+// LessThan tests if a version is less than a given version. (It is exactly the opposite
+// of AtLeast, for situations where asking "is v too old?" makes more sense than asking
+// "is v new enough?".)
+func (v *Version) LessThan(other *Version) bool {
+ return v.compareInternal(other) == -1
+}
+
+// GreaterThan tests if a version is greater than a given version.
+func (v *Version) GreaterThan(other *Version) bool {
+ return v.compareInternal(other) == 1
+}
+
+// Compare compares v against a version string (which will be parsed as either Semantic
+// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
+// it is greater than other, or 0 if they are equal.
+func (v *Version) Compare(other string) (int, error) {
+ ov, err := parse(other, v.semver)
+ if err != nil {
+ return 0, err
+ }
+ return v.compareInternal(ov), nil
+}
+
+// WithInfo returns copy of the version object.
+// Deprecated: The Info field has been removed from the Version struct. This method no longer modifies the Version object.
+func (v *Version) WithInfo(info apimachineryversion.Info) *Version {
+ result := *v
+ return &result
+}
+
+// Info returns the version information of a component.
+// Deprecated: Use Info() from effective version instead.
+func (v *Version) Info() *apimachineryversion.Info {
+ if v == nil {
+ return nil
+ }
+ // in case info is empty, or the major and minor in info is different from the actual major and minor
+ return &apimachineryversion.Info{
+ Major: Itoa(v.Major()),
+ Minor: Itoa(v.Minor()),
+ GitVersion: v.String(),
+ }
+}
+
+func Itoa(i uint) string {
+ if i == 0 {
+ return ""
+ }
+ return strconv.Itoa(int(i))
+}
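
A quick sketch of the parsing and comparison helpers this newly vendored file provides:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// ParseGeneric tolerates trailing, uninterpreted data; ParseSemantic is strict.
	v := version.MustParseGeneric("v1.35.0-rc.1")
	fmt.Println(v.Major(), v.Minor(), v.Patch()) // 1 35 0

	fmt.Println(v.AtLeast(version.MajorMinor(1, 31)))           // true
	fmt.Println(v.LessThan(version.MustParseSemantic("2.0.0"))) // true
}
```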
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/version/doc.go b/operator/vendor/k8s.io/apimachinery/pkg/version/doc.go
index 5f446a4f..70e3f76b 100644
--- a/operator/vendor/k8s.io/apimachinery/pkg/version/doc.go
+++ b/operator/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -15,6 +15,8 @@ limitations under the License.
*/
// +k8s:openapi-gen=true
+// +k8s:openapi-model-package=io.k8s.apimachinery.pkg.version
+//
// Package version supplies the type for version information.
package version
diff --git a/operator/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go b/operator/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go
new file mode 100644
index 00000000..e5a6d395
--- /dev/null
+++ b/operator/vendor/k8s.io/apimachinery/pkg/version/zz_generated.model_name.go
@@ -0,0 +1,27 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package version
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Info) OpenAPIModelName() string {
+ return "io.k8s.apimachinery.pkg.version.Info"
+}
diff --git a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go
index ed47e999..04712fb4 100644
--- a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go
+++ b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/command_headers.go
@@ -19,6 +19,7 @@ package genericclioptions
import (
"net/http"
"strings"
+ "sync/atomic"
"github.com/google/uuid"
"github.com/spf13/cobra"
@@ -33,8 +34,9 @@ const (
// round tripper to add Request headers before delegation. Implements
// the go standard library "http.RoundTripper" interface.
type CommandHeaderRoundTripper struct {
- Delegate http.RoundTripper
- Headers map[string]string
+ Delegate http.RoundTripper
+ Headers map[string]string
+ SkipHeaders *atomic.Bool
}
// CommandHeaderRoundTripper adds Request headers before delegating to standard
@@ -43,9 +45,14 @@ type CommandHeaderRoundTripper struct {
//
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-cli/859-kubectl-headers
func (c *CommandHeaderRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ if c.shouldSkipHeaders() {
+ return c.Delegate.RoundTrip(req)
+ }
+
for header, value := range c.Headers {
req.Header.Set(header, value)
}
+
return c.Delegate.RoundTrip(req)
}
@@ -92,3 +99,11 @@ func (c *CommandHeaderRoundTripper) CancelRequest(req *http.Request) {
cr.CancelRequest(req)
}
}
+
+func (c *CommandHeaderRoundTripper) shouldSkipHeaders() bool {
+ if c.SkipHeaders == nil {
+ return false
+ }
+
+ return c.SkipHeaders.Load()
+}
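
A minimal sketch of wiring the new SkipHeaders toggle; the header name and value here are illustrative:

```go
package main

import (
	"net/http"
	"sync/atomic"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	var skip atomic.Bool

	rt := &genericclioptions.CommandHeaderRoundTripper{
		Delegate:    http.DefaultTransport,
		Headers:     map[string]string{"Kubectl-Command": "kubectl apply"},
		SkipHeaders: &skip,
	}

	// Flipping the flag at runtime suppresses the extra headers without
	// rebuilding the transport chain.
	skip.Store(true)
	_ = rt
}
```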
diff --git a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
index 8dba84e3..350c7bee 100644
--- a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
+++ b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
@@ -39,25 +39,26 @@ import (
)
const (
- flagClusterName = "cluster"
- flagAuthInfoName = "user"
- flagContext = "context"
- flagNamespace = "namespace"
- flagAPIServer = "server"
- flagTLSServerName = "tls-server-name"
- flagInsecure = "insecure-skip-tls-verify"
- flagCertFile = "client-certificate"
- flagKeyFile = "client-key"
- flagCAFile = "certificate-authority"
- flagBearerToken = "token"
- flagImpersonate = "as"
- flagImpersonateUID = "as-uid"
- flagImpersonateGroup = "as-group"
- flagUsername = "username"
- flagPassword = "password"
- flagTimeout = "request-timeout"
- flagCacheDir = "cache-dir"
- flagDisableCompression = "disable-compression"
+ flagClusterName = "cluster"
+ flagAuthInfoName = "user"
+ flagContext = "context"
+ flagNamespace = "namespace"
+ flagAPIServer = "server"
+ flagTLSServerName = "tls-server-name"
+ flagInsecure = "insecure-skip-tls-verify"
+ flagCertFile = "client-certificate"
+ flagKeyFile = "client-key"
+ flagCAFile = "certificate-authority"
+ flagBearerToken = "token"
+ flagImpersonate = "as"
+ flagImpersonateUID = "as-uid"
+ flagImpersonateGroup = "as-group"
+ flagImpersonateUserExtra = "as-user-extra"
+ flagUsername = "username"
+ flagPassword = "password"
+ flagTimeout = "request-timeout"
+ flagCacheDir = "cache-dir"
+ flagDisableCompression = "disable-compression"
)
// RESTClientGetter is an interface that the ConfigFlags describe to provide an easier way to mock for commands
@@ -83,24 +84,25 @@ type ConfigFlags struct {
KubeConfig *string
// config flags
- ClusterName *string
- AuthInfoName *string
- Context *string
- Namespace *string
- APIServer *string
- TLSServerName *string
- Insecure *bool
- CertFile *string
- KeyFile *string
- CAFile *string
- BearerToken *string
- Impersonate *string
- ImpersonateUID *string
- ImpersonateGroup *[]string
- Username *string
- Password *string
- Timeout *string
- DisableCompression *bool
+ ClusterName *string
+ AuthInfoName *string
+ Context *string
+ Namespace *string
+ APIServer *string
+ TLSServerName *string
+ Insecure *bool
+ CertFile *string
+ KeyFile *string
+ CAFile *string
+ BearerToken *string
+ Impersonate *string
+ ImpersonateUID *string
+ ImpersonateGroup *[]string
+ ImpersonateUserExtra *[]string
+ Username *string
+ Password *string
+ Timeout *string
+ DisableCompression *bool
// If non-nil, wrap config function can transform the Config
// before it is returned in ToRESTConfig function.
WrapConfigFn func(*rest.Config) *rest.Config
@@ -170,16 +172,32 @@ func (f *ConfigFlags) toRawKubeConfigLoader() clientcmd.ClientConfig {
// bind auth info flag values to overrides
if f.CertFile != nil {
overrides.AuthInfo.ClientCertificate = *f.CertFile
+ overrides.AuthInfo.ClientCertificateData = nil
}
if f.KeyFile != nil {
overrides.AuthInfo.ClientKey = *f.KeyFile
+ overrides.AuthInfo.ClientKeyData = nil
}
if f.BearerToken != nil {
overrides.AuthInfo.Token = *f.BearerToken
+ overrides.AuthInfo.TokenFile = ""
}
if f.Impersonate != nil {
overrides.AuthInfo.Impersonate = *f.Impersonate
}
+ if f.ImpersonateUserExtra != nil && len(*f.ImpersonateUserExtra) > 0 {
+ userExtras := make(map[string][]string)
+ for _, extra := range *f.ImpersonateUserExtra {
+ parts := strings.SplitN(extra, "=", 2)
+ if len(parts) != 2 {
+ continue
+ }
+ key := parts[0]
+ value := parts[1]
+ userExtras[key] = append(userExtras[key], value)
+ }
+ overrides.AuthInfo.ImpersonateUserExtra = userExtras
+ }
if f.ImpersonateUID != nil {
overrides.AuthInfo.ImpersonateUID = *f.ImpersonateUID
}
@@ -373,6 +391,9 @@ func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) {
if f.ImpersonateGroup != nil {
flags.StringArrayVar(f.ImpersonateGroup, flagImpersonateGroup, *f.ImpersonateGroup, "Group to impersonate for the operation, this flag can be repeated to specify multiple groups.")
}
+ if f.ImpersonateUserExtra != nil {
+ flags.StringArrayVar(f.ImpersonateUserExtra, flagImpersonateUserExtra, *f.ImpersonateUserExtra, "User extras to impersonate for the operation, this flag can be repeated to specify multiple values for the same key.")
+ }
if f.Username != nil {
flags.StringVar(f.Username, flagUsername, *f.Username, "Username for basic authentication to the API server")
}
@@ -446,6 +467,7 @@ func (f *ConfigFlags) WithWarningPrinter(ioStreams genericiooptions.IOStreams) *
// NewConfigFlags returns ConfigFlags with default values set
func NewConfigFlags(usePersistentConfig bool) *ConfigFlags {
impersonateGroup := []string{}
+ impersonateUserExtra := []string{}
insecure := false
disableCompression := false
@@ -454,21 +476,22 @@ func NewConfigFlags(usePersistentConfig bool) *ConfigFlags {
Timeout: ptr.To("0"),
KubeConfig: ptr.To(""),
- CacheDir: ptr.To(getDefaultCacheDir()),
- ClusterName: ptr.To(""),
- AuthInfoName: ptr.To(""),
- Context: ptr.To(""),
- Namespace: ptr.To(""),
- APIServer: ptr.To(""),
- TLSServerName: ptr.To(""),
- CertFile: ptr.To(""),
- KeyFile: ptr.To(""),
- CAFile: ptr.To(""),
- BearerToken: ptr.To(""),
- Impersonate: ptr.To(""),
- ImpersonateUID: ptr.To(""),
- ImpersonateGroup: &impersonateGroup,
- DisableCompression: &disableCompression,
+ CacheDir: ptr.To(getDefaultCacheDir()),
+ ClusterName: ptr.To(""),
+ AuthInfoName: ptr.To(""),
+ Context: ptr.To(""),
+ Namespace: ptr.To(""),
+ APIServer: ptr.To(""),
+ TLSServerName: ptr.To(""),
+ CertFile: ptr.To(""),
+ KeyFile: ptr.To(""),
+ CAFile: ptr.To(""),
+ BearerToken: ptr.To(""),
+ Impersonate: ptr.To(""),
+ ImpersonateUID: ptr.To(""),
+ ImpersonateGroup: &impersonateGroup,
+ ImpersonateUserExtra: &impersonateUserExtra,
+ DisableCompression: &disableCompression,
usePersistentConfig: usePersistentConfig,
// The more groups you have, the more discovery requests you need to make.
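
Plugins built on ConfigFlags pick up the new --as-user-extra flag automatically; a hypothetical sketch of the wiring:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	cf := genericclioptions.NewConfigFlags(true)
	flags := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	cf.AddFlags(flags)

	// Repeated key=value pairs are collected into ImpersonateUserExtra and
	// applied through the kubeconfig overrides shown above.
	_ = flags.Parse([]string{"--as", "jane", "--as-user-extra", "reason=debugging"})
	fmt.Println(*cf.ImpersonateUserExtra) // [reason=debugging]
}
```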
diff --git a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
index a2eeb1e2..98284509 100644
--- a/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
+++ b/operator/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
@@ -33,7 +33,7 @@ func (f *JSONYamlPrintFlags) AllowedFormats() []string {
}
formats := []string{"json", "yaml"}
// We can't use the cmdutil pkg directly because of import cycle.
- if strings.ToLower(os.Getenv("KUBECTL_KYAML")) == "true" {
+ if strings.ToLower(os.Getenv("KUBECTL_KYAML")) != "false" {
formats = append(formats, "kyaml")
}
return formats
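
The flipped condition means the kyaml format is now offered unless it is explicitly disabled; a small check, assuming the KUBECTL_KYAML gate keeps this semantic:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	// Unset (or any value other than "false") now includes kyaml.
	fmt.Println((&genericclioptions.JSONYamlPrintFlags{}).AllowedFormats()) // [json yaml kyaml]

	os.Setenv("KUBECTL_KYAML", "false")
	fmt.Println((&genericclioptions.JSONYamlPrintFlags{}).AllowedFormats()) // [json yaml]
}
```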
diff --git a/operator/vendor/k8s.io/cli-runtime/pkg/resource/builder.go b/operator/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
index 00bd3701..37e83085 100644
--- a/operator/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
+++ b/operator/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
@@ -129,6 +129,10 @@ Example resource specifications include:
var StdinMultiUseError = errors.New("standard input cannot be used for multiple arguments")
+// ErrMultipleResourceTypes is returned when Builder.SingleResourceType() was called,
+// but multiple resource types were specified.
+var ErrMultipleResourceTypes = errors.New("you may only specify a single resource type")
+
// TODO: expand this to include other errors.
func IsUsageError(err error) bool {
if err == nil {
@@ -813,7 +817,7 @@ func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error
func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) {
if len(b.resources) > 1 && b.singleResourceType {
- return nil, fmt.Errorf("you may only specify a single resource type")
+ return nil, ErrMultipleResourceTypes
}
mappings := []*meta.RESTMapping{}
seen := map[schema.GroupVersionKind]bool{}
@@ -849,7 +853,7 @@ func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error)
canonical[mapping.Resource] = struct{}{}
}
if len(canonical) > 1 && b.singleResourceType {
- return nil, fmt.Errorf("you may only specify a single resource type")
+ return nil, ErrMultipleResourceTypes
}
return mappings, nil
}
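
With the exported sentinel, callers can branch with errors.Is instead of matching the message text; a minimal sketch:

```go
package main

import (
	"errors"
	"fmt"

	"k8s.io/cli-runtime/pkg/resource"
)

// describe shows the benefit of the new sentinel returned by the builder's
// resourceMappings and resourceTupleMappings when SingleResourceType is set.
func describe(err error) string {
	if errors.Is(err, resource.ErrMultipleResourceTypes) {
		return "only one resource type may be specified"
	}
	return err.Error()
}

func main() {
	fmt.Println(describe(resource.ErrMultipleResourceTypes))
}
```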
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
index 0d50d44a..34f9e830 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go
@@ -20,8 +20,40 @@ package v1
// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
// with apply.
+//
+// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // key specifies the audit annotation key. The audit annotation keys of
+ // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
+ // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
+ //
+ // The key is combined with the resource name of the
+ // ValidatingAdmissionPolicy to construct an audit annotation key:
+ // "{ValidatingAdmissionPolicy name}/{key}".
+ //
+ // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
+ // and the same audit annotation key, the annotation key will be identical.
+ // In this case, the first annotation written with the key will be included
+ // in the audit event and all subsequent annotations with the same key
+ // will be discarded.
+ //
+ // Required.
+ Key *string `json:"key,omitempty"`
+ // valueExpression represents the expression which is evaluated by CEL to
+ // produce an audit annotation value. The expression must evaluate to either
+ // a string or null value. If the expression evaluates to a string, the
+ // audit annotation is included with the string value. If the expression
+ // evaluates to null or empty string the audit annotation will be omitted.
+ // The valueExpression may be no longer than 5kb in length.
+ // If the result of the valueExpression is more than 10kb in length, it
+ // will be truncated to 10kb.
+ //
+ // If multiple ValidatingAdmissionPolicyBinding resources match an
+ // API request, then the valueExpression will be evaluated for
+ // each binding. All unique values produced by the valueExpressions
+ // will be joined together in a comma-separated list.
+ //
+ // Required.
ValueExpression *string `json:"valueExpression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
index 1f890bcf..31219c7e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go
@@ -20,9 +20,17 @@ package v1
// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
// with apply.
+//
+// ExpressionWarning is a warning information that targets a specific expression.
type ExpressionWarningApplyConfiguration struct {
+ // The path to the field that refers the expression.
+ // For example, the reference to the expression of the first item of
+ // validations is "spec.validations[0].expression"
FieldRef *string `json:"fieldRef,omitempty"`
- Warning *string `json:"warning,omitempty"`
+ // The content of type checking information in a human-readable form.
+ // Each line of the warning contains the type that the expression is checked
+ // against, followed by the type check error from the compiler.
+ Warning *string `json:"warning,omitempty"`
}
// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
index d8a816f1..1e149da4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchcondition.go
@@ -20,8 +20,32 @@ package v1
// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
// with apply.
+//
+// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
type MatchConditionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
+ // as well as providing an identifier for logging purposes. A good name should be descriptive of
+ // the associated expression.
+ // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and
+ // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or
+ // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an
+ // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')
+ //
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:
+ //
+ // 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // 'oldObject' - The existing object. The value is null for CREATE requests.
+ // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest).
+ // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+ //
+ // Required.
Expression *string `json:"expression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
index e840fe9e..cd1a3d5f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go
@@ -25,12 +25,88 @@ import (
// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
// with apply.
+//
+// MatchResources decides whether to run the admission control policy on an object based
+// on whether it meets the match criteria.
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
type MatchResourcesApplyConfiguration struct {
- NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
+ // NamespaceSelector decides whether to run the admission control policy on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the policy.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1"; you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the policy on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging";
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the validation based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the cel validation, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // The policy cares about an operation if it matches _any_ Rule.
+ ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
+ // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
- MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ //
+ // Defaults to "Equivalent"
+ MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
}
// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
index cd8096f9..498611b5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhook.go
@@ -25,19 +25,148 @@ import (
// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use
// with apply.
+//
+// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
type MutatingWebhookApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
- Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
- FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
- NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
- ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // The name of the admission webhook.
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+ // "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+ // of the organization.
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // ClientConfig defines how to communicate with the hook.
+ // Required
+ ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
+ // Rules describes what operations on what resources/subresources the webhook cares about.
+ // The webhook cares about an operation if it matches _any_ Rule.
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+ // from putting the cluster in a state which cannot be recovered from without completely
+ // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+ // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+ Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
+ // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+ // allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchPolicy defines how the "rules" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
+ //
+ // Defaults to "Equivalent"
+ MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the webhook on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the webhook.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1"; you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the webhook on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging";
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the webhook based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the webhook, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // SideEffects states whether this webhook has side effects.
+ // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
+ // Webhooks with side effects MUST implement a reconciliation system, since a request may be
+ // rejected by a future step in the admission chain and the side effects therefore need to be undone.
+ // Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+ // sideEffects == Unknown or Some.
+ SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"`
+ // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
+ // the webhook call will be ignored or the API call will fail based on the
+ // failure policy.
+ // The timeout value must be between 1 and 30 seconds.
+ // Default to 10 seconds.
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
+ // versions the Webhook expects. The API server will try to use the first version in
+ // the list which it supports. If none of the versions specified in this list
+ // are supported by the API server, validation will fail for this object.
+ // If a persisted webhook configuration specifies allowed versions and does not
+ // include any versions known to the API Server, calls to the webhook will fail
+ // and be subject to the failure policy.
+ AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
+ // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: the webhook will not be called more than once in a single admission evaluation.
+ //
+ // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation
+ // if the object being admitted is modified by other admission plugins after the initial webhook call.
+ // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted.
+ // Note:
+ // * the number of additional invocations is not guaranteed to be exactly one.
+ // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again.
+ // * webhooks that use this option may be reordered to minimize the number of additional invocations.
+ // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.
+ //
+ // Defaults to "Never".
+ ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be sent to this
+ // webhook. Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the webhook is called.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
}
// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with
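
The fields documented above are normally populated through the generated With* builders rather than set directly. A minimal sketch of composing a MutatingWebhookApplyConfiguration; the package aliases, webhook name, service reference, and rule values are illustrative assumptions, not part of this change:

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// buildMutatingWebhook sketches a MutatingWebhookApplyConfiguration covering the
// documented fields: name, client config, rules, failure policy, side effects,
// timeout, AdmissionReview versions, and reinvocation policy.
func buildMutatingWebhook(caBundle []byte) *applyadmissionv1.MutatingWebhookApplyConfiguration {
	return applyadmissionv1.MutatingWebhook().
		WithName("pods.example.com").
		WithClientConfig(applyadmissionv1.WebhookClientConfig().
			WithService(applyadmissionv1.ServiceReference().
				WithNamespace("example-system").
				WithName("example-webhook").
				WithPath("/mutate").
				WithPort(443)).
			WithCABundle(caBundle...)).
		WithRules(applyadmissionv1.RuleWithOperations().
			WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
			WithAPIGroups("").
			WithAPIVersions("v1").
			WithResources("pods")).
		WithFailurePolicy(admissionregistrationv1.Fail).
		WithSideEffects(admissionregistrationv1.SideEffectClassNone).
		WithTimeoutSeconds(10).
		WithAdmissionReviewVersions("v1").
		WithReinvocationPolicy(admissionregistrationv1.NeverReinvocationPolicy)
}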
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
index 9a12eba0..87909f25 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -29,10 +29,14 @@ import (
// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use
// with apply.
+//
+// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and may change the object.
type MutatingWebhookConfigurationApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ // Webhooks is a list of webhooks and the affected resources and operations.
+ Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with
@@ -45,29 +49,14 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
return b
}
-// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
-// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
-// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingWebhookConfigurationFrom extracts the applied configuration owned by fieldManager from
+// mutatingWebhookConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingWebhookConfiguration must be a unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
-// ExtractMutatingWebhookConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingWebhookConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
- return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "")
-}
-
-// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
- return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status")
-}
-
-func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractMutatingWebhookConfigurationFrom(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
b := &MutatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +68,21 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admission
b.WithAPIVersion("admissionregistration.k8s.io/v1")
return b, nil
}
+
+// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
+// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
+// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingWebhookConfiguration must be an unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractMutatingWebhookConfiguration provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return ExtractMutatingWebhookConfigurationFrom(mutatingWebhookConfiguration, fieldManager, "")
+}
+
func (b MutatingWebhookConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
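
The extract/modify-in-place/apply workflow referred to in these comments can be sketched as follows, assuming a typed clientset, an existing configuration named "example-webhooks", and a field manager string (all illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpTimeout reads the live object, extracts the fields owned by fieldManager,
// modifies one of them, and applies the result back. Passing an empty subresource
// to ExtractMutatingWebhookConfigurationFrom is equivalent to the wrapper used here.
func bumpTimeout(ctx context.Context, cs kubernetes.Interface, fieldManager string) error {
	live, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, "example-webhooks", metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := applyadmissionv1.ExtractMutatingWebhookConfiguration(live, fieldManager)
	if err != nil {
		return err
	}
	// Only fields owned by fieldManager are present in cfg; adjust one of them.
	if len(cfg.Webhooks) > 0 {
		cfg.Webhooks[0].WithTimeoutSeconds(15)
	}
	_, err = cs.AdmissionregistrationV1().MutatingWebhookConfigurations().Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}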
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
index dd31981a..fe7a6b8c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go
@@ -24,8 +24,12 @@ import (
// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
// with apply.
+//
+// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
type NamedRuleWithOperationsApplyConfiguration struct {
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // RuleWithOperations is a tuple of Operations and Resources.
RuleWithOperationsApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
index 07577929..5c6729fc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go
@@ -20,9 +20,16 @@ package v1
// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
// with apply.
+//
+// ParamKind is a tuple of Group Kind and Version.
type ParamKindApplyConfiguration struct {
+ // APIVersion is the API group version the resources belong to.
+ // In format of "group/version".
+ // Required.
APIVersion *string `json:"apiVersion,omitempty"`
- Kind *string `json:"kind,omitempty"`
+ // Kind is the API kind the resources belong to.
+ // Required.
+ Kind *string `json:"kind,omitempty"`
}
// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
index 140233f6..eb9f6c47 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go
@@ -25,10 +25,53 @@ import (
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
// with apply.
+//
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // name is the name of the resource being referenced.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // A single parameter used for all admission requests can be configured
+ // by setting the `name` field, leaving `selector` blank, and setting namespace
+ // if `paramKind` is namespace-scoped.
+ Name *string `json:"name,omitempty"`
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ Namespace *string `json:"namespace,omitempty"`
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, the absence of
+ // matched parameters is treated as a successful validation by the binding.
+ // If set to `Deny`, the absence of matched parameters is subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`
+ //
+ // Required
ParameterNotFoundAction *admissionregistrationv1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
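
The name/selector exclusivity described above leads to two distinct shapes of ParamRef. A hedged sketch of both, using illustrative names and the label selector apply configuration from meta/v1:

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// A single named param in a fixed namespace; selector must stay unset.
var paramByName = applyadmissionv1.ParamRef().
	WithName("limits-config").
	WithNamespace("policy-system").
	WithParameterNotFoundAction(admissionregistrationv1.DenyAction)

// All params of the ParamKind carrying a matching label; each match is evaluated
// and the results are ANDed together. Name must stay unset.
var paramsBySelector = applyadmissionv1.ParamRef().
	WithSelector(applymetav1.LabelSelector().
		WithMatchLabels(map[string]string{"policy.example.com/tier": "prod"})).
	WithParameterNotFoundAction(admissionregistrationv1.AllowAction)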
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
index a8c68136..056e944f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rule.go
@@ -24,11 +24,43 @@ import (
// RuleApplyConfiguration represents a declarative configuration of the Rule type for use
// with apply.
+//
+// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
+// to make sure that all the tuple expansions are valid.
type RuleApplyConfiguration struct {
- APIGroups []string `json:"apiGroups,omitempty"`
- APIVersions []string `json:"apiVersions,omitempty"`
- Resources []string `json:"resources,omitempty"`
- Scope *admissionregistrationv1.ScopeType `json:"scope,omitempty"`
+ // APIGroups is the API groups the resources belong to. '*' is all groups.
+ // If '*' is present, the length of the slice must be one.
+ // Required.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // APIVersions is the API versions the resources belong to. '*' is all versions.
+ // If '*' is present, the length of the slice must be one.
+ // Required.
+ APIVersions []string `json:"apiVersions,omitempty"`
+ // Resources is a list of resources this rule applies to.
+ //
+ // For example:
+ // 'pods' means pods.
+ // 'pods/log' means the log subresource of pods.
+ // '*' means all resources, but not subresources.
+ // 'pods/*' means all subresources of pods.
+ // '*/scale' means all scale subresources.
+ // '*/*' means all resources and their subresources.
+ //
+ // If a wildcard is present, the validation rule will ensure resources do not
+ // overlap with each other.
+ //
+ // Depending on the enclosing object, subresources might not be allowed.
+ // Required.
+ Resources []string `json:"resources,omitempty"`
+ // scope specifies the scope of this rule.
+ // Valid values are "Cluster", "Namespaced", and "*"
+ // "Cluster" means that only cluster-scoped resources will match this rule.
+ // Namespace API objects are cluster-scoped.
+ // "Namespaced" means that only namespaced resources will match this rule.
+ // "*" means that there are no scope restrictions.
+ // Subresources match the scope of their parent resource.
+ // Default is "*".
+ Scope *admissionregistrationv1.ScopeType `json:"scope,omitempty"`
}
// RuleApplyConfiguration constructs a declarative configuration of the Rule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
index 55a985f9..a4a1643f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/rulewithoperations.go
@@ -24,8 +24,17 @@ import (
// RuleWithOperationsApplyConfiguration represents a declarative configuration of the RuleWithOperations type for use
// with apply.
+//
+// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
+// sure that all the tuple expansions are valid.
type RuleWithOperationsApplyConfiguration struct {
- Operations []admissionregistrationv1.OperationType `json:"operations,omitempty"`
+ // Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or *
+ // for all of those operations and any future admission operations that are added.
+ // If '*' is present, the length of the slice must be one.
+ // Required.
+ Operations []admissionregistrationv1.OperationType `json:"operations,omitempty"`
+ // Rule is embedded, it describes other criteria of the rule, like
+ // APIGroups, APIVersions, Resources, etc.
RuleApplyConfiguration `json:",inline"`
}
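
Putting the Rule and RuleWithOperations fields together, a rule covering the documented resource/subresource syntax might look like the following sketch (group, versions, and resources are illustrative):

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// Matches CREATE and UPDATE of pods and of the pods/status subresource in the
// core ("") API group, restricted to namespaced objects. A "*" entry in any of
// the slices must be the only entry.
var podRule = applyadmissionv1.RuleWithOperations().
	WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
	WithAPIGroups("").
	WithAPIVersions("v1").
	WithResources("pods", "pods/status").
	WithScope(admissionregistrationv1.NamespacedScope)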
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
index 23978066..c93b6115 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/servicereference.go
@@ -20,11 +20,22 @@ package v1
// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use
// with apply.
+//
+// ServiceReference holds a reference to Service.legacy.k8s.io
type ServiceReferenceApplyConfiguration struct {
+ // `namespace` is the namespace of the service.
+ // Required
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- Port *int32 `json:"port,omitempty"`
+ // `name` is the name of the service.
+ // Required
+ Name *string `json:"name,omitempty"`
+ // `path` is an optional URL path which will be sent in any request to
+ // this service.
+ Path *string `json:"path,omitempty"`
+ // If specified, the port on the service that is hosting the webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ Port *int32 `json:"port,omitempty"`
}
// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
index 723d10ec..f6077ee3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go
@@ -20,7 +20,11 @@ package v1
// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
// with apply.
+//
+// TypeChecking contains results of type checking the expressions in the
+// ValidatingAdmissionPolicy
type TypeCheckingApplyConfiguration struct {
+ // The type checking warnings for each expression.
ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
index bbfc66a6..22d965fa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go
@@ -29,11 +29,19 @@ import (
// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
// with apply.
+//
+// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
type ValidatingAdmissionPolicyApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicy.
+ Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
+ // behaves in the expected way.
+ // Populated by the system.
+ // Read-only.
+ Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
}
// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
@@ -46,6 +54,26 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
return b
}
+// ExtractValidatingAdmissionPolicyFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// validatingAdmissionPolicy must be an unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ b := &ValidatingAdmissionPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(validatingAdmissionPolicy.Name)
+
+ b.WithKind("ValidatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1")
+ return b, nil
+}
+
// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a
// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "")
}
-// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractValidatingAdmissionPolicyStatus extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the status subresource.
func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "status")
}
-func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- b := &ValidatingAdmissionPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(validatingAdmissionPolicy.Name)
-
- b.WithKind("ValidatingAdmissionPolicy")
- b.WithAPIVersion("admissionregistration.k8s.io/v1")
- return b, nil
-}
func (b ValidatingAdmissionPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
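
The new *From variant exposes the subresource parameter directly, while the existing helpers become thin wrappers around it. A short sketch of the relationship (the field manager and live object are placeholders):

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// extractBoth shows that an empty subresource extracts the main resource and
// "status" extracts the status subresource, matching the wrapper functions.
func extractBoth(live *admissionregistrationv1.ValidatingAdmissionPolicy, fieldManager string) error {
	main, err := applyadmissionv1.ExtractValidatingAdmissionPolicyFrom(live, fieldManager, "")
	if err != nil {
		return err
	}
	status, err := applyadmissionv1.ExtractValidatingAdmissionPolicyFrom(live, fieldManager, "status")
	if err != nil {
		return err
	}
	_, _ = main, status // equivalent to ExtractValidatingAdmissionPolicy / ExtractValidatingAdmissionPolicyStatus
	return nil
}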
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
index 416d26cb..5a083a05 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go
@@ -29,10 +29,24 @@ import (
// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params cannot affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
+ Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
@@ -45,29 +59,14 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin
return b
}
-// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
-// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
-// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractValidatingAdmissionPolicyBindingFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
-// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractValidatingAdmissionPolicyBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "")
-}
-
-// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status")
-}
-
-func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
b.WithAPIVersion("admissionregistration.k8s.io/v1")
return b, nil
}
+
+// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
+// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingAdmissionPolicyBinding must be an unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding, fieldManager, "")
+}
+
func (b ValidatingAdmissionPolicyBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
index eb426af4..31d98bb9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go
@@ -24,10 +24,63 @@ import (
// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
- PolicyName *string `json:"policyName,omitempty"`
- ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
- MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName *string `json:"policyName,omitempty"`
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding.
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported actions values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // both for allowed or denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failures, formatted as
+ // a JSON list of objects, each with the following fields:
+ // - message: The validation failure message string
+ // - policy: The resource name of the ValidatingAdmissionPolicy
+ // - binding: The resource name of the ValidatingAdmissionPolicyBinding
+ // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
+ // - validationActions: The enforcement actions enacted for the validation failure
+ // Example audit annotation:
+ // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
+ //
+ // Clients should expect to handle additional values by ignoring
+ // any values not recognized.
+ //
+ // "Deny" and "Warn" may not be used together since this combination
+ // needlessly duplicates the validation failure both in the
+ // API response body and the HTTP warning headers.
+ //
+ // Required.
ValidationActions []admissionregistrationv1.ValidationAction `json:"validationActions,omitempty"`
}
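
A hedged sketch of a complete binding spec using the fields above: a policy reference, a named param, a matchResources narrowing to labelled namespaces, and Deny plus Audit enforcement (policy, param, and label names are illustrative):

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// Binds "replica-limits.example.com" to a named param and narrows it to
// namespaces labelled environment=prod. Failures are denied and audited;
// Deny and Warn may not be combined.
var bindingSpec = applyadmissionv1.ValidatingAdmissionPolicyBindingSpec().
	WithPolicyName("replica-limits.example.com").
	WithParamRef(applyadmissionv1.ParamRef().
		WithName("prod-limits").
		WithNamespace("policy-system").
		WithParameterNotFoundAction(admissionregistrationv1.DenyAction)).
	WithMatchResources(applyadmissionv1.MatchResources().
		WithNamespaceSelector(applymetav1.LabelSelector().
			WithMatchLabels(map[string]string{"environment": "prod"}))).
	WithValidationActions(admissionregistrationv1.Deny, admissionregistrationv1.Audit)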
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
index 1635b30a..b3f6989a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go
@@ -24,14 +24,66 @@ import (
// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
type ValidatingAdmissionPolicySpecApplyConfiguration struct {
- ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
- MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
- Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
- FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
- AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
- Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ // ParamKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ // MatchConstraints specifies what resources this policy is designed to validate.
+ // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
+ // Required.
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ // Validations contain CEL expressions which are used to apply the validation.
+ // Validations and AuditAnnotations may not both be empty; at least one of Validations or AuditAnnotations is
+ // required.
+ Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if spec.paramKind refers to a non-existent Kind.
+ // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
+ // define how failures are enforced.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // auditAnnotations contains CEL expressions which are used to produce audit
+ // annotations for the audit event of the API request.
+ // validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
+ // required.
+ AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
}
// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
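
A hedged sketch of a policy spec exercising these fields, parameterized by a hypothetical ReplicaLimit CRD (the CRD, its group, the CEL expressions, and all names are illustrative assumptions):

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	applyadmissionv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// Validates that Deployments do not exceed params.spec.maxReplicas. The variable
// is available to validations but not to matchConditions, which run first.
var policySpec = applyadmissionv1.ValidatingAdmissionPolicySpec().
	WithParamKind(applyadmissionv1.ParamKind().
		WithAPIVersion("policy.example.com/v1").
		WithKind("ReplicaLimit")).
	WithMatchConstraints(applyadmissionv1.MatchResources().
		WithResourceRules(applyadmissionv1.NamedRuleWithOperations().
			WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
			WithAPIGroups("apps").
			WithAPIVersions("v1").
			WithResources("deployments"))).
	WithMatchConditions(applyadmissionv1.MatchCondition().
		WithName("exclude-kube-system").
		WithExpression(`request.namespace != "kube-system"`)).
	WithVariables(applyadmissionv1.Variable().
		WithName("replicas").
		WithExpression("has(object.spec.replicas) ? object.spec.replicas : 1")).
	WithValidations(applyadmissionv1.Validation().
		WithExpression("variables.replicas <= params.spec.maxReplicas").
		WithMessage("replica count exceeds the configured maximum")).
	WithFailurePolicy(admissionregistrationv1.Fail)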
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
index e6f4e845..0b3ea761 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go
@@ -24,10 +24,16 @@ import (
// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
// with apply.
+//
+// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
- Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The generation observed by the controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The results of type checking for each expression.
+ // Presence of this field indicates the completion of the type checking.
+ TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
+ // The conditions represent the latest available observations of a policy's current state.
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
index a2c705eb..02298017 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhook.go
@@ -25,18 +25,132 @@ import (
// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use
// with apply.
+//
+// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
type ValidatingWebhookApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
- Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
- FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
- NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // The name of the admission webhook.
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+ // "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+ // of the organization.
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // ClientConfig defines how to communicate with the hook.
+ // Required
+ ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
+ // Rules describes what operations on what resources/subresources the webhook cares about.
+ // The webhook cares about an operation if it matches _any_ Rule.
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+ // from putting the cluster in a state which cannot be recovered from without completely
+ // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+ // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+ Rules []RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
+ // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+ // allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchPolicy defines how the "rules" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
+ //
+ // Defaults to "Equivalent"
+ MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the webhook on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the webhook.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1"; you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the webhook on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging";
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the webhook based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the webhook, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // SideEffects states whether this webhook has side effects.
+ // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
+ // Webhooks with side effects MUST implement a reconciliation system, since a request may be
+ // rejected by a future step in the admission chain and the side effects therefore need to be undone.
+ // Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+ // sideEffects == Unknown or Some.
+ SideEffects *admissionregistrationv1.SideEffectClass `json:"sideEffects,omitempty"`
+ // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
+ // the webhook call will be ignored or the API call will fail based on the
+ // failure policy.
+ // The timeout value must be between 1 and 30 seconds.
+ // Default to 10 seconds.
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
+ // versions the Webhook expects. The API server will try to use the first version in
+ // the list which it supports. If none of the versions specified in this list
+ // are supported by the API server, validation will fail for this object.
+ // If a persisted webhook configuration specifies allowed versions and does not
+ // include any versions known to the API Server, calls to the webhook will fail
+ // and be subject to the failure policy.
+ AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be sent to this
+ // webhook. Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the webhook is called.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
}
// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with
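As a quick illustration of how the fields documented above are typically populated, here is a minimal sketch using the generated With* builders from this package; the webhook name, label key, and CEL expression are made up for illustration and are not taken from this repository.

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// buildWebhook sketches how the documented fields map onto the generated builders.
func buildWebhook() *applyadmv1.ValidatingWebhookApplyConfiguration {
	return applyadmv1.ValidatingWebhook().
		WithName("validate.skyhook.example.com"). // illustrative name
		WithRules(applyadmv1.RuleWithOperations().
			WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
			WithAPIGroups("apps").
			WithAPIVersions("v1").
			WithResources("deployments")).
		WithFailurePolicy(admissionregistrationv1.Fail).
		WithMatchPolicy(admissionregistrationv1.Equivalent).
		WithNamespaceSelector(applymetav1.LabelSelector().
			WithMatchExpressions(applymetav1.LabelSelectorRequirement().
				WithKey("runlevel"). // illustrative label key
				WithOperator(metav1.LabelSelectorOpNotIn).
				WithValues("0", "1"))).
		WithSideEffects(admissionregistrationv1.SideEffectClassNone).
		WithTimeoutSeconds(10).
		WithAdmissionReviewVersions("v1").
		WithMatchConditions(applyadmv1.MatchCondition().
			WithName("exclude-kube-system"). // illustrative match condition
			WithExpression("request.namespace != 'kube-system'"))
}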
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
index cfe2e328..fdf7923e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -29,10 +29,14 @@ import (
// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use
// with apply.
+//
+// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
type ValidatingWebhookConfigurationApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ // Webhooks is a list of webhooks and the affected resources and operations.
+ Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with
@@ -45,29 +49,14 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
return b
}
-// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
-// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
-// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractValidatingWebhookConfigurationFrom extracts the applied configuration owned by fieldManager from
+// validatingWebhookConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
-// ExtractValidatingWebhookConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractValidatingWebhookConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
- return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "")
-}
-
-// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
- return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status")
-}
-
-func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractValidatingWebhookConfigurationFrom(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
b := &ValidatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +68,21 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admis
b.WithAPIVersion("admissionregistration.k8s.io/v1")
return b, nil
}
+
+// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
+// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
+// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingWebhookConfiguration must be an unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractValidatingWebhookConfiguration provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return ExtractValidatingWebhookConfigurationFrom(validatingWebhookConfiguration, fieldManager, "")
+}
+
func (b ValidatingWebhookConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
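The extract helpers above support the extract/modify-in-place/apply workflow described in the comments. A rough sketch against a typed clientset follows; the field manager name and the added label are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// relabel extracts the fields owned by fieldManager, tweaks them, and applies them back.
func relabel(ctx context.Context, cs kubernetes.Interface, name, fieldManager string) error {
	live, err := cs.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract only the fields this field manager owns (main resource, no subresource).
	cfg, err := applyadmv1.ExtractValidatingWebhookConfiguration(live, fieldManager)
	if err != nil {
		return err
	}
	cfg.WithLabels(map[string]string{"example.com/managed": "true"}) // illustrative label
	_, err = cs.AdmissionregistrationV1().ValidatingWebhookConfigurations().
		Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}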
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
index 9966a7a2..2ad6756d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go
@@ -24,11 +24,77 @@ import (
// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
// with apply.
+//
+// Validation specifies the CEL expression which is used to apply the validation.
type ValidationApplyConfiguration struct {
- Expression *string `json:"expression,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *metav1.StatusReason `json:"reason,omitempty"`
- MessageExpression *string `json:"messageExpression,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL.
+ // ref: https://github.com/google/cel-spec
+ // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Accessible property names are escaped according to the following rules when accessed in the expression:
+ // - '__' escapes to '__underscores__'
+ // - '.' escapes to '__dot__'
+ // - '-' escapes to '__dash__'
+ // - '/' escapes to '__slash__'
+ // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+ // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+ // "import", "let", "loop", "package", "namespace", "return".
+ // Examples:
+ // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
+ // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
+ // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
+ //
+ // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+ // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
+ // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+ // non-intersecting elements in `Y` are appended, retaining their partial order.
+ // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+ // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+ // non-intersecting keys are appended, retaining their partial order.
+ // Required.
+ Expression *string `json:"expression,omitempty"`
+ // Message represents the message displayed when validation fails. The message is required if the Expression
+ // contains line breaks; the message itself must not contain line breaks.
+ // If unset, the message is "failed Expression: {Expression}".
+ // e.g. "must be a URL with the host matching spec.host"
+ Message *string `json:"message,omitempty"`
+ // Reason represents a machine-readable description of why this validation failed.
+ // If this is the first validation in the list to fail, this reason, as well as the
+ // corresponding HTTP response code, are used in the
+ // HTTP response to the client.
+ // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
+ // If not set, StatusReasonInvalid is used in the response to the client.
+ Reason *metav1.StatusReason `json:"reason,omitempty"`
+ // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+ // Since messageExpression is used as a failure message, it must evaluate to a string.
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
+ // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+ // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+ // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+ // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
+ // Example:
+ // "object.x must be less than max ("+string(params.max)+")"
+ MessageExpression *string `json:"messageExpression,omitempty"`
}
// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
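A small sketch of how the Validation fields documented above fit together; the CEL expression, messages, and the replica limit of 5 are illustrative only.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// replicaLimit builds a CEL validation; the limit is purely illustrative.
func replicaLimit() *applyadmv1.ValidationApplyConfiguration {
	return applyadmv1.Validation().
		WithExpression("object.spec.replicas <= 5").
		WithMessage("must have at most 5 replicas").
		WithMessageExpression("'replicas is ' + string(object.spec.replicas) + ', must be at most 5'").
		WithReason(metav1.StatusReasonInvalid)
}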
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
index 9dd20afa..a99f8379 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go
@@ -20,8 +20,15 @@ package v1
// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
// with apply.
+//
+// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
type VariableApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ Name *string `json:"name,omitempty"`
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
Expression *string `json:"expression,omitempty"`
}
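A variable defined this way becomes available to later expressions as variables.<name>; a tiny sketch, where the name and expression are illustrative.

package example

import (
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// isOwnedByHelm is available to later expressions in the same policy as 'variables.isOwnedByHelm'.
var isOwnedByHelm = applyadmv1.Variable().
	WithName("isOwnedByHelm").
	WithExpression("has(object.metadata.labels) && 'app.kubernetes.io/managed-by' in object.metadata.labels")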
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
index 77f2227b..10f3d48d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/webhookclientconfig.go
@@ -20,10 +20,44 @@ package v1
// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use
// with apply.
+//
+// WebhookClientConfig contains the information to make a TLS
+// connection with the webhook
type WebhookClientConfigApplyConfiguration struct {
- URL *string `json:"url,omitempty"`
- Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"`
- CABundle []byte `json:"caBundle,omitempty"`
+ // `url` gives the location of the webhook, in standard URL form
+ // (`scheme://host:port/path`). Exactly one of `url` or `service`
+ // must be specified.
+ //
+ // The `host` should not refer to a service running in the cluster; use
+ // the `service` field instead. The host might be resolved via external
+ // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+ // in-cluster DNS as that would be a layering violation). `host` may
+ // also be an IP address.
+ //
+ // Please note that using `localhost` or `127.0.0.1` as a `host` is
+ // risky unless you take great care to run this webhook on all hosts
+ // which run an apiserver which might need to make calls to this
+ // webhook. Such installs are likely to be non-portable, i.e., not easy
+ // to turn up in a new cluster.
+ //
+ // The scheme must be "https"; the URL must begin with "https://".
+ //
+ // A path is optional, and if present may be any string permissible in
+ // a URL. You may use the path to pass an arbitrary string to the
+ // webhook, for example, a cluster identifier.
+ //
+ // Attempting to use a user or basic auth e.g. "user:password@" is not
+ // allowed. Fragments ("#...") and query parameters ("?...") are not
+ // allowed, either.
+ URL *string `json:"url,omitempty"`
+ // `service` is a reference to the service for this webhook. Either
+ // `service` or `url` must be specified.
+ //
+ // If the webhook is running within the cluster, then you should use `service`.
+ Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"`
+ // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ CABundle []byte `json:"caBundle,omitempty"`
}
// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with
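Exactly one of url or service may be set; below is a sketch of the in-cluster service form, where the namespace, service name, path, and CA bundle are placeholders.

package example

import (
	applyadmv1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
)

// inClusterClientConfig points the webhook at a Service; caPEM would normally come
// from the serving certificate's CA (placeholder bytes here).
func inClusterClientConfig(caPEM []byte) *applyadmv1.WebhookClientConfigApplyConfiguration {
	return applyadmv1.WebhookClientConfig().
		WithService(applyadmv1.ServiceReference().
			WithNamespace("skyhook").            // illustrative namespace
			WithName("skyhook-webhook-service"). // illustrative service name
			WithPath("/validate").
			WithPort(443)).
		WithCABundle(caPEM...)
}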
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go
index b08ac722..cf0d177c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go
@@ -20,7 +20,49 @@ package v1alpha1
// ApplyConfigurationApplyConfiguration represents a declarative configuration of the ApplyConfiguration type for use
// with apply.
+//
+// ApplyConfiguration defines the desired configuration values of an object.
type ApplyConfigurationApplyConfiguration struct {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
Expression *string `json:"expression,omitempty"`
}
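A sketch of wiring such a CEL apply-configuration expression into a v1alpha1 Mutation; the Mutation builder usage is assumed to follow the standard code-generation pattern, and the serviceAccountName value is illustrative.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// setServiceAccount returns a mutation whose CEL expression produces an apply
// configuration; the "example" service account name is illustrative.
func setServiceAccount() *applyadmv1alpha1.MutationApplyConfiguration {
	return applyadmv1alpha1.Mutation().
		WithPatchType(admissionregistrationv1alpha1.PatchTypeApplyConfiguration).
		WithApplyConfiguration(applyadmv1alpha1.ApplyConfiguration().
			WithExpression(`Object{spec: Object.spec{serviceAccountName: "example"}}`))
}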
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
index 958a5374..62f29126 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/auditannotation.go
@@ -20,8 +20,40 @@ package v1alpha1
// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
// with apply.
+//
+// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // key specifies the audit annotation key. The audit annotation keys of
+ // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
+ // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
+ //
+ // The key is combined with the resource name of the
+ // ValidatingAdmissionPolicy to construct an audit annotation key:
+ // "{ValidatingAdmissionPolicy name}/{key}".
+ //
+ // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
+ // and the same audit annotation key, the annotation key will be identical.
+ // In this case, the first annotation written with the key will be included
+ // in the audit event and all subsequent annotations with the same key
+ // will be discarded.
+ //
+ // Required.
+ Key *string `json:"key,omitempty"`
+ // valueExpression represents the expression which is evaluated by CEL to
+ // produce an audit annotation value. The expression must evaluate to either
+ // a string or null value. If the expression evaluates to a string, the
+ // audit annotation is included with the string value. If the expression
+ // evaluates to null or empty string the audit annotation will be omitted.
+ // The valueExpression may be no longer than 5kb in length.
+ // If the result of the valueExpression is more than 10kb in length, it
+ // will be truncated to 10kb.
+ //
+ // If multiple ValidatingAdmissionPolicyBinding resources match an
+ // API request, then the valueExpression will be evaluated for
+ // each binding. All unique values produced by the valueExpressions
+ // will be joined together in a comma-separated list.
+ //
+ // Required.
ValueExpression *string `json:"valueExpression,omitempty"`
}
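A sketch of an audit annotation built with these fields; the key and the CEL value expression are illustrative.

package example

import (
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// replicaAudit records the observed replica count; it is emitted as
// "{policy name}/observed-replicas" in the audit event.
var replicaAudit = applyadmv1alpha1.AuditAnnotation().
	WithKey("observed-replicas").
	WithValueExpression("string(object.spec.replicas)")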
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
index f36c2f0f..220c5d2c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/expressionwarning.go
@@ -20,9 +20,17 @@ package v1alpha1
// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
// with apply.
+//
+// ExpressionWarning is warning information that targets a specific expression.
type ExpressionWarningApplyConfiguration struct {
+ // The path to the field that refers to the expression.
+ // For example, the reference to the expression of the first item of
+ // validations is "spec.validations[0].expression"
FieldRef *string `json:"fieldRef,omitempty"`
- Warning *string `json:"warning,omitempty"`
+ // The content of type checking information in a human-readable form.
+ // Each line of the warning contains the type that the expression is checked
+ // against, followed by the type check error from the compiler.
+ Warning *string `json:"warning,omitempty"`
}
// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go
index 418d86a2..c3ad775d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go
@@ -20,7 +20,73 @@ package v1alpha1
// JSONPatchApplyConfiguration represents a declarative configuration of the JSONPatch type for use
// with apply.
+//
+// JSONPatch defines a JSON Patch.
type JSONPatchApplyConfiguration struct {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
Expression *string `json:"expression,omitempty"`
}
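A sketch of a JSONPatch mutation using an expression like the ones documented above; the label key and value are illustrative, and the Mutation builder usage is assumed to follow the standard code-generation pattern.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// addEnvironmentLabel adds a label via a CEL-produced JSON patch; jsonpatch.escapeKey
// handles the '/' in the (illustrative) label key.
func addEnvironmentLabel() *applyadmv1alpha1.MutationApplyConfiguration {
	return applyadmv1alpha1.Mutation().
		WithPatchType(admissionregistrationv1alpha1.PatchTypeJSONPatch).
		WithJSONPatch(applyadmv1alpha1.JSONPatch().
			WithExpression(`[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"), value: "test"}]`))
}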
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
index 7f983dcb..7b758dbf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchcondition.go
@@ -21,7 +21,29 @@ package v1alpha1
// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
// with apply.
type MatchConditionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
+ // as well as providing an identifier for logging purposes. A good name should be descriptive of
+ // the associated expression.
+ // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and
+ // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or
+ // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an
+ // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')
+ //
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:
+ //
+ // 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // 'oldObject' - The existing object. The value is null for CREATE requests.
+ // 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest).
+ // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+ //
+ // Required.
Expression *string `json:"expression,omitempty"`
}
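A sketch of a match condition that consults the documented 'request' variable; the exempted service account name is illustrative.

package example

import (
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// skipNodeController exempts requests made by a controller service account from the policy.
var skipNodeController = applyadmv1alpha1.MatchCondition().
	WithName("exclude-node-controller").
	WithExpression("request.userInfo.username != 'system:serviceaccount:kube-system:node-controller'")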
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
index e443535b..5bdeab09 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/matchresources.go
@@ -25,12 +25,89 @@ import (
// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
// with apply.
+//
+// MatchResources decides whether to run the admission control policy on an object based
+// on whether it meets the match criteria.
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
type MatchResourcesApplyConfiguration struct {
- NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
- ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
- MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the admission control policy on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the policy.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1"; you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the policy on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging";
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the policy based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the policy's expression (CEL), and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // ResourceRules describes what operations on what resources/subresources the admission policy matches.
+ // The policy cares about an operation if it matches _any_ Rule.
+ ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
+ // ExcludeResourceRules describes what operations on what resources/subresources the policy should not care about.
+ // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+ ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
+ // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // the admission policy does not consider requests to apps/v1beta1 or extensions/v1beta1 API groups.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // the admission policy **does** consider requests made to apps/v1beta1 or extensions/v1beta1
+ // API groups. The API server translates the request to a matched resource API if necessary.
+ //
+ // Defaults to "Equivalent"
+ MatchPolicy *admissionregistrationv1alpha1.MatchPolicyType `json:"matchPolicy,omitempty"`
}
// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
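A sketch combining the selectors and rules documented above; the API group/resource, labels, and operation set are illustrative, and the builder names are assumed to follow the standard code-generation pattern.

package example

import (
	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// prodDeployments matches deployment writes in prod/staging namespaces only.
func prodDeployments() *applyadmv1alpha1.MatchResourcesApplyConfiguration {
	return applyadmv1alpha1.MatchResources().
		WithNamespaceSelector(applymetav1.LabelSelector().
			WithMatchExpressions(applymetav1.LabelSelectorRequirement().
				WithKey("environment"). // illustrative label key
				WithOperator(metav1.LabelSelectorOpIn).
				WithValues("prod", "staging"))).
		WithResourceRules(applyadmv1alpha1.NamedRuleWithOperations().
			WithOperations(admissionregistrationv1.Create, admissionregistrationv1.Update).
			WithAPIGroups("apps").
			WithAPIVersions("v1").
			WithResources("deployments")).
		WithMatchPolicy(admissionregistrationv1alpha1.Equivalent)
}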
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
index 041bec5e..be5b7912 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
@@ -29,10 +29,14 @@ import (
// MutatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicy type for use
// with apply.
+//
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into the admission chain.
type MutatingAdmissionPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// MutatingAdmissionPolicy constructs a declarative configuration of the MutatingAdmissionPolicy type for use with
@@ -45,29 +49,14 @@ func MutatingAdmissionPolicy(name string) *MutatingAdmissionPolicyApplyConfigura
return b
}
-// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
-// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a
-// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingAdmissionPolicyFrom extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingAdmissionPolicy must be a unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
-// ExtractMutatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingAdmissionPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
- return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "")
-}
-
-// ExtractMutatingAdmissionPolicyStatus is the same as ExtractMutatingAdmissionPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingAdmissionPolicyStatus(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
- return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "status")
-}
-
-func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+func ExtractMutatingAdmissionPolicyFrom(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
b := &MutatingAdmissionPolicyApplyConfiguration{}
err := managedfields.ExtractInto(mutatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +68,21 @@ func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrati
b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a
+// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingAdmissionPolicy must be an unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1alpha1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+ return ExtractMutatingAdmissionPolicyFrom(mutatingAdmissionPolicy, fieldManager, "")
+}
+
func (b MutatingAdmissionPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
index be0690a1..4964a310 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
@@ -29,10 +29,24 @@ import (
// MutatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBinding type for use
// with apply.
+//
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type MutatingAdmissionPolicyBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// MutatingAdmissionPolicyBinding constructs a declarative configuration of the MutatingAdmissionPolicyBinding type for use with
@@ -45,29 +59,14 @@ func MutatingAdmissionPolicyBinding(name string) *MutatingAdmissionPolicyBinding
return b
}
-// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
-// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a
-// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingAdmissionPolicyBindingFrom extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicyBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingAdmissionPolicyBinding must be a unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
-// ExtractMutatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingAdmissionPolicyBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "")
-}
-
-// ExtractMutatingAdmissionPolicyBindingStatus is the same as ExtractMutatingAdmissionPolicyBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingAdmissionPolicyBindingStatus(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "status")
-}
-
-func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractMutatingAdmissionPolicyBindingFrom(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &MutatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(mutatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.MutatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admis
b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a
+// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingAdmissionPolicyBinding must be an unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return ExtractMutatingAdmissionPolicyBindingFrom(mutatingAdmissionPolicyBinding, fieldManager, "")
+}
+
func (b MutatingAdmissionPolicyBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go
index 04729f42..b4f5570c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go
@@ -20,9 +20,27 @@ package v1alpha1
// MutatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use
// with apply.
+//
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingSpecApplyConfiguration struct {
- PolicyName *string `json:"policyName,omitempty"`
- ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored
+ // Required.
+ PolicyName *string `json:"policyName,omitempty"`
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
}
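A sketch tying the binding spec fields together; the policy name and parameter reference are illustrative. matchResources is left unset, so only the policy's own matchConstraints and matchConditions restrict what is mutated.

package example

import (
	applyadmv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// bindingSpec binds an (illustrative) policy to an (illustrative) parameter resource.
var bindingSpec = applyadmv1alpha1.MutatingAdmissionPolicyBindingSpec().
	WithPolicyName("add-environment-label.example.com").
	WithParamRef(applyadmv1alpha1.ParamRef().
		WithName("environment-params").
		WithNamespace("default"))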
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go
index 334056a3..0c67fd75 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go
@@ -25,14 +25,74 @@ import (
// MutatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicySpec type for use
// with apply.
+//
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
type MutatingAdmissionPolicySpecApplyConfiguration struct {
- ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
- MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
- Variables []VariableApplyConfiguration `json:"variables,omitempty"`
- Mutations []MutationApplyConfiguration `json:"mutations,omitempty"`
- FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
- ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ Mutations []MutationApplyConfiguration `json:"mutations,omitempty"`
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
}
// MutatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicySpec type for use with
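
A minimal sketch of a spec using these fields, matching Pod CREATE requests and applying an ApplyConfiguration-style mutation (alias admregv1alpha1 as above; the CEL expression and service account name are illustrative, and the string literals rely on Go's untyped-constant conversion to the enum string types):

package example

import (
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// policySpecExample sketches a MutatingAdmissionPolicy spec that sets a Pod's
// serviceAccountName on CREATE.
func policySpecExample() *admregv1alpha1.MutatingAdmissionPolicySpecApplyConfiguration {
	return admregv1alpha1.MutatingAdmissionPolicySpec().
		WithMatchConstraints(admregv1alpha1.MatchResources().
			WithResourceRules(admregv1alpha1.NamedRuleWithOperations().
				WithAPIGroups("").
				WithAPIVersions("v1").
				WithResources("pods").
				WithOperations("CREATE"))).
		WithMutations(admregv1alpha1.Mutation().
			WithPatchType("ApplyConfiguration"). // untyped constant converts to the PatchType string type
			WithApplyConfiguration(admregv1alpha1.ApplyConfiguration().
				WithExpression(`Object{spec: Object.spec{serviceAccountName: "example"}}`))).
		WithFailurePolicy("Fail").
		WithReinvocationPolicy("IfNeeded")
}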
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go
index 4ed9d93f..001565db 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go
@@ -24,10 +24,21 @@ import (
// MutationApplyConfiguration represents a declarative configuration of the Mutation type for use
// with apply.
+//
+// Mutation specifies the CEL expression which is used to apply the Mutation.
type MutationApplyConfiguration struct {
- PatchType *admissionregistrationv1alpha1.PatchType `json:"patchType,omitempty"`
- ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"`
- JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"`
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ PatchType *admissionregistrationv1alpha1.PatchType `json:"patchType,omitempty"`
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"`
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"`
}
// MutationApplyConfiguration constructs a declarative configuration of the Mutation type for use with
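
For the JSONPatch variant of a Mutation, a sketch under the same assumptions (alias admregv1alpha1; the label key is hypothetical and uses the documented jsonpatch.escapeKey helper inside the CEL expression):

package example

import (
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// jsonPatchMutationExample sketches a Mutation that adds a label via a CEL
// expression returning a list of JSONPatch operations.
func jsonPatchMutationExample() *admregv1alpha1.MutationApplyConfiguration {
	return admregv1alpha1.Mutation().
		WithPatchType("JSONPatch").
		WithJSONPatch(admregv1alpha1.JSONPatch().
			WithExpression(`[JSONPatch{op: "add", path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/injected"), value: "true"}]`))
}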
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
index f630224a..b9e309ed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/namedrulewithoperations.go
@@ -25,8 +25,12 @@ import (
// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
// with apply.
+//
+// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
type NamedRuleWithOperationsApplyConfiguration struct {
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // RuleWithOperations is a tuple of Operations and Resources.
v1.RuleWithOperationsApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
index daf17fb2..7e008e32 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramkind.go
@@ -20,9 +20,16 @@ package v1alpha1
// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
// with apply.
+//
+// ParamKind is a tuple of Group Kind and Version.
type ParamKindApplyConfiguration struct {
+ // APIVersion is the API group version the resources belong to.
+ // In format of "group/version".
+ // Required.
APIVersion *string `json:"apiVersion,omitempty"`
- Kind *string `json:"kind,omitempty"`
+ // Kind is the API kind the resources belong to.
+ // Required.
+ Kind *string `json:"kind,omitempty"`
}
// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
index 669fadbd..6b77cd0e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/paramref.go
@@ -25,10 +25,48 @@ import (
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
// with apply.
+//
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // `name` is the name of the resource being referenced.
+ //
+ // `name` and `selector` are mutually exclusive properties. If one is set,
+ // the other must be unset.
+ Name *string `json:"name,omitempty"`
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ Namespace *string `json:"namespace,omitempty"`
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, then the absence of
+ // matched parameters is treated as a successful validation by the binding.
+ // If set to `Deny`, then the absence of matched parameters is subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`.
+ // Defaults to `Deny`.
ParameterNotFoundAction *admissionregistrationv1alpha1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
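
A short sketch of the selector form of ParamRef (aliases admregv1alpha1 and applymetav1 for the client-go apply configuration packages; the label and its value are hypothetical):

package example

import (
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

// paramRefBySelector sketches selecting every param object labelled for a
// hypothetical policy, tolerating the case where none exist.
func paramRefBySelector() *admregv1alpha1.ParamRefApplyConfiguration {
	return admregv1alpha1.ParamRef().
		WithSelector(applymetav1.LabelSelector().
			WithMatchLabels(map[string]string{"policy": "replica-limits"})).
		WithParameterNotFoundAction("Allow")
}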
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
index d1a7fff5..3b5bff0f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/typechecking.go
@@ -20,7 +20,11 @@ package v1alpha1
// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
// with apply.
+//
+// TypeChecking contains results of type checking the expressions in the
+// ValidatingAdmissionPolicy
type TypeCheckingApplyConfiguration struct {
+ // The type checking warnings for each expression.
ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
index a8efff6b..7f85ed8e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicy.go
@@ -29,11 +29,19 @@ import (
// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
// with apply.
+//
+// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
type ValidatingAdmissionPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicy.
+ Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
+ // behaves in the expected way.
+ // Populated by the system.
+ // Read-only.
+ Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
}
// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
@@ -46,6 +54,26 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
return b
}
+// ExtractValidatingAdmissionPolicyFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// validatingAdmissionPolicy must be an unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ b := &ValidatingAdmissionPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(validatingAdmissionPolicy.Name)
+
+ b.WithKind("ValidatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
+ return b, nil
+}
+
// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a
// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "")
}
-// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractValidatingAdmissionPolicyStatus extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the status subresource.
func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "status")
}
-func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1alpha1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- b := &ValidatingAdmissionPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(validatingAdmissionPolicy.Name)
-
- b.WithKind("ValidatingAdmissionPolicy")
- b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
- return b, nil
-}
func (b ValidatingAdmissionPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
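
A sketch of the extract/modify-in-place/apply workflow these functions describe, assuming the cluster still serves the v1alpha1 API and the typed clientset exposes it; the policy name, field manager, and label are hypothetical:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// relabelPolicy extracts the fields owned by one field manager, adds a label,
// and applies the result back with the same manager.
func relabelPolicy(ctx context.Context, cs kubernetes.Interface) error {
	current, err := cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Get(ctx, "limit-replicas.example.com", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := admregv1alpha1.ExtractValidatingAdmissionPolicy(current, "example-manager")
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"owner": "platform"}) // modify only fields this manager owns
	_, err = cs.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}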
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
index 5bcefba6..0760e927 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
@@ -29,10 +29,24 @@ import (
// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params cannot affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
+ Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
@@ -45,29 +59,14 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin
return b
}
-// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
-// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
-// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractValidatingAdmissionPolicyBindingFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
-// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractValidatingAdmissionPolicyBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "")
-}
-
-// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status")
-}
-
-func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
b.WithAPIVersion("admissionregistration.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
+// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingAdmissionPolicyBinding must be an unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding, fieldManager, "")
+}
+
func (b ValidatingAdmissionPolicyBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
index 0f8e4e43..bbbd59c9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicybindingspec.go
@@ -24,10 +24,63 @@ import (
// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
- PolicyName *string `json:"policyName,omitempty"`
- ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
- MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName *string `json:"policyName,omitempty"`
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding.
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported actions values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // both for allowed or denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failures, formatted as
+ // a JSON list of objects, each with the following fields:
+ // - message: The validation failure message string
+ // - policy: The resource name of the ValidatingAdmissionPolicy
+ // - binding: The resource name of the ValidatingAdmissionPolicyBinding
+ // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
+ // - validationActions: The enforcement actions enacted for the validation failure
+ // Example audit annotation:
+ // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"`
+ //
+ // Clients should expect to handle additional values by ignoring
+ // any values not recognized.
+ //
+ // "Deny" and "Warn" may not be used together since this combination
+ // needlessly duplicates the validation failure both in the
+ // API response body and the HTTP warning headers.
+ //
+ // Required.
ValidationActions []admissionregistrationv1alpha1.ValidationAction `json:"validationActions,omitempty"`
}
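
A minimal sketch of a binding spec exercising validationActions (alias admregv1alpha1; the policy and param names are invented, and string literals convert to the enum string types):

package example

import (
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// bindingSpecExample sketches a binding that both denies and audits failures.
func bindingSpecExample() *admregv1alpha1.ValidatingAdmissionPolicyBindingSpecApplyConfiguration {
	return admregv1alpha1.ValidatingAdmissionPolicyBindingSpec().
		WithPolicyName("limit-replicas.example.com").
		WithParamRef(admregv1alpha1.ParamRef().
			WithName("replica-limits").
			WithNamespace("default").
			WithParameterNotFoundAction("Deny")).
		WithValidationActions("Deny", "Audit") // "Deny" + "Warn" would be redundant, per the doc above
}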
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
index d5d35299..ba20af88 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicyspec.go
@@ -24,14 +24,66 @@ import (
// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
type ValidatingAdmissionPolicySpecApplyConfiguration struct {
- ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
- MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
- Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
- FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
- AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
- Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ // ParamKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ // MatchConstraints specifies what resources this policy is designed to validate.
+ // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
+ // Required.
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ // Validations contain CEL expressions which are used to apply the validation.
+ // Validations and AuditAnnotations may not both be empty; at least one of Validations or AuditAnnotations is
+ // required.
+ Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if spec.paramKind refers to a non-existent Kind.
+ // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
+ // define how failures are enforced.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1alpha1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // auditAnnotations contains CEL expressions which are used to produce audit
+ // annotations for the audit event of the API request.
+ // validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
+ // required.
+ AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
}
// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
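
A sketch tying these fields together: a policy that caps Deployment replicas through a composited variable (alias admregv1alpha1; the group/resource choices and the limit are illustrative):

package example

import (
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// validatingSpecExample sketches a spec with matchConstraints, a variable, and
// one validation that consumes it.
func validatingSpecExample() *admregv1alpha1.ValidatingAdmissionPolicySpecApplyConfiguration {
	return admregv1alpha1.ValidatingAdmissionPolicySpec().
		WithFailurePolicy("Fail").
		WithMatchConstraints(admregv1alpha1.MatchResources().
			WithResourceRules(admregv1alpha1.NamedRuleWithOperations().
				WithAPIGroups("apps").
				WithAPIVersions("v1").
				WithResources("deployments").
				WithOperations("CREATE", "UPDATE"))).
		WithVariables(admregv1alpha1.Variable().
			WithName("replicas").
			WithExpression("object.spec.replicas")).
		WithValidations(admregv1alpha1.Validation().
			WithExpression("variables.replicas <= 10").
			WithMessage("deployments may not exceed 10 replicas"))
}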
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
index 2fec5ba4..bd33b855 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validatingadmissionpolicystatus.go
@@ -24,10 +24,16 @@ import (
// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
// with apply.
+//
+// ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.
type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The generation observed by the controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The results of type checking for each expression.
+ // Presence of this field indicates the completion of the type checking.
+ TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
+ // The conditions represent the latest available observations of a policy's current state.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
index 5f730437..24416b21 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/validation.go
@@ -24,11 +24,77 @@ import (
// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
// with apply.
+//
+// Validation specifies the CEL expression which is used to apply the validation.
type ValidationApplyConfiguration struct {
- Expression *string `json:"expression,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *v1.StatusReason `json:"reason,omitempty"`
- MessageExpression *string `json:"messageExpression,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL.
+ // ref: https://github.com/google/cel-spec
+ // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Accessible property names are escaped according to the following rules when accessed in the expression:
+ // - '__' escapes to '__underscores__'
+ // - '.' escapes to '__dot__'
+ // - '-' escapes to '__dash__'
+ // - '/' escapes to '__slash__'
+ // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+ // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+ // "import", "let", "loop", "package", "namespace", "return".
+ // Examples:
+ // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
+ // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
+ // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
+ //
+ // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+ // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
+ // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+ // non-intersecting elements in `Y` are appended, retaining their partial order.
+ // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+ // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+ // non-intersecting keys are appended, retaining their partial order.
+ // Required.
+ Expression *string `json:"expression,omitempty"`
+ // Message represents the message displayed when validation fails. The message is required if the Expression contains
+ // line breaks, and the message itself must not contain line breaks.
+ // If unset, the message is "failed Expression: {Expression}".
+ // e.g. "must be a URL with the host matching spec.host"
+ Message *string `json:"message,omitempty"`
+ // Reason represents a machine-readable description of why this validation failed.
+ // If this is the first validation in the list to fail, this reason, as well as the
+ // corresponding HTTP response code, are used in the
+ // HTTP response to the client.
+ // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
+ // If not set, StatusReasonInvalid is used in the response to the client.
+ Reason *v1.StatusReason `json:"reason,omitempty"`
+ // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+ // Since messageExpression is used as a failure message, it must evaluate to a string.
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
+ // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+ // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+ // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+ // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
+ // Example:
+ // "object.x must be less than max ("+string(params.max)+")"
+ MessageExpression *string `json:"messageExpression,omitempty"`
}
// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
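
A short sketch of a single Validation whose failure message is computed from a hypothetical param object carrying a maxReplicas field (alias admregv1alpha1; metav1 supplies the status reason constant):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admregv1alpha1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1"
)

// paramValidationExample combines expression, messageExpression, and reason.
func paramValidationExample() *admregv1alpha1.ValidationApplyConfiguration {
	return admregv1alpha1.Validation().
		WithExpression("object.spec.replicas <= params.maxReplicas").
		WithMessageExpression("'replicas must be <= ' + string(params.maxReplicas)").
		WithReason(metav1.StatusReasonInvalid)
}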
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
index 0459dae6..df7e1c9d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/variable.go
@@ -20,8 +20,15 @@ package v1alpha1
// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
// with apply.
+//
+// Variable is the definition of a variable that is used for composition.
type VariableApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ Name *string `json:"name,omitempty"`
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
Expression *string `json:"expression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go
index af604a61..c245dd9a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/applyconfiguration.go
@@ -20,7 +20,49 @@ package v1beta1
// ApplyConfigurationApplyConfiguration represents a declarative configuration of the ApplyConfiguration type for use
// with apply.
+//
+// ApplyConfiguration defines the desired configuration values of an object.
type ApplyConfigurationApplyConfiguration struct {
+ // expression will be evaluated by CEL to create an apply configuration.
+ // ref: https://github.com/google/cel-spec
+ //
+ // Apply configurations are declared in CEL using object initialization. For example, this CEL expression
+ // returns an apply configuration to set a single field:
+ //
+ // Object{
+ // spec: Object.spec{
+ // serviceAccountName: "example"
+ // }
+ // }
+ //
+ // Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
+ // values not included in the apply configuration.
+ //
+ // CEL expressions have access to the object types needed to create apply configurations:
+ //
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
Expression *string `json:"expression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
index 8718db94..31cc4220 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/auditannotation.go
@@ -20,8 +20,40 @@ package v1beta1
// AuditAnnotationApplyConfiguration represents a declarative configuration of the AuditAnnotation type for use
// with apply.
+//
+// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // key specifies the audit annotation key. The audit annotation keys of
+ // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
+ // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
+ //
+ // The key is combined with the resource name of the
+ // ValidatingAdmissionPolicy to construct an audit annotation key:
+ // "{ValidatingAdmissionPolicy name}/{key}".
+ //
+ // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
+ // and the same audit annotation key, the annotation key will be identical.
+ // In this case, the first annotation written with the key will be included
+ // in the audit event and all subsequent annotations with the same key
+ // will be discarded.
+ //
+ // Required.
+ Key *string `json:"key,omitempty"`
+ // valueExpression represents the expression which is evaluated by CEL to
+ // produce an audit annotation value. The expression must evaluate to either
+ // a string or null value. If the expression evaluates to a string, the
+ // audit annotation is included with the string value. If the expression
+ // evaluates to null or an empty string, the audit annotation will be omitted.
+ // The valueExpression may be no longer than 5kb in length.
+ // If the result of the valueExpression is more than 10kb in length, it
+ // will be truncated to 10kb.
+ //
+ // If multiple ValidatingAdmissionPolicyBinding resources match an
+ // API request, then the valueExpression will be evaluated for
+ // each binding. All unique values produced by the valueExpressions
+ // will be joined together in a comma-separated list.
+ //
+ // Required.
ValueExpression *string `json:"valueExpression,omitempty"`
}
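
A minimal sketch of one audit annotation built with these builders (alias admregv1beta1 for k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1; the key and expression are invented):

package example

import (
	admregv1beta1 "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

// auditAnnotationExample records the requested replica count on the audit
// event; returning null from the expression would omit the annotation instead.
func auditAnnotationExample() *admregv1beta1.AuditAnnotationApplyConfiguration {
	return admregv1beta1.AuditAnnotation().
		WithKey("requested-replicas").
		WithValueExpression("'replicas=' + string(object.spec.replicas)")
}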
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
index 66cfc8cd..0b0235f6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/expressionwarning.go
@@ -20,9 +20,17 @@ package v1beta1
// ExpressionWarningApplyConfiguration represents a declarative configuration of the ExpressionWarning type for use
// with apply.
+//
+// ExpressionWarning is a warning information that targets a specific expression.
type ExpressionWarningApplyConfiguration struct {
+ // The path to the field that refers to the expression.
+ // For example, the reference to the expression of the first item of
+ // validations is "spec.validations[0].expression"
FieldRef *string `json:"fieldRef,omitempty"`
- Warning *string `json:"warning,omitempty"`
+ // The content of type checking information in a human-readable form.
+ // Each line of the warning contains the type that the expression is checked
+ // against, followed by the type check error from the compiler.
+ Warning *string `json:"warning,omitempty"`
}
// ExpressionWarningApplyConfiguration constructs a declarative configuration of the ExpressionWarning type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go
index ea6e644c..729279aa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/jsonpatch.go
@@ -20,7 +20,73 @@ package v1beta1
// JSONPatchApplyConfiguration represents a declarative configuration of the JSONPatch type for use
// with apply.
+//
+// JSONPatch defines a JSON Patch.
type JSONPatchApplyConfiguration struct {
+ // expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
+ // ref: https://github.com/google/cel-spec
+ //
+ // expression must return an array of JSONPatch values.
+ //
+ // For example, this CEL expression returns a JSON patch to conditionally modify a value:
+ //
+ // [
+ // JSONPatch{op: "test", path: "/spec/example", value: "Red"},
+ // JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
+ // ]
+ //
+ // To define an object for the patch value, use Object types. For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/spec/selector",
+ // value: Object.spec.selector{matchLabels: {"environment": "test"}}
+ // }
+ // ]
+ //
+ // To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
+ //
+ // [
+ // JSONPatch{
+ // op: "add",
+ // path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
+ // value: "test"
+ // },
+ // ]
+ //
+ // CEL expressions have access to the types needed to create JSON patches and objects:
+ //
+ // - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
+ // See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
+ // integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
+ // [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
+ // function may be used to escape path keys containing '/' and '~'.
+ // - 'Object' - CEL type of the resource object.
+ // - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
+ // - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
+ //
+ // CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
+ // as well as:
+ //
+ // - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Required.
Expression *string `json:"expression,omitempty"`
}
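
As a quick illustration of the expression contract documented in this hunk, here is a minimal Go sketch that fills the apply configuration through its generated builder. The JSONPatch() constructor and WithExpression helper are assumed to follow the usual generated applyconfigurations pattern, and the package alias is invented for the example.

    package example

    import (
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    )

    // testAndReplacePatch mirrors the documented example: test the current
    // value, then replace it. The CEL expression must evaluate to an array of
    // JSONPatch values.
    func testAndReplacePatch() *admissionregv1beta1ac.JSONPatchApplyConfiguration {
        return admissionregv1beta1ac.JSONPatch().WithExpression(`[
            JSONPatch{op: "test", path: "/spec/example", value: "Red"},
            JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
        ]`)
    }
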
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
index 63db7fc8..a5c4b433 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchcondition.go
@@ -20,8 +20,32 @@ package v1beta1
// MatchConditionApplyConfiguration represents a declarative configuration of the MatchCondition type for use
// with apply.
+//
+// MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.
type MatchConditionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
+ // as well as providing an identifier for logging purposes. A good name should be descriptive of
+ // the associated expression.
+ // Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and
+ // must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or
+ // '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an
+ // optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')
+ //
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+ // CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:
+ //
+ // 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // 'oldObject' - The existing object. The value is null for CREATE requests.
+ // 'request' - Attributes of the admission request (/pkg/apis/admission/types.go#AdmissionRequest).
+ // 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+ //
+ // Required.
Expression *string `json:"expression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
index 4005e55a..a74a65fd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/matchresources.go
@@ -25,12 +25,88 @@ import (
// MatchResourcesApplyConfiguration represents a declarative configuration of the MatchResources type for use
// with apply.
+//
+// MatchResources decides whether to run the admission control policy on an object based
+// on whether it meets the match criteria.
+// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
type MatchResourcesApplyConfiguration struct {
- NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
- ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
- MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the admission control policy on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the policy.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1", you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the policy on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging",
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the validation based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the cel validation, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *v1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
+ // The policy cares about an operation if it matches _any_ Rule.
+ ResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"resourceRules,omitempty"`
+ // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
+ // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
+ ExcludeResourceRules []NamedRuleWithOperationsApplyConfiguration `json:"excludeResourceRules,omitempty"`
+ // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
+ //
+ // Defaults to "Equivalent"
+ MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
}
// MatchResourcesApplyConfiguration constructs a declarative configuration of the MatchResources type for use with
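
To make the namespaceSelector example above concrete, a hedged Go sketch using the generated builders follows. The MatchResources(), LabelSelector(), and LabelSelectorRequirement() constructors and their With* helpers are assumed to follow the standard applyconfigurations pattern.

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
        metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
    )

    // skipRunlevelNamespaces reproduces the documented "runlevel NotIn 0,1"
    // namespaceSelector, leaving every other match field at its default.
    func skipRunlevelNamespaces() *admissionregv1beta1ac.MatchResourcesApplyConfiguration {
        return admissionregv1beta1ac.MatchResources().
            WithNamespaceSelector(metav1ac.LabelSelector().
                WithMatchExpressions(metav1ac.LabelSelectorRequirement().
                    WithKey("runlevel").
                    WithOperator(metav1.LabelSelectorOpNotIn).
                    WithValues("0", "1")))
    }
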
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go
index 41d30201..8812dee1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicy.go
@@ -29,10 +29,14 @@ import (
// MutatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicy type for use
// with apply.
+//
+// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
type MutatingAdmissionPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicy.
+ Spec *MutatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// MutatingAdmissionPolicy constructs a declarative configuration of the MutatingAdmissionPolicy type for use with
@@ -45,29 +49,14 @@ func MutatingAdmissionPolicy(name string) *MutatingAdmissionPolicyApplyConfigura
return b
}
-// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
-// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a
-// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingAdmissionPolicyFrom extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingAdmissionPolicy must be an unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
-// ExtractMutatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingAdmissionPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1beta1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
- return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "")
-}
-
-// ExtractMutatingAdmissionPolicyStatus is the same as ExtractMutatingAdmissionPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingAdmissionPolicyStatus(mutatingAdmissionPolicy *admissionregistrationv1beta1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
- return extractMutatingAdmissionPolicy(mutatingAdmissionPolicy, fieldManager, "status")
-}
-
-func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1beta1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+func ExtractMutatingAdmissionPolicyFrom(mutatingAdmissionPolicy *admissionregistrationv1beta1.MutatingAdmissionPolicy, fieldManager string, subresource string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
b := &MutatingAdmissionPolicyApplyConfiguration{}
err := managedfields.ExtractInto(mutatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +68,21 @@ func extractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrati
b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractMutatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicy. If no managedFields are found in mutatingAdmissionPolicy for fieldManager, a
+// MutatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingAdmissionPolicy must be an unmodified MutatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingAdmissionPolicy(mutatingAdmissionPolicy *admissionregistrationv1beta1.MutatingAdmissionPolicy, fieldManager string) (*MutatingAdmissionPolicyApplyConfiguration, error) {
+ return ExtractMutatingAdmissionPolicyFrom(mutatingAdmissionPolicy, fieldManager, "")
+}
+
func (b MutatingAdmissionPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
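
The new ExtractMutatingAdmissionPolicyFrom helper above enables the extract/modify-in-place/apply workflow its comment describes. A hedged sketch follows; the policy name, label, and the "example-manager" field manager are illustrative assumptions, and the typed v1beta1 client methods are assumed to follow the usual generated pattern.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
        "k8s.io/client-go/kubernetes"
    )

    // relabelPolicy extracts only the fields owned by one field manager,
    // adjusts them, and applies the result back with server-side apply.
    func relabelPolicy(ctx context.Context, cs kubernetes.Interface) error {
        policy, err := cs.AdmissionregistrationV1beta1().
            MutatingAdmissionPolicies().Get(ctx, "example-policy", metav1.GetOptions{})
        if err != nil {
            return err
        }

        // An empty subresource selects the main resource, per the comment above.
        ac, err := admissionregv1beta1ac.ExtractMutatingAdmissionPolicyFrom(policy, "example-manager", "")
        if err != nil {
            return err
        }
        ac.WithLabels(map[string]string{"example.com/owned": "true"})

        _, err = cs.AdmissionregistrationV1beta1().MutatingAdmissionPolicies().
            Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
        return err
    }
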
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
index 05ab5f6e..7ee4a731 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
@@ -29,10 +29,24 @@ import (
// MutatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBinding type for use
// with apply.
+//
+// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
+// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
+// configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
+//
+// Adding/removing policies, bindings, or params can not affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type MutatingAdmissionPolicyBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
+ Spec *MutatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// MutatingAdmissionPolicyBinding constructs a declarative configuration of the MutatingAdmissionPolicyBinding type for use with
@@ -45,29 +59,14 @@ func MutatingAdmissionPolicyBinding(name string) *MutatingAdmissionPolicyBinding
return b
}
-// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
-// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a
-// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingAdmissionPolicyBindingFrom extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicyBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingAdmissionPolicyBinding must be an unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
-// ExtractMutatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingAdmissionPolicyBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "")
-}
-
-// ExtractMutatingAdmissionPolicyBindingStatus is the same as ExtractMutatingAdmissionPolicyBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingAdmissionPolicyBindingStatus(mutatingAdmissionPolicyBinding *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding, fieldManager, "status")
-}
-
-func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractMutatingAdmissionPolicyBindingFrom(mutatingAdmissionPolicyBinding *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, fieldManager string, subresource string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &MutatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(mutatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admis
b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractMutatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// mutatingAdmissionPolicyBinding. If no managedFields are found in mutatingAdmissionPolicyBinding for fieldManager, a
+// MutatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingAdmissionPolicyBinding must be an unmodified MutatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractMutatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingAdmissionPolicyBinding(mutatingAdmissionPolicyBinding *admissionregistrationv1beta1.MutatingAdmissionPolicyBinding, fieldManager string) (*MutatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return ExtractMutatingAdmissionPolicyBindingFrom(mutatingAdmissionPolicyBinding, fieldManager, "")
+}
+
func (b MutatingAdmissionPolicyBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go
index 6dead7cc..57932b92 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicybindingspec.go
@@ -20,9 +20,27 @@ package v1beta1
// MutatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicyBindingSpec type for use
// with apply.
+//
+// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
type MutatingAdmissionPolicyBindingSpecApplyConfiguration struct {
- PolicyName *string `json:"policyName,omitempty"`
- ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored.
+ // Required.
+ PolicyName *string `json:"policyName,omitempty"`
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // matchResources limits what resources match this binding and may be mutated by it.
+ // Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
+ // matchConditions before the resource may be mutated.
+ // When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
+ // and matchConditions must match for the resource to be mutated.
+ // Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
+ // Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
}
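
Putting the three fields above together, here is a hedged sketch of a binding spec built with the generated constructors. The policy name, param namespace, and param name are invented for illustration, and name/selector on paramRef are mutually exclusive as documented.

    package example

    import (
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    )

    // exampleBindingSpec binds "example-policy" to a single named param object;
    // matchResources is left unset so only the policy's own constraints apply.
    func exampleBindingSpec() *admissionregv1beta1ac.MutatingAdmissionPolicyBindingSpecApplyConfiguration {
        return admissionregv1beta1ac.MutatingAdmissionPolicyBindingSpec().
            WithPolicyName("example-policy").
            WithParamRef(admissionregv1beta1ac.ParamRef().
                WithNamespace("example-params").
                WithName("default-params"))
    }
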
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go
index 629d4e36..6de745a4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingadmissionpolicyspec.go
@@ -25,14 +25,74 @@ import (
// MutatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the MutatingAdmissionPolicySpec type for use
// with apply.
+//
+// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
type MutatingAdmissionPolicySpecApplyConfiguration struct {
- ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
- MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
- Variables []VariableApplyConfiguration `json:"variables,omitempty"`
- Mutations []MutationApplyConfiguration `json:"mutations,omitempty"`
- FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
- ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+ // paramKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ // matchConstraints specifies what resources this policy is designed to validate.
+ // The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
+ // The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
+ // '*' matches CREATE, UPDATE and CONNECT.
+ // Required.
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ // variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except matchConditions because matchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, variables must be sorted by the order of first appearance and acyclic.
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ // mutations contain operations to perform on matching objects.
+ // mutations may not be empty; a minimum of one mutation is required.
+ // mutations are evaluated in order, and are reinvoked according to
+ // the reinvocationPolicy.
+ // The mutations of a policy are invoked for each binding of this policy
+ // and reinvocation of mutations occurs on a per binding basis.
+ Mutations []MutationApplyConfiguration `json:"mutations,omitempty"`
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if paramKind refers to a non-existent Kind.
+ // A binding is invalid if paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the matchConstraints.
+ // An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
+ // as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: These mutations will not be called more than once per binding in a single admission evaluation.
+ //
+ // IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
+ // order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
+ // reinvoked when mutations change the object after this mutation is invoked.
+ // Required.
+ ReinvocationPolicy *v1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
}
// MutatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the MutatingAdmissionPolicySpec type for use with
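
A hedged, end-to-end sketch of a spec wired up through these fields: a required matchConstraints rule, one JSONPatch mutation, an explicit failurePolicy, and the required reinvocationPolicy. The rule target and label are illustrative, and the PatchTypeJSONPatch, Fail, and NeverReinvocationPolicy constants are assumed to carry their usual names in the k8s.io/api admissionregistration packages.

    package example

    import (
        admissionregv1 "k8s.io/api/admissionregistration/v1"
        admissionregv1beta1 "k8s.io/api/admissionregistration/v1beta1"
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    )

    // examplePolicySpec labels Deployments on CREATE/UPDATE via a JSON patch
    // (the patch assumes .metadata.labels already exists on the object).
    func examplePolicySpec() *admissionregv1beta1ac.MutatingAdmissionPolicySpecApplyConfiguration {
        return admissionregv1beta1ac.MutatingAdmissionPolicySpec().
            WithMatchConstraints(admissionregv1beta1ac.MatchResources().
                WithResourceRules(admissionregv1beta1ac.NamedRuleWithOperations().
                    WithOperations(admissionregv1.Create, admissionregv1.Update).
                    WithAPIGroups("apps").
                    WithAPIVersions("v1").
                    WithResources("deployments"))).
            WithMutations(admissionregv1beta1ac.Mutation().
                WithPatchType(admissionregv1beta1.PatchTypeJSONPatch).
                WithJSONPatch(admissionregv1beta1ac.JSONPatch().
                    WithExpression(`[JSONPatch{op: "add", path: "/metadata/labels/mutated", value: "true"}]`))).
            WithFailurePolicy(admissionregv1beta1.Fail).
            WithReinvocationPolicy(admissionregv1.NeverReinvocationPolicy)
    }
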
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
index f7eae58a..af93de8a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhook.go
@@ -27,19 +27,149 @@ import (
// MutatingWebhookApplyConfiguration represents a declarative configuration of the MutatingWebhook type for use
// with apply.
+//
+// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
type MutatingWebhookApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
- Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
- FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
- NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
- ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // The name of the admission webhook.
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+ // "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+ // of the organization.
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // ClientConfig defines how to communicate with the hook.
+ // Required
+ ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
+ // Rules describes what operations on what resources/subresources the webhook cares about.
+ // The webhook cares about an operation if it matches _any_ Rule.
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+ // from putting the cluster in a state which cannot be recovered from without completely
+ // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+ // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+ Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
+ // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+ // allowed values are Ignore or Fail. Defaults to Ignore.
+ FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchPolicy defines how the "rules" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
+ //
+ // Defaults to "Exact"
+ MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the webhook on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the webhook.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1", you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the webhook on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging",
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the webhook based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the webhook, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // SideEffects states whether this webhook has side effects.
+ // Acceptable values are: Unknown, None, Some, NoneOnDryRun
+ // Webhooks with side effects MUST implement a reconciliation system, since a request may be
+ // rejected by a future step in the admission chain and the side effects therefore need to be undone.
+ // Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+ // sideEffects == Unknown or Some. Defaults to Unknown.
+ SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"`
+ // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
+ // the webhook call will be ignored or the API call will fail based on the
+ // failure policy.
+ // The timeout value must be between 1 and 30 seconds.
+ // Default to 30 seconds.
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
+ // versions the Webhook expects. The API server will try to use the first version in
+ // the list which it supports. If none of the versions specified in this list are
+ // supported by the API server, validation will fail for this object.
+ // If a persisted webhook configuration specifies allowed versions and does not
+ // include any versions known to the API Server, calls to the webhook will fail
+ // and be subject to the failure policy.
+ // Default to `['v1beta1']`.
+ AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
+ // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
+ // Allowed values are "Never" and "IfNeeded".
+ //
+ // Never: the webhook will not be called more than once in a single admission evaluation.
+ //
+ // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation
+ // if the object being admitted is modified by other admission plugins after the initial webhook call.
+ // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted.
+ // Note:
+ // * the number of additional invocations is not guaranteed to be exactly one.
+ // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again.
+ // * webhooks that use this option may be reordered to minimize the number of additional invocations.
+ // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.
+ //
+ // Defaults to "Never".
+ ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType `json:"reinvocationPolicy,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be sent to this
+ // webhook. Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the webhook is called.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
}
// MutatingWebhookApplyConfiguration constructs a declarative configuration of the MutatingWebhook type for use with
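
For orientation, a hedged sketch that populates the most commonly used of the fields documented above for a single webhook entry. The service reference, rule, and match condition are invented, and the constructors and With* helpers are assumed to follow the generated applyconfigurations pattern.

    package example

    import (
        admissionregv1 "k8s.io/api/admissionregistration/v1"
        admissionregv1beta1 "k8s.io/api/admissionregistration/v1beta1"
        admissionregv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    )

    // exampleWebhook builds one webhook entry targeting Pod creation only.
    func exampleWebhook() *admissionregv1beta1ac.MutatingWebhookApplyConfiguration {
        return admissionregv1beta1ac.MutatingWebhook().
            WithName("pods.example.com").
            WithClientConfig(admissionregv1beta1ac.WebhookClientConfig().
                WithService(admissionregv1beta1ac.ServiceReference().
                    WithNamespace("example-system").
                    WithName("example-webhook").
                    WithPath("/mutate").
                    WithPort(443))).
            WithRules(admissionregv1ac.RuleWithOperations().
                WithOperations(admissionregv1.Create).
                WithAPIGroups("").
                WithAPIVersions("v1").
                WithResources("pods")).
            WithFailurePolicy(admissionregv1beta1.Fail).
            WithSideEffects(admissionregv1beta1.SideEffectClassNone).
            WithAdmissionReviewVersions("v1").
            WithReinvocationPolicy(admissionregv1.NeverReinvocationPolicy).
            WithMatchConditions(admissionregv1beta1ac.MatchCondition().
                WithName("exclude-kube-system").
                WithExpression(`object.metadata.namespace != "kube-system"`))
    }
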
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 2e70502a..d51d71a3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -29,10 +29,15 @@ import (
// MutatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the MutatingWebhookConfiguration type for use
// with apply.
+//
+// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects, and may change, the object.
+// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.
type MutatingWebhookConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ // Webhooks is a list of webhooks and the affected resources and operations.
+ Webhooks []MutatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// MutatingWebhookConfiguration constructs a declarative configuration of the MutatingWebhookConfiguration type for use with
@@ -45,29 +50,14 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
return b
}
-// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
-// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
-// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractMutatingWebhookConfigurationFrom extracts the applied configuration owned by fieldManager from
+// mutatingWebhookConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// mutatingWebhookConfiguration must be an unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
-// ExtractMutatingWebhookConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractMutatingWebhookConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
- return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "")
-}
-
-// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
- return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status")
-}
-
-func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractMutatingWebhookConfigurationFrom(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
b := &MutatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +69,21 @@ func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admission
b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
+// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
+// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// mutatingWebhookConfiguration must be an unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractMutatingWebhookConfiguration provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return ExtractMutatingWebhookConfigurationFrom(mutatingWebhookConfiguration, fieldManager, "")
+}
+
func (b MutatingWebhookConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
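
And a hedged sketch of server-side applying a full configuration built from such webhook entries, assuming the deprecated v1beta1 typed client still exposes the generated Apply method (for current clusters the admissionregistration.k8s.io/v1 equivalent is what you would actually use). The object name and field manager are illustrative.

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        admissionregv1beta1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
        "k8s.io/client-go/kubernetes"
    )

    // applyWebhookConfig server-side applies a configuration wrapping the
    // given webhook entries under a single field manager.
    func applyWebhookConfig(ctx context.Context, cs kubernetes.Interface,
        webhooks ...*admissionregv1beta1ac.MutatingWebhookApplyConfiguration) error {

        cfg := admissionregv1beta1ac.MutatingWebhookConfiguration("example-mutating-config").
            WithWebhooks(webhooks...)

        _, err := cs.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().
            Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
        return err
    }
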
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go
index ab50af6d..b4caf4c4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutation.go
@@ -24,10 +24,21 @@ import (
// MutationApplyConfiguration represents a declarative configuration of the Mutation type for use
// with apply.
+//
+// Mutation specifies the CEL expression which is used to apply the Mutation.
type MutationApplyConfiguration struct {
- PatchType *admissionregistrationv1beta1.PatchType `json:"patchType,omitempty"`
- ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"`
- JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"`
+ // patchType indicates the patch strategy used.
+ // Allowed values are "ApplyConfiguration" and "JSONPatch".
+ // Required.
+ PatchType *admissionregistrationv1beta1.PatchType `json:"patchType,omitempty"`
+ // applyConfiguration defines the desired configuration values of an object.
+ // The configuration is applied to the admission object using
+ // [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
+ // A CEL expression is used to create apply configuration.
+ ApplyConfiguration *ApplyConfigurationApplyConfiguration `json:"applyConfiguration,omitempty"`
+ // jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
+ // A CEL expression is used to create the JSON patch.
+ JSONPatch *JSONPatchApplyConfiguration `json:"jsonPatch,omitempty"`
}
// MutationApplyConfiguration constructs a declarative configuration of the Mutation type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
index 62c617d2..8b189dbb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/namedrulewithoperations.go
@@ -25,8 +25,12 @@ import (
// NamedRuleWithOperationsApplyConfiguration represents a declarative configuration of the NamedRuleWithOperations type for use
// with apply.
+//
+// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
type NamedRuleWithOperationsApplyConfiguration struct {
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // RuleWithOperations is a tuple of Operations and Resources.
v1.RuleWithOperationsApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
index 39831252..dbbef1b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramkind.go
@@ -20,9 +20,16 @@ package v1beta1
// ParamKindApplyConfiguration represents a declarative configuration of the ParamKind type for use
// with apply.
+//
+// ParamKind is a tuple of Group Kind and Version.
type ParamKindApplyConfiguration struct {
+ // APIVersion is the API group version the resources belong to.
+ // In format of "group/version".
+ // Required.
APIVersion *string `json:"apiVersion,omitempty"`
- Kind *string `json:"kind,omitempty"`
+ // Kind is the API kind the resources belong to.
+ // Required.
+ Kind *string `json:"kind,omitempty"`
}
// ParamKindApplyConfiguration constructs a declarative configuration of the ParamKind type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
index 5143b0cb..724cd1a0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/paramref.go
@@ -25,10 +25,53 @@ import (
// ParamRefApplyConfiguration represents a declarative configuration of the ParamRef type for use
// with apply.
+//
+// ParamRef describes how to locate the params to be used as input to
+// expressions of rules applied by a policy binding.
type ParamRefApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // name is the name of the resource being referenced.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ //
+ // A single parameter used for all admission requests can be configured
+ // by setting the `name` field, leaving `selector` blank, and setting namespace
+ // if `paramKind` is namespace-scoped.
+ Name *string `json:"name,omitempty"`
+ // namespace is the namespace of the referenced resource. Allows limiting
+ // the search for params to a specific namespace. Applies to both `name` and
+ // `selector` fields.
+ //
+ // A per-namespace parameter may be used by specifying a namespace-scoped
+ // `paramKind` in the policy and leaving this field empty.
+ //
+ // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
+ // field results in a configuration error.
+ //
+ // - If `paramKind` is namespace-scoped, the namespace of the object being
+ // evaluated for admission will be used when this field is left unset. Take
+ // care that if this is left empty the binding must not match any cluster-scoped
+ // resources, which will result in an error.
+ Namespace *string `json:"namespace,omitempty"`
+ // selector can be used to match multiple param objects based on their labels.
+ // Supply selector: {} to match all resources of the ParamKind.
+ //
+ // If multiple params are found, they are all evaluated with the policy expressions
+ // and the results are ANDed together.
+ //
+ // One of `name` or `selector` must be set, but `name` and `selector` are
+ // mutually exclusive properties. If one is set, the other must be unset.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // `parameterNotFoundAction` controls the behavior of the binding when the resource
+ // exists, and name or selector is valid, but there are no parameters
+ // matched by the binding. If the value is set to `Allow`, then the absence of
+ // matched parameters will be treated as a successful validation by the binding.
+ // If set to `Deny`, then the absence of matched parameters will be subject to the
+ // `failurePolicy` of the policy.
+ //
+ // Allowed values are `Allow` or `Deny`
+ //
+ // Required
ParameterNotFoundAction *admissionregistrationv1beta1.ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
index 70cc6b5b..9591d48a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/servicereference.go
@@ -20,11 +20,22 @@ package v1beta1
// ServiceReferenceApplyConfiguration represents a declarative configuration of the ServiceReference type for use
// with apply.
+//
+// ServiceReference holds a reference to Service.legacy.k8s.io
type ServiceReferenceApplyConfiguration struct {
+ // `namespace` is the namespace of the service.
+ // Required
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- Port *int32 `json:"port,omitempty"`
+ // `name` is the name of the service.
+ // Required
+ Name *string `json:"name,omitempty"`
+ // `path` is an optional URL path which will be sent in any request to
+ // this service.
+ Path *string `json:"path,omitempty"`
+ // If specified, the port on the service that is hosting the webhook.
+ // Default to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive).
+ Port *int32 `json:"port,omitempty"`
}
// ServiceReferenceApplyConfiguration constructs a declarative configuration of the ServiceReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
index cea6e11d..b3b26edb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/typechecking.go
@@ -20,7 +20,11 @@ package v1beta1
// TypeCheckingApplyConfiguration represents a declarative configuration of the TypeChecking type for use
// with apply.
+//
+// TypeChecking contains results of type checking the expressions in the
+// ValidatingAdmissionPolicy
type TypeCheckingApplyConfiguration struct {
+ // The type checking warnings for each expression.
ExpressionWarnings []ExpressionWarningApplyConfiguration `json:"expressionWarnings,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
index 84f9dea5..b8eba927 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -29,11 +29,19 @@ import (
// ValidatingAdmissionPolicyApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicy type for use
// with apply.
+//
+// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
type ValidatingAdmissionPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
- Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicy.
+ Spec *ValidatingAdmissionPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
+ // behaves in the expected way.
+ // Populated by the system.
+ // Read-only.
+ Status *ValidatingAdmissionPolicyStatusApplyConfiguration `json:"status,omitempty"`
}
// ValidatingAdmissionPolicy constructs a declarative configuration of the ValidatingAdmissionPolicy type for use with
@@ -46,6 +54,26 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
return b
}
+// ExtractValidatingAdmissionPolicyFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// validatingAdmissionPolicy must be an unmodified ValidatingAdmissionPolicy API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
+ b := &ValidatingAdmissionPolicyApplyConfiguration{}
+ err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(validatingAdmissionPolicy.Name)
+
+ b.WithKind("ValidatingAdmissionPolicy")
+ b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractValidatingAdmissionPolicy extracts the applied configuration owned by fieldManager from
// validatingAdmissionPolicy. If no managedFields are found in validatingAdmissionPolicy for fieldManager, a
// ValidatingAdmissionPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func ValidatingAdmissionPolicy(name string) *ValidatingAdmissionPolicyApplyConfi
// ExtractValidatingAdmissionPolicy provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "")
}
-// ExtractValidatingAdmissionPolicyStatus is the same as ExtractValidatingAdmissionPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractValidatingAdmissionPolicyStatus extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicy for the status subresource.
func ExtractValidatingAdmissionPolicyStatus(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- return extractValidatingAdmissionPolicy(validatingAdmissionPolicy, fieldManager, "status")
+ return ExtractValidatingAdmissionPolicyFrom(validatingAdmissionPolicy, fieldManager, "status")
}
-func extractValidatingAdmissionPolicy(validatingAdmissionPolicy *admissionregistrationv1beta1.ValidatingAdmissionPolicy, fieldManager string, subresource string) (*ValidatingAdmissionPolicyApplyConfiguration, error) {
- b := &ValidatingAdmissionPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(validatingAdmissionPolicy, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(validatingAdmissionPolicy.Name)
-
- b.WithKind("ValidatingAdmissionPolicy")
- b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
- return b, nil
-}
func (b ValidatingAdmissionPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
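The new ExtractValidatingAdmissionPolicyFrom above enables the extract/modify-in-place/apply round trip described in its comment. A hedged sketch of that workflow, assuming a typed clientset with Get and Apply for ValidatingAdmissionPolicies; the field manager name and the field being changed are hypothetical:

package main

import (
    "context"

    admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    admissionregistrationv1beta1apply "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
    "k8s.io/client-go/kubernetes"
)

const fieldManager = "skyhook-operator" // hypothetical field manager name

// bumpFailurePolicy fetches the live object, extracts only the fields owned by
// fieldManager, changes one of them, and applies the result back.
func bumpFailurePolicy(ctx context.Context, cs kubernetes.Interface, name string) error {
    live, err := cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Get(ctx, name, metav1.GetOptions{})
    if err != nil {
        return err
    }

    // An empty subresource string extracts the main resource.
    ac, err := admissionregistrationv1beta1apply.ExtractValidatingAdmissionPolicyFrom(live, fieldManager, "")
    if err != nil {
        return err
    }

    // Modify in place: only fields previously owned by fieldManager are present.
    fail := admissionregistrationv1beta1.FailurePolicyType("Fail")
    if ac.Spec == nil {
        ac.Spec = &admissionregistrationv1beta1apply.ValidatingAdmissionPolicySpecApplyConfiguration{}
    }
    ac.Spec.FailurePolicy = &fail

    _, err = cs.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
    return err
}

func main() {}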
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
index c0cdef99..db33d910 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -29,10 +29,24 @@ import (
// ValidatingAdmissionPolicyBindingApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBinding type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
+// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
+//
+// For a given admission request, each binding will cause its policy to be
+// evaluated N times, where N is 1 for policies/bindings that don't use
+// params, otherwise N is the number of parameters selected by the binding.
+//
+// The CEL expressions of a policy must have a computed CEL cost below the maximum
+// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
+// Adding/removing policies, bindings, or params cannot affect whether a
+// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
+ Spec *ValidatingAdmissionPolicyBindingSpecApplyConfiguration `json:"spec,omitempty"`
}
// ValidatingAdmissionPolicyBinding constructs a declarative configuration of the ValidatingAdmissionPolicyBinding type for use with
@@ -45,29 +59,14 @@ func ValidatingAdmissionPolicyBinding(name string) *ValidatingAdmissionPolicyBin
return b
}
-// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
-// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
-// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractValidatingAdmissionPolicyBindingFrom extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// validatingAdmissionPolicyBinding must be a unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
-// ExtractValidatingAdmissionPolicyBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractValidatingAdmissionPolicyBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "")
-}
-
-// ExtractValidatingAdmissionPolicyBindingStatus is the same as ExtractValidatingAdmissionPolicyBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractValidatingAdmissionPolicyBindingStatus(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
- return extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding, fieldManager, "status")
-}
-
-func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+func ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string, subresource string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
b := &ValidatingAdmissionPolicyBindingApplyConfiguration{}
err := managedfields.ExtractInto(validatingAdmissionPolicyBinding, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *a
b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractValidatingAdmissionPolicyBinding extracts the applied configuration owned by fieldManager from
+// validatingAdmissionPolicyBinding. If no managedFields are found in validatingAdmissionPolicyBinding for fieldManager, a
+// ValidatingAdmissionPolicyBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingAdmissionPolicyBinding must be an unmodified ValidatingAdmissionPolicyBinding API object that was retrieved from the Kubernetes API.
+// ExtractValidatingAdmissionPolicyBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingAdmissionPolicyBinding(validatingAdmissionPolicyBinding *admissionregistrationv1beta1.ValidatingAdmissionPolicyBinding, fieldManager string) (*ValidatingAdmissionPolicyBindingApplyConfiguration, error) {
+ return ExtractValidatingAdmissionPolicyBindingFrom(validatingAdmissionPolicyBinding, fieldManager, "")
+}
+
func (b ValidatingAdmissionPolicyBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
index bddc3a40..7b1c0af2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicybindingspec.go
@@ -24,10 +24,63 @@ import (
// ValidatingAdmissionPolicyBindingSpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyBindingSpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingSpecApplyConfiguration struct {
- PolicyName *string `json:"policyName,omitempty"`
- ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
- MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
+ // If the referenced resource does not exist, this binding is considered invalid and will be ignored
+ // Required.
+ PolicyName *string `json:"policyName,omitempty"`
+ // paramRef specifies the parameter resource used to configure the admission control policy.
+ // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
+ // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied.
+ // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
+ ParamRef *ParamRefApplyConfiguration `json:"paramRef,omitempty"`
+ // MatchResources declares what resources match this binding and will be validated by it.
+ // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
+ // If this is unset, all resources matched by the policy are validated by this binding
+ // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
+ // Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
+ MatchResources *MatchResourcesApplyConfiguration `json:"matchResources,omitempty"`
+ // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
+ // If a validation evaluates to false it is always enforced according to these actions.
+ //
+ // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
+ // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
+ // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
+ //
+ // validationActions is declared as a set of action values. Order does
+ // not matter. validationActions may not contain duplicates of the same action.
+ //
+ // The supported actions values are:
+ //
+ // "Deny" specifies that a validation failure results in a denied request.
+ //
+ // "Warn" specifies that a validation failure is reported to the request client
+ // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
+ // both for allowed or denied admission responses.
+ //
+ // "Audit" specifies that a validation failure is included in the published
+ // audit event for the request. The audit event will contain a
+ // `validation.policy.admission.k8s.io/validation_failure` audit annotation
+ // with a value containing the details of the validation failures, formatted as
+ // a JSON list of objects, each with the following fields:
+ // - message: The validation failure message string
+ // - policy: The resource name of the ValidatingAdmissionPolicy
+ // - binding: The resource name of the ValidatingAdmissionPolicyBinding
+ // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
+ // - validationActions: The enforcement actions enacted for the validation failure
+ // Example audit annotation:
+ // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
+ //
+ // Clients should expect to handle additional values by ignoring
+ // any values not recognized.
+ //
+ // "Deny" and "Warn" may not be used together since this combination
+ // needlessly duplicates the validation failure both in the
+ // API response body and the HTTP warning headers.
+ //
+ // Required.
ValidationActions []admissionregistrationv1beta1.ValidationAction `json:"validationActions,omitempty"`
}
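A short sketch of the validationActions rules documented above: the actions form a set, "Deny" and "Warn" may not be combined, and "Deny" plus "Audit" both rejects the request and records the failure in the audit event. The policy name is hypothetical, and the typed string conversions stand in for the ValidationAction constants:

package main

import (
    "fmt"

    admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
    admissionregistrationv1beta1apply "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

func main() {
    policyName := "replica-limit.example.com" // hypothetical ValidatingAdmissionPolicy name

    spec := admissionregistrationv1beta1apply.ValidatingAdmissionPolicyBindingSpecApplyConfiguration{
        PolicyName: &policyName,
        // Reject the request and also annotate the audit event; order is irrelevant.
        ValidationActions: []admissionregistrationv1beta1.ValidationAction{
            admissionregistrationv1beta1.ValidationAction("Deny"),
            admissionregistrationv1beta1.ValidationAction("Audit"),
        },
    }
    fmt.Println(*spec.PolicyName, spec.ValidationActions)
}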
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
index 8b235337..4600fb9d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicyspec.go
@@ -24,14 +24,66 @@ import (
// ValidatingAdmissionPolicySpecApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicySpec type for use
// with apply.
+//
+// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
type ValidatingAdmissionPolicySpecApplyConfiguration struct {
- ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
- MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
- Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
- FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
- AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
- Variables []VariableApplyConfiguration `json:"variables,omitempty"`
+ // ParamKind specifies the kind of resources used to parameterize this policy.
+ // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
+ // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
+ // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
+ ParamKind *ParamKindApplyConfiguration `json:"paramKind,omitempty"`
+ // MatchConstraints specifies what resources this policy is designed to validate.
+ // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
+ // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API,
+ // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
+ // Required.
+ MatchConstraints *MatchResourcesApplyConfiguration `json:"matchConstraints,omitempty"`
+ // Validations contain CEL expressions which are used to apply the validation.
+ // Validations and AuditAnnotations may not both be empty; at least one of Validations or AuditAnnotations is
+ // required.
+ Validations []ValidationApplyConfiguration `json:"validations,omitempty"`
+ // failurePolicy defines how to handle failures for the admission policy. Failures can
+ // occur from CEL expression parse errors, type check errors, runtime errors and invalid
+ // or mis-configured policy definitions or bindings.
+ //
+ // A policy is invalid if spec.paramKind refers to a non-existent Kind.
+ // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
+ //
+ // failurePolicy does not define how validations that evaluate to false are handled.
+ //
+ // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
+ // define how failures are enforced.
+ //
+ // Allowed values are Ignore or Fail. Defaults to Fail.
+ FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // auditAnnotations contains CEL expressions which are used to produce audit
+ // annotations for the audit event of the API request.
+ // validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
+ // required.
+ AuditAnnotations []AuditAnnotationApplyConfiguration `json:"auditAnnotations,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be validated.
+ // Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // If a parameter object is provided, it can be accessed via the `params` handle in the same
+ // manner as validation expressions.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the policy is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // Variables contain definitions of variables that can be used in composition of other expressions.
+ // Each variable is defined as a named CEL expression.
+ // The variables defined here will be available under `variables` in other expressions of the policy
+ // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
+ //
+ // The expression of a variable can refer to other variables defined earlier in the list but not those after.
+ // Thus, Variables must be sorted by the order of first appearance and acyclic.
+ Variables []VariableApplyConfiguration `json:"variables,omitempty"`
}
// ValidatingAdmissionPolicySpecApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicySpec type for use with
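A self-contained sketch of the matchConditions precedence documented above (the same ordering is restated for webhooks later in this diff): any FALSE skips the policy, an error with no FALSE defers to failurePolicy, and the policy is evaluated only when every condition is TRUE. This illustrates the documented ordering, not the apiserver's implementation:

package main

import (
    "errors"
    "fmt"
)

type failurePolicy string

const (
    fail   failurePolicy = "Fail"
    ignore failurePolicy = "Ignore"
)

// decide mirrors the documented matchConditions logic.
func decide(conds []func() (bool, error), fp failurePolicy) (evaluate bool, err error) {
    sawError := false
    for _, cond := range conds {
        ok, condErr := cond()
        if condErr != nil {
            sawError = true
            continue
        }
        if !ok {
            return false, nil // 1. ANY matchCondition FALSE: the policy is skipped.
        }
    }
    if sawError {
        if fp == fail {
            return false, errors.New("matchCondition error with failurePolicy=Fail: reject the request")
        }
        return false, nil // 3. error with failurePolicy=Ignore: the policy is skipped.
    }
    return true, nil // 2. ALL matchConditions TRUE: the policy is evaluated.
}

func main() {
    conds := []func() (bool, error){
        func() (bool, error) { return true, nil },
        func() (bool, error) { return false, errors.New("CEL runtime error") },
    }
    fmt.Println(decide(conds, ignore)) // false <nil>: skipped
    fmt.Println(decide(conds, fail))   // false + error: request rejected
}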
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
index 4612af0c..9e05da13 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingadmissionpolicystatus.go
@@ -24,10 +24,16 @@ import (
// ValidatingAdmissionPolicyStatusApplyConfiguration represents a declarative configuration of the ValidatingAdmissionPolicyStatus type for use
// with apply.
+//
+// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
type ValidatingAdmissionPolicyStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The generation observed by the controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The results of type checking for each expression.
+ // Presence of this field indicates the completion of the type checking.
+ TypeChecking *TypeCheckingApplyConfiguration `json:"typeChecking,omitempty"`
+ // The conditions represent the latest available observations of a policy's current state.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ValidatingAdmissionPolicyStatusApplyConfiguration constructs a declarative configuration of the ValidatingAdmissionPolicyStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
index 1e107d68..c3df12b8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhook.go
@@ -26,18 +26,133 @@ import (
// ValidatingWebhookApplyConfiguration represents a declarative configuration of the ValidatingWebhook type for use
// with apply.
+//
+// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
type ValidatingWebhookApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
- Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
- FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
- MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
- NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
- SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
- MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
+ // The name of the admission webhook.
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
+ // "imagepolicy" is the name of the webhook, and kubernetes.io is the name
+ // of the organization.
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // ClientConfig defines how to communicate with the hook.
+ // Required
+ ClientConfig *WebhookClientConfigApplyConfiguration `json:"clientConfig,omitempty"`
+ // Rules describes what operations on what resources/subresources the webhook cares about.
+ // The webhook cares about an operation if it matches _any_ Rule.
+ // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
+ // from putting the cluster in a state which cannot be recovered from without completely
+ // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
+ // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
+ Rules []v1.RuleWithOperationsApplyConfiguration `json:"rules,omitempty"`
+ // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
+ // allowed values are Ignore or Fail. Defaults to Ignore.
+ FailurePolicy *admissionregistrationv1beta1.FailurePolicyType `json:"failurePolicy,omitempty"`
+ // matchPolicy defines how the "rules" list is used to match incoming requests.
+ // Allowed values are "Exact" or "Equivalent".
+ //
+ // - Exact: match a request only if it exactly matches a specified rule.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
+ //
+ // - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
+ // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
+ // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.
+ //
+ // Defaults to "Exact"
+ MatchPolicy *admissionregistrationv1beta1.MatchPolicyType `json:"matchPolicy,omitempty"`
+ // NamespaceSelector decides whether to run the webhook on an object based
+ // on whether the namespace for that object matches the selector. If the
+ // object itself is a namespace, the matching is performed on
+ // object.metadata.labels. If the object is another cluster scoped resource,
+ // it never skips the webhook.
+ //
+ // For example, to run the webhook on any objects whose namespace is not
+ // associated with "runlevel" of "0" or "1"; you will set the selector as
+ // follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "runlevel",
+ // "operator": "NotIn",
+ // "values": [
+ // "0",
+ // "1"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // If instead you want to only run the webhook on any objects whose
+ // namespace is associated with the "environment" of "prod" or "staging";
+ // you will set the selector as follows:
+ // "namespaceSelector": {
+ // "matchExpressions": [
+ // {
+ // "key": "environment",
+ // "operator": "In",
+ // "values": [
+ // "prod",
+ // "staging"
+ // ]
+ // }
+ // ]
+ // }
+ //
+ // See
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ // for more examples of label selectors.
+ //
+ // Default to the empty LabelSelector, which matches everything.
+ NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
+ // ObjectSelector decides whether to run the webhook based on if the
+ // object has matching labels. objectSelector is evaluated against both
+ // the oldObject and newObject that would be sent to the webhook, and
+ // is considered to match if either object matches the selector. A null
+ // object (oldObject in the case of create, or newObject in the case of
+ // delete) or an object that cannot have labels (like a
+ // DeploymentRollback or a PodProxyOptions object) is not considered to
+ // match.
+ // Use the object selector only if the webhook is opt-in, because end
+ // users may skip the admission webhook by setting the labels.
+ // Default to the empty LabelSelector, which matches everything.
+ ObjectSelector *metav1.LabelSelectorApplyConfiguration `json:"objectSelector,omitempty"`
+ // SideEffects states whether this webhook has side effects.
+ // Acceptable values are: Unknown, None, Some, NoneOnDryRun
+ // Webhooks with side effects MUST implement a reconciliation system, since a request may be
+ // rejected by a future step in the admission chain and the side effects therefore need to be undone.
+ // Requests with the dryRun attribute will be auto-rejected if they match a webhook with
+ // sideEffects == Unknown or Some. Defaults to Unknown.
+ SideEffects *admissionregistrationv1beta1.SideEffectClass `json:"sideEffects,omitempty"`
+ // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
+ // the webhook call will be ignored or the API call will fail based on the
+ // failure policy.
+ // The timeout value must be between 1 and 30 seconds.
+ // Default to 30 seconds.
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
+ // versions the Webhook expects. The API server will try to use the first version in
+ // the list which it supports. If none of the versions specified in this list are
+ // supported by the API server, validation will fail for this object.
+ // If a persisted webhook configuration specifies allowed versions and does not
+ // include any versions known to the API Server, calls to the webhook will fail
+ // and be subject to the failure policy.
+ // Default to `['v1beta1']`.
+ AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty"`
+ // MatchConditions is a list of conditions that must be met for a request to be sent to this
+ // webhook. Match conditions filter requests that have already been matched by the rules,
+ // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
+ // There are a maximum of 64 match conditions allowed.
+ //
+ // The exact matching logic is (in order):
+ // 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.
+ // 2. If ALL matchConditions evaluate to TRUE, the webhook is called.
+ // 3. If any matchCondition evaluates to an error (but none are FALSE):
+ // - If failurePolicy=Fail, reject the request
+ // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
+ MatchConditions []MatchConditionApplyConfiguration `json:"matchConditions,omitempty"`
}
// ValidatingWebhookApplyConfiguration constructs a declarative configuration of the ValidatingWebhook type for use with
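The runlevel example embedded in the namespaceSelector comment above, rewritten as the corresponding apply configuration; a sketch only, with the label key and values taken from that comment:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
    key := "runlevel"
    op := metav1.LabelSelectorOpNotIn

    // Run the webhook only for objects whose namespace is NOT labelled
    // runlevel=0 or runlevel=1. This value would be assigned to the
    // NamespaceSelector field of a ValidatingWebhookApplyConfiguration.
    selector := &metav1apply.LabelSelectorApplyConfiguration{
        MatchExpressions: []metav1apply.LabelSelectorRequirementApplyConfiguration{
            {
                Key:      &key,
                Operator: &op,
                Values:   []string{"0", "1"},
            },
        },
    }
    fmt.Printf("%s %s %v\n", *selector.MatchExpressions[0].Key, *selector.MatchExpressions[0].Operator, selector.MatchExpressions[0].Values)
}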
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index 2ad1fb8c..66b74b31 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -29,10 +29,15 @@ import (
// ValidatingWebhookConfigurationApplyConfiguration represents a declarative configuration of the ValidatingWebhookConfiguration type for use
// with apply.
+//
+// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
+// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.
type ValidatingWebhookConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
+ // Webhooks is a list of webhooks and the affected resources and operations.
+ Webhooks []ValidatingWebhookApplyConfiguration `json:"webhooks,omitempty"`
}
// ValidatingWebhookConfiguration constructs a declarative configuration of the ValidatingWebhookConfiguration type for use with
@@ -45,29 +50,14 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
return b
}
-// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
-// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
-// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractValidatingWebhookConfigurationFrom extracts the applied configuration owned by fieldManager from
+// validatingWebhookConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
-// ExtractValidatingWebhookConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractValidatingWebhookConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
- return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "")
-}
-
-// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
- return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status")
-}
-
-func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+func ExtractValidatingWebhookConfigurationFrom(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
b := &ValidatingWebhookConfigurationApplyConfiguration{}
err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +69,21 @@ func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admis
b.WithAPIVersion("admissionregistration.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
+// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
+// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// validatingWebhookConfiguration must be an unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractValidatingWebhookConfiguration provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return ExtractValidatingWebhookConfigurationFrom(validatingWebhookConfiguration, fieldManager, "")
+}
+
func (b ValidatingWebhookConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
index 019e8e7a..6505b8b4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validation.go
@@ -24,11 +24,77 @@ import (
// ValidationApplyConfiguration represents a declarative configuration of the Validation type for use
// with apply.
+//
+// Validation specifies the CEL expression which is used to apply the validation.
type ValidationApplyConfiguration struct {
- Expression *string `json:"expression,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *v1.StatusReason `json:"reason,omitempty"`
- MessageExpression *string `json:"messageExpression,omitempty"`
+ // Expression represents the expression which will be evaluated by CEL.
+ // ref: https://github.com/google/cel-spec
+ // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
+ //
+ // - 'object' - The object from the incoming request. The value is null for DELETE requests.
+ // - 'oldObject' - The existing object. The value is null for CREATE requests.
+ // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
+ // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
+ // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
+ // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
+ // For example, a variable named 'foo' can be accessed as 'variables.foo'.
+ // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
+ // See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
+ // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
+ // request resource.
+ //
+ // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
+ // object. No other metadata properties are accessible.
+ //
+ // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
+ // Accessible property names are escaped according to the following rules when accessed in the expression:
+ // - '__' escapes to '__underscores__'
+ // - '.' escapes to '__dot__'
+ // - '-' escapes to '__dash__'
+ // - '/' escapes to '__slash__'
+ // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
+ // "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
+ // "import", "let", "loop", "package", "namespace", "return".
+ // Examples:
+ // - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
+ // - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
+ // - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
+ //
+ // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
+ // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
+ // - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+ // non-intersecting elements in `Y` are appended, retaining their partial order.
+ // - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+ // are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+ // non-intersecting keys are appended, retaining their partial order.
+ // Required.
+ Expression *string `json:"expression,omitempty"`
+ // Message represents the message displayed when validation fails. The message is required if the Expression
+ // contains line breaks, and the message itself must not contain line breaks.
+ // If unset, the message is "failed Expression: {Expression}".
+ // e.g. "must be a URL with the host matching spec.host"
+ Message *string `json:"message,omitempty"`
+ // Reason represents a machine-readable description of why this validation failed.
+ // If this is the first validation in the list to fail, this reason, as well as the
+ // corresponding HTTP response code, are used in the
+ // HTTP response to the client.
+ // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
+ // If not set, StatusReasonInvalid is used in the response to the client.
+ Reason *v1.StatusReason `json:"reason,omitempty"`
+ // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
+ // Since messageExpression is used as a failure message, it must evaluate to a string.
+ // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
+ // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
+ // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
+ // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
+ // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
+ // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
+ // Example:
+ // "object.x must be less than max ("+string(params.max)+")"
+ MessageExpression *string `json:"messageExpression,omitempty"`
}
// ValidationApplyConfiguration constructs a declarative configuration of the Validation type for use with
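A small sketch of the property-name escaping rules listed in the Expression comment above; it reproduces the three documented examples and illustrates the rules, rather than being the apiserver's escaping code:

package main

import (
    "fmt"
    "strings"
)

// celReservedKeywords are the CEL RESERVED keywords listed in the comment above.
var celReservedKeywords = map[string]bool{
    "true": true, "false": true, "null": true, "in": true, "as": true,
    "break": true, "const": true, "continue": true, "else": true, "for": true,
    "function": true, "if": true, "import": true, "let": true, "loop": true,
    "package": true, "namespace": true, "return": true,
}

// escapePropertyName applies the documented escaping for accessing object
// properties from a CEL expression.
func escapePropertyName(name string) string {
    if celReservedKeywords[name] {
        return "__" + name + "__"
    }
    r := strings.NewReplacer(
        "__", "__underscores__",
        ".", "__dot__",
        "-", "__dash__",
        "/", "__slash__",
    )
    return r.Replace(name)
}

func main() {
    fmt.Println(escapePropertyName("namespace")) // __namespace__
    fmt.Println(escapePropertyName("x-prop"))    // x__dash__prop
    fmt.Println(escapePropertyName("redact__d")) // redact__underscores__d
}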
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
index 0ece197d..db334f22 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/variable.go
@@ -20,8 +20,15 @@ package v1beta1
// VariableApplyConfiguration represents a declarative configuration of the Variable type for use
// with apply.
+//
+// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
type VariableApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
+ // The variable can be accessed in other expressions through `variables`
+ // For example, if name is "foo", the variable will be available as `variables.foo`
+ Name *string `json:"name,omitempty"`
+ // Expression is the expression that will be evaluated as the value of the variable.
+ // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
Expression *string `json:"expression,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
index 76ff71b4..593ff43e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/webhookclientconfig.go
@@ -20,10 +20,44 @@ package v1beta1
// WebhookClientConfigApplyConfiguration represents a declarative configuration of the WebhookClientConfig type for use
// with apply.
+//
+// WebhookClientConfig contains the information to make a TLS
+// connection with the webhook
type WebhookClientConfigApplyConfiguration struct {
- URL *string `json:"url,omitempty"`
- Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"`
- CABundle []byte `json:"caBundle,omitempty"`
+ // `url` gives the location of the webhook, in standard URL form
+ // (`scheme://host:port/path`). Exactly one of `url` or `service`
+ // must be specified.
+ //
+ // The `host` should not refer to a service running in the cluster; use
+ // the `service` field instead. The host might be resolved via external
+ // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
+ // in-cluster DNS as that would be a layering violation). `host` may
+ // also be an IP address.
+ //
+ // Please note that using `localhost` or `127.0.0.1` as a `host` is
+ // risky unless you take great care to run this webhook on all hosts
+ // which run an apiserver which might need to make calls to this
+ // webhook. Such installs are likely to be non-portable, i.e., not easy
+ // to turn up in a new cluster.
+ //
+ // The scheme must be "https"; the URL must begin with "https://".
+ //
+ // A path is optional, and if present may be any string permissible in
+ // a URL. You may use the path to pass an arbitrary string to the
+ // webhook, for example, a cluster identifier.
+ //
+ // Attempting to use a user or basic auth e.g. "user:password@" is not
+ // allowed. Fragments ("#...") and query parameters ("?...") are not
+ // allowed, either.
+ URL *string `json:"url,omitempty"`
+ // `service` is a reference to the service for this webhook. Either
+ // `service` or `url` must be specified.
+ //
+ // If the webhook is running within the cluster, then you should use `service`.
+ Service *ServiceReferenceApplyConfiguration `json:"service,omitempty"`
+ // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
+ // If unspecified, system trust roots on the apiserver are used.
+ CABundle []byte `json:"caBundle,omitempty"`
}
// WebhookClientConfigApplyConfiguration constructs a declarative configuration of the WebhookClientConfig type for use with
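A sketch of the `url`-versus-`service` choice documented above, built from the fields of this apply configuration; the namespace, service name, path, and external URL are hypothetical:

package main

import (
    "fmt"

    admissionregistrationv1beta1apply "k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1"
)

func main() {
    ns, name, path := "skyhook", "skyhook-webhook", "/validate" // hypothetical in-cluster service
    port := int32(443)

    // In-cluster webhook: set `service`, leave `url` unset.
    inCluster := admissionregistrationv1beta1apply.WebhookClientConfigApplyConfiguration{
        Service: &admissionregistrationv1beta1apply.ServiceReferenceApplyConfiguration{
            Namespace: &ns,
            Name:      &name,
            Path:      &path,
            Port:      &port, // defaults to 443 when omitted
        },
    }

    // Out-of-cluster webhook: set `url`, leave `service` unset; the scheme must be https.
    url := "https://webhook.example.com:8443/validate" // hypothetical external endpoint
    external := admissionregistrationv1beta1apply.WebhookClientConfigApplyConfiguration{
        URL: &url,
    }

    fmt.Println(*inCluster.Service.Name, *external.URL)
}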
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
index 8394298b..94379ddc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/serverstorageversion.go
@@ -20,11 +20,21 @@ package v1alpha1
// ServerStorageVersionApplyConfiguration represents a declarative configuration of the ServerStorageVersion type for use
// with apply.
+//
+// An API server instance reports the version it can decode and the version it
+// encodes objects to when persisting objects in the backend.
type ServerStorageVersionApplyConfiguration struct {
- APIServerID *string `json:"apiServerID,omitempty"`
- EncodingVersion *string `json:"encodingVersion,omitempty"`
+ // The ID of the reporting API server.
+ APIServerID *string `json:"apiServerID,omitempty"`
+ // The API server encodes the object to this version when persisting it in
+ // the backend (e.g., etcd).
+ EncodingVersion *string `json:"encodingVersion,omitempty"`
+ // The API server can decode objects encoded in these versions.
+ // The encodingVersion must be included in the decodableVersions.
DecodableVersions []string `json:"decodableVersions,omitempty"`
- ServedVersions []string `json:"servedVersions,omitempty"`
+ // The API server can serve these versions.
+ // DecodableVersions must include all ServedVersions.
+ ServedVersions []string `json:"servedVersions,omitempty"`
}
// ServerStorageVersionApplyConfiguration constructs a declarative configuration of the ServerStorageVersion type for use with
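A self-contained sketch of the two invariants stated above (encodingVersion must appear in decodableVersions, and every served version must also be decodable); illustrative validation only, not code from the apiserver:

package main

import "fmt"

// validateServerStorageVersion checks the documented relationships between
// the encoding, decodable, and served versions reported by an API server.
func validateServerStorageVersion(encoding string, decodable, served []string) error {
    decodableSet := map[string]bool{}
    for _, v := range decodable {
        decodableSet[v] = true
    }
    if !decodableSet[encoding] {
        return fmt.Errorf("encodingVersion %q is not in decodableVersions %v", encoding, decodable)
    }
    for _, v := range served {
        if !decodableSet[v] {
            return fmt.Errorf("servedVersion %q is not in decodableVersions %v", v, decodable)
        }
    }
    return nil
}

func main() {
    fmt.Println(validateServerStorageVersion("v1", []string{"v1", "v1beta1"}, []string{"v1", "v1beta1"})) // <nil>
    fmt.Println(validateServerStorageVersion("v2", []string{"v1"}, []string{"v1"}))                       // error
}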
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
index 9838e3c9..e9f41cc2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
@@ -29,11 +29,17 @@ import (
// StorageVersionApplyConfiguration represents a declarative configuration of the StorageVersion type for use
// with apply.
+//
+// Storage version of a specific resource.
type StorageVersionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // The name is <group>.<resource>.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *apiserverinternalv1alpha1.StorageVersionSpec `json:"spec,omitempty"`
- Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec is an empty spec. It is here to comply with Kubernetes API style.
+ Spec *apiserverinternalv1alpha1.StorageVersionSpec `json:"spec,omitempty"`
+ // API server instances report the version they can decode and the version they
+ // encode objects to when persisting objects in the backend.
+ Status *StorageVersionStatusApplyConfiguration `json:"status,omitempty"`
}
// StorageVersion constructs a declarative configuration of the StorageVersion type for use with
@@ -46,6 +52,26 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration {
return b
}
+// ExtractStorageVersionFrom extracts the applied configuration owned by fieldManager from
+// storageVersion for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// storageVersion must be an unmodified StorageVersion API object that was retrieved from the Kubernetes API.
+// ExtractStorageVersionFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStorageVersionFrom(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) {
+ b := &StorageVersionApplyConfiguration{}
+ err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(storageVersion.Name)
+
+ b.WithKind("StorageVersion")
+ b.WithAPIVersion("internal.apiserver.k8s.io/v1alpha1")
+ return b, nil
+}
+
// ExtractStorageVersion extracts the applied configuration owned by fieldManager from
// storageVersion. If no managedFields are found in storageVersion for fieldManager, a
// StorageVersionApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +82,16 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration {
// ExtractStorageVersion provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
- return extractStorageVersion(storageVersion, fieldManager, "")
+ return ExtractStorageVersionFrom(storageVersion, fieldManager, "")
}
-// ExtractStorageVersionStatus is the same as ExtractStorageVersion except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractStorageVersionStatus extracts the applied configuration owned by fieldManager from
+// storageVersion for the status subresource.
func ExtractStorageVersionStatus(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
- return extractStorageVersion(storageVersion, fieldManager, "status")
+ return ExtractStorageVersionFrom(storageVersion, fieldManager, "status")
}
-func extractStorageVersion(storageVersion *apiserverinternalv1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) {
- b := &StorageVersionApplyConfiguration{}
- err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(storageVersion.Name)
-
- b.WithKind("StorageVersion")
- b.WithAPIVersion("internal.apiserver.k8s.io/v1alpha1")
- return b, nil
-}
func (b StorageVersionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
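The hunk above replaces the unexported extractStorageVersion helper with an exported ExtractStorageVersionFrom that takes the subresource name directly. A minimal sketch of calling the new signature follows; it only uses functions shown in this diff, and the package aliases and field-manager string are illustrative, not values from this repository.

package example

import (
	apiserverinternalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
	acapiserverinternal "k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1"
)

// extractOwned returns the fields owned by fieldManager on the main resource
// and on the status subresource. Passing "" is equivalent to calling
// ExtractStorageVersion, and "status" to ExtractStorageVersionStatus.
func extractOwned(sv *apiserverinternalv1alpha1.StorageVersion, fieldManager string) (*acapiserverinternal.StorageVersionApplyConfiguration, *acapiserverinternal.StorageVersionApplyConfiguration, error) {
	main, err := acapiserverinternal.ExtractStorageVersionFrom(sv, fieldManager, "")
	if err != nil {
		return nil, nil, err
	}
	status, err := acapiserverinternal.ExtractStorageVersionFrom(sv, fieldManager, "status")
	if err != nil {
		return nil, nil, err
	}
	return main, status, nil
}

The same rename from an unexported extract helper to an exported Extract*From function repeats for the other resources in this vendor update.
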
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
index 1ed71cf8..84752a45 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversioncondition.go
@@ -25,13 +25,21 @@ import (
// StorageVersionConditionApplyConfiguration represents a declarative configuration of the StorageVersionCondition type for use
// with apply.
+//
+// Describes the state of the storageVersion at a certain point.
type StorageVersionConditionApplyConfiguration struct {
- Type *apiserverinternalv1alpha1.StorageVersionConditionType `json:"type,omitempty"`
- Status *apiserverinternalv1alpha1.ConditionStatus `json:"status,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of the condition.
+ Type *apiserverinternalv1alpha1.StorageVersionConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *apiserverinternalv1alpha1.ConditionStatus `json:"status,omitempty"`
+ // If set, this represents the .metadata.generation that the condition was set based upon.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// StorageVersionConditionApplyConfiguration constructs a declarative configuration of the StorageVersionCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
index 2e25d675..00a71ad5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversionstatus.go
@@ -20,10 +20,19 @@ package v1alpha1
// StorageVersionStatusApplyConfiguration represents a declarative configuration of the StorageVersionStatus type for use
// with apply.
+//
+// API server instances report the versions they can decode and the version they
+// encode objects to when persisting objects in the backend.
type StorageVersionStatusApplyConfiguration struct {
- StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"`
- CommonEncodingVersion *string `json:"commonEncodingVersion,omitempty"`
- Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The reported versions per API server instance.
+ StorageVersions []ServerStorageVersionApplyConfiguration `json:"storageVersions,omitempty"`
+ // If all API server instances agree on the same encoding storage version,
+ // then this field is set to that version. Otherwise this field is left empty.
+ // API servers should finish updating its storageVersionStatus entry before
+ // serving write operations, so that this field will be in sync with the reality.
+ CommonEncodingVersion *string `json:"commonEncodingVersion,omitempty"`
+ // The latest available observations of the storageVersion's state.
+ Conditions []StorageVersionConditionApplyConfiguration `json:"conditions,omitempty"`
}
// StorageVersionStatusApplyConfiguration constructs a declarative configuration of the StorageVersionStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
index 1c97bcc5..8a01c0d7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
@@ -30,11 +30,25 @@ import (
// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
// with apply.
+//
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
type ControllerRevisionApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
- Revision *int64 `json:"revision,omitempty"`
+ // Data is the serialized representation of the state.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // Revision indicates the revision of the state represented by Data.
+ Revision *int64 `json:"revision,omitempty"`
}
// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
@@ -48,29 +62,14 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
return b
}
-// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
-// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
-// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractControllerRevisionFrom extracts the applied configuration owned by fieldManager from
+// controllerRevision for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
-// ExtractControllerRevision provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractControllerRevisionFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "")
-}
-
-// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractControllerRevisionStatus(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "status")
-}
-
-func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevisionFrom(controllerRevision *appsv1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +82,21 @@ func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fi
b.WithAPIVersion("apps/v1")
return b, nil
}
+
+// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
+// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
+// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// controllerRevision must be an unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
+// ExtractControllerRevision provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return ExtractControllerRevisionFrom(controllerRevision, fieldManager, "")
+}
+
func (b ControllerRevisionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
index 14b4a88c..cc8f33f2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
@@ -29,11 +29,22 @@ import (
// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
// with apply.
+//
+// DaemonSet represents the configuration of a daemon set.
type DaemonSetApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
}
// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
@@ -47,6 +58,27 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
return b
}
+// ExtractDaemonSetFrom extracts the applied configuration owned by fieldManager from
+// daemonSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// daemonSet must be an unmodified DaemonSet API object that was retrieved from the Kubernetes API.
+// ExtractDaemonSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDaemonSetFrom(daemonSet *appsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
+ b := &DaemonSetApplyConfiguration{}
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(daemonSet.Name)
+ b.WithNamespace(daemonSet.Namespace)
+
+ b.WithKind("DaemonSet")
+ b.WithAPIVersion("apps/v1")
+ return b, nil
+}
+
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +89,16 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "")
}
-// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDaemonSetStatus extracts the applied configuration owned by fieldManager from
+// daemonSet for the status subresource.
func ExtractDaemonSetStatus(daemonSet *appsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "status")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "status")
}
-func extractDaemonSet(daemonSet *appsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
- b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(daemonSet.Name)
- b.WithNamespace(daemonSet.Namespace)
-
- b.WithKind("DaemonSet")
- b.WithAPIVersion("apps/v1")
- return b, nil
-}
func (b DaemonSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
index 8c56e499..c2ba0bf8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetcondition.go
@@ -26,12 +26,20 @@ import (
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
+//
+// TODO: Add valid condition types of a DaemonSet.
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetConditionApplyConfiguration struct {
- Type *appsv1.DaemonSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of DaemonSet condition.
+ Type *appsv1.DaemonSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
index d2382b80..7b7a922d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetspec.go
@@ -25,12 +25,32 @@ import (
// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
// with apply.
+//
+// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpecApplyConfiguration struct {
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // A label query over pods that are managed by the daemon set.
+ // Pods must match this selector in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // The only allowed template.spec.restartPolicy value is "Always".
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
}
// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
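The new field comments above spell out that the selector must match the pod template's labels. Below is a hedged sketch, not Skyhook code, of building and server-side applying a DaemonSet apply configuration that satisfies that constraint; the node-agent name, image, namespace, and docs-example field manager are all placeholders.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// applyAgent builds a DaemonSet apply configuration whose selector and
// template labels match, then server-side applies it.
func applyAgent(ctx context.Context, client kubernetes.Interface) error {
	labels := map[string]string{"app": "node-agent"}
	ds := appsv1ac.DaemonSet("node-agent", "kube-system").
		WithSpec(appsv1ac.DaemonSetSpec().
			WithSelector(metav1ac.LabelSelector().WithMatchLabels(labels)).
			WithTemplate(corev1ac.PodTemplateSpec().
				WithLabels(labels).
				WithSpec(corev1ac.PodSpec().
					WithContainers(corev1ac.Container().
						WithName("agent").
						WithImage("example.com/node-agent:v1")))))
	_, err := client.AppsV1().DaemonSets("kube-system").Apply(ctx, ds,
		metav1.ApplyOptions{FieldManager: "docs-example", Force: true})
	return err
}
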
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
index a40dc165..c8d76571 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetstatus.go
@@ -20,17 +20,42 @@ package v1
// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
// with apply.
+//
+// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatusApplyConfiguration struct {
- CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
- NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
- DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
- NumberReady *int32 `json:"numberReady,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
- NumberAvailable *int32 `json:"numberAvailable,omitempty"`
- NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
+ // numberReady is the number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running with a Ready Condition.
+ NumberReady *int32 `json:"numberReady,omitempty"`
+ // The most recent generation observed by the daemon set controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The total number of nodes that are running an updated daemon pod.
+ UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ NumberAvailable *int32 `json:"numberAvailable,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // Represents the latest available observations of a DaemonSet's current state.
+ Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
index 993e1bd5..3e3168dc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonsetupdatestrategy.go
@@ -24,8 +24,16 @@ import (
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
+//
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *appsv1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ Type *appsv1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
index 9678c87b..771ca0a7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
@@ -29,11 +29,17 @@ import (
// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
// with apply.
+//
+// Deployment enables declarative updates for Pods and ReplicaSets.
type DeploymentApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the Deployment.
+ Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the Deployment.
+ Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
}
// Deployment constructs a declarative configuration of the Deployment type for use with
@@ -47,6 +53,27 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
return b
}
+// ExtractDeploymentFrom extracts the applied configuration owned by fieldManager from
+// deployment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// deployment must be an unmodified Deployment API object that was retrieved from the Kubernetes API.
+// ExtractDeploymentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeploymentFrom(deployment *appsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
+ b := &DeploymentApplyConfiguration{}
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deployment.Name)
+ b.WithNamespace(deployment.Namespace)
+
+ b.WithKind("Deployment")
+ b.WithAPIVersion("apps/v1")
+ return b, nil
+}
+
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +84,22 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDeployment(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "")
+ return ExtractDeploymentFrom(deployment, fieldManager, "")
}
-// ExtractDeploymentStatus is the same as ExtractDeployment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractDeploymentStatus(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "status")
+// ExtractDeploymentScale extracts the applied configuration owned by fieldManager from
+// deployment for the scale subresource.
+func ExtractDeploymentScale(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return ExtractDeploymentFrom(deployment, fieldManager, "scale")
}
-func extractDeployment(deployment *appsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
- b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(deployment.Name)
- b.WithNamespace(deployment.Namespace)
-
- b.WithKind("Deployment")
- b.WithAPIVersion("apps/v1")
- return b, nil
+// ExtractDeploymentStatus extracts the applied configuration owned by fieldManager from
+// deployment for the status subresource.
+func ExtractDeploymentStatus(deployment *appsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return ExtractDeploymentFrom(deployment, fieldManager, "status")
}
+
func (b DeploymentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
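The Deployment file follows the same pattern and also gains ExtractDeploymentScale for the scale subresource. A hedged sketch of the extract/modify-in-place/apply workflow the comments describe is shown below, using the new ExtractDeploymentFrom; the pauseOwnedDeployment helper name and field-manager string are illustrative assumptions, not code from this repository.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// pauseOwnedDeployment re-applies only the fields this field manager already
// owns on the main resource, after setting spec.paused. Passing "status" or
// "scale" instead of "" would target those subresources.
func pauseOwnedDeployment(ctx context.Context, client kubernetes.Interface, ns, name, fieldManager string) error {
	dep, err := client.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract the apply configuration owned by fieldManager on the main resource.
	ac, err := appsv1ac.ExtractDeploymentFrom(dep, fieldManager, "")
	if err != nil {
		return err
	}
	if ac.Spec == nil {
		ac.WithSpec(appsv1ac.DeploymentSpec())
	}
	ac.Spec.WithPaused(true)
	// Re-apply with the same field manager; Force resolves conflicts in its favor.
	_, err = client.AppsV1().Deployments(ns).Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
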
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
index 3a669363..a7f5b506 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentcondition.go
@@ -26,13 +26,21 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
+//
+// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentConditionApplyConfiguration struct {
- Type *appsv1.DeploymentConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of deployment condition.
+ Type *appsv1.DeploymentConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // The last time this condition was updated.
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
index 5f34b058..50a45b3e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentspec.go
@@ -25,15 +25,37 @@ import (
// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
// with apply.
+//
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
type DeploymentSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- Paused *bool `json:"paused,omitempty"`
- ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template describes the pods that will be created.
+ // The only allowed template.spec.restartPolicy value is "Always".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // Indicates that the deployment is paused.
+ Paused *bool `json:"paused,omitempty"`
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
index 8d9e6cca..d932a5b1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstatus.go
@@ -20,16 +20,34 @@ package v1
// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
// with apply.
+//
+// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // The generation observed by the deployment controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // Represents the latest available observations of a deployment's current state.
+ Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
}
// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
index 7bf8a159..eb2737d9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deploymentstrategy.go
@@ -24,8 +24,16 @@ import (
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
+//
+// DeploymentStrategy describes how to replace existing pods with new ones.
type DeploymentStrategyApplyConfiguration struct {
- Type *appsv1.DeploymentStrategyType `json:"type,omitempty"`
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type *appsv1.DeploymentStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
index aee110a2..63f11a21 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
@@ -29,11 +29,24 @@ import (
// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
// with apply.
+//
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSetApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
@@ -47,6 +60,27 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
return b
}
+// ExtractReplicaSetFrom extracts the applied configuration owned by fieldManager from
+// replicaSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// replicaSet must be an unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
+// ExtractReplicaSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractReplicaSetFrom(replicaSet *appsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
+ b := &ReplicaSetApplyConfiguration{}
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(replicaSet.Name)
+ b.WithNamespace(replicaSet.Namespace)
+
+ b.WithKind("ReplicaSet")
+ b.WithAPIVersion("apps/v1")
+ return b, nil
+}
+
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,22 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "")
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "")
}
-// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractReplicaSetStatus(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "status")
+// ExtractReplicaSetScale extracts the applied configuration owned by fieldManager from
+// replicaSet for the scale subresource.
+func ExtractReplicaSetScale(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "scale")
}
-func extractReplicaSet(replicaSet *appsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
- b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(replicaSet.Name)
- b.WithNamespace(replicaSet.Namespace)
-
- b.WithKind("ReplicaSet")
- b.WithAPIVersion("apps/v1")
- return b, nil
+// ExtractReplicaSetStatus extracts the applied configuration owned by fieldManager from
+// replicaSet for the status subresource.
+func ExtractReplicaSetStatus(replicaSet *appsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "status")
}
+
func (b ReplicaSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
index 0325ce05..12077083 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetcondition.go
@@ -26,12 +26,19 @@ import (
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
+//
+// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetConditionApplyConfiguration struct {
- Type *appsv1.ReplicaSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of replica set condition.
+ Type *appsv1.ReplicaSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
index 714ddcfe..01a0c321 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetspec.go
@@ -25,11 +25,27 @@ import (
// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
// with apply.
+//
+// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // Replicas is the number of desired pods.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
index d11526d6..b3560959 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicasetstatus.go
@@ -20,14 +20,27 @@ package v1
// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
// with apply.
+//
+// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatusApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
+ FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Represents the latest available observations of a replica set's current state.
+ Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
index e898f508..7fbc3e3b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedaemonset.go
@@ -24,9 +24,43 @@ import (
// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
// with apply.
+//
+// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSetApplyConfiguration struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0 if MaxSurge is 0
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given time. The update
+ // starts by stopping at most 30% of those DaemonSet pods and then brings
+ // up new DaemonSet pods in their place. Once the new pods are available,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of original number of DaemonSet pods are available at all times during
+ // the update.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of nodes with an existing available DaemonSet pod that
+ // can have an updated DaemonSet pod during an update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ // Default value is 0.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have a new pod created before the old pod is marked as deleted.
+ // The update starts by launching new pods on 30% of nodes. Once an updated
+ // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ // on that node is marked deleted. If the old pod becomes unavailable for any
+ // reason (Ready transitions to false, is evicted, or is drained) an updated
+ // pod is immediately created on that node without considering surge limits.
+ // Allowing surge implies the possibility that the resources consumed by the
+ // daemonset on any given node can double if the readiness check fails, and
+ // so resource intensive daemonsets should take into account that they may
+ // cause evictions during disruption.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
index 2bc29372..20b52d1e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatedeployment.go
@@ -24,9 +24,32 @@ import (
// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
// with apply.
+//
+// Spec to control the desired behavior of rolling update.
type RollingUpdateDeploymentApplyConfiguration struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
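The percentages in the comments above round surge up and unavailability down: with 10 desired replicas and both values at 30%, a rollout may run up to 13 pods in total while keeping at least 7 available. A minimal sketch of setting those values through the builders in this package (the 30% figures are illustrative):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
)

// deploymentStrategy builds a Deployment strategy matching the 30% example in
// the comments above: maxSurge rounds up, maxUnavailable rounds down.
func deploymentStrategy() *appsv1ac.DeploymentStrategyApplyConfiguration {
	return appsv1ac.DeploymentStrategy().
		WithType(appsv1.RollingUpdateDeploymentStrategyType).
		WithRollingUpdate(appsv1ac.RollingUpdateDeployment().
			WithMaxSurge(intstr.FromString("30%")).
			WithMaxUnavailable(intstr.FromString("30%")))
}
```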
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
index dd0de81a..d73e1f98 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/rollingupdatestatefulsetstrategy.go
@@ -24,8 +24,21 @@ import (
// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
// with apply.
+//
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
- Partition *int32 `json:"partition,omitempty"`
+ // Partition indicates the ordinal at which the StatefulSet should be partitioned
+ // for updates. During a rolling update, all pods from ordinal Replicas-1 to
+ // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
+ // This is helpful in being able to do a canary based deployment. The default value is 0.
+ Partition *int32 `json:"partition,omitempty"`
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up. This can not be 0.
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
+ // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
+ // will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
index fc682f68..e3dc2046 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
@@ -29,11 +29,24 @@ import (
// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
// with apply.
+//
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+//
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
type StatefulSetApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the desired identities of pods in this set.
+ Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
}
// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
@@ -47,6 +60,27 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
return b
}
+// ExtractStatefulSetFrom extracts the applied configuration owned by fieldManager from
+// statefulSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// statefulSet must be an unmodified StatefulSet API object that was retrieved from the Kubernetes API.
+// ExtractStatefulSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStatefulSetFrom(statefulSet *appsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
+ b := &StatefulSetApplyConfiguration{}
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(statefulSet.Name)
+ b.WithNamespace(statefulSet.Namespace)
+
+ b.WithKind("StatefulSet")
+ b.WithAPIVersion("apps/v1")
+ return b, nil
+}
+
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,22 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "")
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "")
}
-// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractStatefulSetStatus(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "status")
+// ExtractStatefulSetScale extracts the applied configuration owned by fieldManager from
+// statefulSet for the scale subresource.
+func ExtractStatefulSetScale(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "scale")
}
-func extractStatefulSet(statefulSet *appsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
- b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(statefulSet.Name)
- b.WithNamespace(statefulSet.Namespace)
-
- b.WithKind("StatefulSet")
- b.WithAPIVersion("apps/v1")
- return b, nil
+// ExtractStatefulSetStatus extracts the applied configuration owned by fieldManager from
+// statefulSet for the status subresource.
+func ExtractStatefulSetStatus(statefulSet *appsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "status")
}
+
func (b StatefulSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
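The ExtractStatefulSetFrom/ExtractStatefulSet/ExtractStatefulSetScale/ExtractStatefulSetStatus helpers above support the extract/modify-in-place/apply workflow. A minimal sketch, assuming an illustrative clientset, namespace, and field manager, where that manager owns only the replica count:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleOwnedStatefulSet shows the extract/modify-in-place/apply workflow:
// pull out only the fields owned by fieldManager, change one of them, and
// apply the result back with the same manager.
func scaleOwnedStatefulSet(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string, replicas int32) error {
	live, err := cs.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// ExtractStatefulSet returns only the fields this manager previously applied.
	ac, err := appsv1ac.ExtractStatefulSet(live, fieldManager)
	if err != nil {
		return err
	}

	// Set the replica count; WithSpec replaces any previously extracted spec
	// fields, which is fine when replicas is the only spec field this manager owns.
	ac.WithSpec(appsv1ac.StatefulSetSpec().WithReplicas(replicas))

	_, err = cs.AppsV1().StatefulSets(ns).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true, // take ownership of conflicting fields, if any
	})
	return err
}
```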
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
index 45b2ad81..2d230fa8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetcondition.go
@@ -26,12 +26,19 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
+//
+// StatefulSetCondition describes the state of a statefulset at a certain point.
type StatefulSetConditionApplyConfiguration struct {
- Type *appsv1.StatefulSetConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of statefulset condition.
+ Type *appsv1.StatefulSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
index 86f39e16..94492ac3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetordinals.go
@@ -20,7 +20,18 @@ package v1
// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
// with apply.
+//
+// StatefulSetOrdinals describes the policy used for replica ordinal assignment
+// in this StatefulSet.
type StatefulSetOrdinalsApplyConfiguration struct {
+ // start is the number representing the first replica's index. It may be used
+ // to number replicas from an alternate index (eg: 1-indexed) over the default
+ // 0-indexed names, or to orchestrate progressive movement of replicas from
+ // one StatefulSet to another.
+ // If set, replica indices will be in the range:
+ // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
+ // If unset, defaults to 0. Replica indices will be in the range:
+ // [0, .spec.replicas).
Start *int32 `json:"start,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
index dff3e2a7..5a414847 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -24,9 +24,21 @@ import (
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
+//
+// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
+// created from the StatefulSet VolumeClaimTemplates.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
+ // WhenDeleted specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is deleted. The default policy
+ // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
+ // `Delete` policy causes those PVCs to be deleted.
WhenDeleted *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ // WhenScaled specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is scaled down. The default
+ // policy of `Retain` causes PVCs to not be affected by a scaledown. The
+ // `Delete` policy causes the associated PVCs for any excess pods above
+ // the replica count to be deleted.
+ WhenScaled *appsv1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
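A minimal sketch of the retention policy documented above, switching both triggers from the default Retain to Delete (a common choice when the claims hold only reconstructible scratch data):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
)

// deleteClaimsPolicy builds a retention policy that removes the PVCs created
// from volumeClaimTemplates both when the StatefulSet is deleted and when it
// is scaled down, instead of the default Retain behaviour.
func deleteClaimsPolicy() *appsv1ac.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
	return appsv1ac.StatefulSetPersistentVolumeClaimRetentionPolicy().
		WithWhenDeleted(appsv1.DeletePersistentVolumeClaimRetentionPolicyType).
		WithWhenScaled(appsv1.DeletePersistentVolumeClaimRetentionPolicyType)
}
```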
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
index c48b64fe..ac6114a9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
@@ -26,18 +26,74 @@ import (
// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
// with apply.
+//
+// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
- ServiceName *string `json:"serviceName,omitempty"`
- PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
- UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet. Each pod will be named with the format
+ // <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
+ // "web" with index number "3" would be named "web-3".
+ // The only allowed template.spec.restartPolicy value is "Always".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName *string `json:"serviceName,omitempty"`
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
- Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
+ // ordinals controls the numbering of replica indices in a StatefulSet. The
+ // default ordinals behavior assigns a "0" index to the first replica and
+ // increments the index by one for each additional replica requested.
+ Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
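A sketch pulling several of the fields documented above into one spec fragment; the service name, labels, and counts are illustrative, and the pod template and volume claim templates (required in practice) are elided:

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// webSpec sketches a StatefulSetSpec apply configuration using several of the
// fields documented above: a governing headless service, parallel pod
// management, a trimmed revision history, and 1-indexed ordinals.
func webSpec() *appsv1ac.StatefulSetSpecApplyConfiguration {
	return appsv1ac.StatefulSetSpec().
		WithReplicas(3).
		WithServiceName("web-headless"). // assumed pre-existing headless Service
		WithSelector(metav1ac.LabelSelector().WithMatchLabels(map[string]string{"app": "web"})).
		WithPodManagementPolicy(appsv1.ParallelPodManagement).
		WithRevisionHistoryLimit(5).
		WithMinReadySeconds(10).
		WithOrdinals(appsv1ac.StatefulSetOrdinals().WithStart(1))
	// Template and VolumeClaimTemplates are required in practice but elided here.
}
```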
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
index 637a1c64..f60786d4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
@@ -20,17 +20,36 @@ package v1
// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
// with apply.
+//
+// StatefulSetStatus represents the current state of a StatefulSet.
type StatefulSetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- CurrentRevision *string `json:"currentRevision,omitempty"`
- UpdateRevision *string `json:"updateRevision,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // readyReplicas is the number of pods created for this StatefulSet with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision *string `json:"currentRevision,omitempty"`
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision *string `json:"updateRevision,omitempty"`
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // Represents the latest available observations of a statefulset's current state.
+ Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
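A common way to consume these status fields is a rollout-completeness check over the typed appsv1.StatefulSet object; the heuristic below is a sketch, not an API from this package:

```go
package example

import appsv1 "k8s.io/api/apps/v1"

// rolloutComplete reports whether a StatefulSet has finished rolling out,
// using the status fields documented above: the controller has observed the
// latest generation, every desired replica is on the update revision, and all
// replicas are ready.
func rolloutComplete(sts *appsv1.StatefulSet) bool {
	if sts.Status.ObservedGeneration < sts.Generation {
		return false // controller has not seen the latest spec yet
	}
	if sts.Spec.Replicas != nil && sts.Status.UpdatedReplicas < *sts.Spec.Replicas {
		return false
	}
	return sts.Status.CurrentRevision == sts.Status.UpdateRevision &&
		sts.Status.ReadyReplicas == sts.Status.Replicas
}
```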
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
index ae135d34..7f273383 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetupdatestrategy.go
@@ -24,8 +24,15 @@ import (
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
+//
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *appsv1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ Type *appsv1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
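A minimal sketch of the canary pattern enabled by Partition: with 5 replicas and a partition of 3, only the two highest ordinals move to the new revision until the partition is lowered (the replica count and partition value are illustrative):

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
)

// canaryUpdateStrategy builds a partitioned RollingUpdate strategy: with 5
// replicas and partition 3, only pods web-4 and web-3 are moved to the new
// revision, leaving web-2 through web-0 on the old one.
func canaryUpdateStrategy() *appsv1ac.StatefulSetUpdateStrategyApplyConfiguration {
	return appsv1ac.StatefulSetUpdateStrategy().
		WithType(appsv1.RollingUpdateStatefulSetStrategyType).
		WithRollingUpdate(appsv1ac.RollingUpdateStatefulSetStrategy().
			WithPartition(3))
}
```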
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
index f8406d26..6d2fdbef 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
@@ -30,11 +30,27 @@ import (
// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
// with apply.
+//
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
type ControllerRevisionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
- Revision *int64 `json:"revision,omitempty"`
+ // data is the serialized representation of the state.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // revision indicates the revision of the state represented by Data.
+ Revision *int64 `json:"revision,omitempty"`
}
// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
@@ -48,29 +64,14 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
return b
}
-// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
-// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
-// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractControllerRevisionFrom extracts the applied configuration owned by fieldManager from
+// controllerRevision for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
-// ExtractControllerRevision provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractControllerRevisionFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "")
-}
-
-// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractControllerRevisionStatus(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "status")
-}
-
-func extractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevisionFrom(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +84,21 @@ func extractControllerRevision(controllerRevision *appsv1beta1.ControllerRevisio
b.WithAPIVersion("apps/v1beta1")
return b, nil
}
+
+// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
+// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
+// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// controllerRevision must be an unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
+// ExtractControllerRevision provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractControllerRevision(controllerRevision *appsv1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return ExtractControllerRevisionFrom(controllerRevision, fieldManager, "")
+}
+
func (b ControllerRevisionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
index eae15040..e8195218 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
@@ -29,11 +29,18 @@ import (
// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
// with apply.
+//
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
type DeploymentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the Deployment.
+ Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the Deployment.
+ Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
}
// Deployment constructs a declarative configuration of the Deployment type for use with
@@ -47,6 +54,27 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
return b
}
+// ExtractDeploymentFrom extracts the applied configuration owned by fieldManager from
+// deployment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// deployment must be an unmodified Deployment API object that was retrieved from the Kubernetes API.
+// ExtractDeploymentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeploymentFrom(deployment *appsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
+ b := &DeploymentApplyConfiguration{}
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deployment.Name)
+ b.WithNamespace(deployment.Namespace)
+
+ b.WithKind("Deployment")
+ b.WithAPIVersion("apps/v1beta1")
+ return b, nil
+}
+
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +85,16 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDeployment(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "")
+ return ExtractDeploymentFrom(deployment, fieldManager, "")
}
-// ExtractDeploymentStatus is the same as ExtractDeployment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDeploymentStatus extracts the applied configuration owned by fieldManager from
+// deployment for the status subresource.
func ExtractDeploymentStatus(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "status")
+ return ExtractDeploymentFrom(deployment, fieldManager, "status")
}
-func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
- b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(deployment.Name)
- b.WithNamespace(deployment.Namespace)
-
- b.WithKind("Deployment")
- b.WithAPIVersion("apps/v1beta1")
- return b, nil
-}
func (b DeploymentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
index b0a45b1a..a146cbac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentcondition.go
@@ -26,13 +26,21 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
+//
+// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentConditionApplyConfiguration struct {
- Type *appsv1beta1.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of deployment condition.
+ Type *appsv1beta1.DeploymentConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // The last time this condition was updated.
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
index 5531c756..a286c17b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentspec.go
@@ -25,16 +25,39 @@ import (
// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
// with apply.
+//
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
type DeploymentSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- Paused *bool `json:"paused,omitempty"`
- RollbackTo *RollbackConfigApplyConfiguration `json:"rollbackTo,omitempty"`
- ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
+ // replicas is the number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // selector is the label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template describes the pods that will be created.
+ // The only allowed template.spec.restartPolicy value is "Always".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
+ // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // revisionHistoryLimit is the number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 2.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // paused indicates that the deployment is paused.
+ Paused *bool `json:"paused,omitempty"`
+ // DEPRECATED.
+ // rollbackTo is the config this deployment is rolling back to. Will be cleared after rollback is done.
+ RollbackTo *RollbackConfigApplyConfiguration `json:"rollbackTo,omitempty"`
+ // progressDeadlineSeconds is the maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
index 36b4fd42..d508accb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstatus.go
@@ -20,16 +20,34 @@ package v1beta1
// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
// with apply.
+//
+// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // The generation observed by the deployment controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // Represents the latest available observations of a deployment's current state.
+ Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
+ // collisionCount is the count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
}
// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
index 03e66555..c033daeb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deploymentstrategy.go
@@ -24,8 +24,16 @@ import (
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
+//
+// DeploymentStrategy describes how to replace existing pods with new ones.
type DeploymentStrategyApplyConfiguration struct {
- Type *appsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type *appsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
index 775f82ee..f73b9e67 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollbackconfig.go
@@ -20,7 +20,10 @@ package v1beta1
// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use
// with apply.
+//
+// DEPRECATED.
type RollbackConfigApplyConfiguration struct {
+ // The revision to rollback to. If set to 0, rollback to the last revision.
Revision *int64 `json:"revision,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
index 244701a5..31f04799 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatedeployment.go
@@ -24,9 +24,32 @@ import (
// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
// with apply.
+//
+// Spec to control the desired behavior of rolling update.
type RollingUpdateDeploymentApplyConfiguration struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
index 94c29713..38c6b755 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/rollingupdatestatefulsetstrategy.go
@@ -24,8 +24,21 @@ import (
// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
// with apply.
+//
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
- Partition *int32 `json:"partition,omitempty"`
+ // Partition indicates the ordinal at which the StatefulSet should be partitioned
+ // for updates. During a rolling update, all pods from ordinal Replicas-1 to
+ // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
+ // This is helpful in being able to do a canary based deployment. The default value is 0.
+ Partition *int32 `json:"partition,omitempty"`
+ // maxUnavailable is the maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up. This can not be 0.
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
+ // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
+ // will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
index d9b3af8e..361ed68a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
@@ -29,11 +29,24 @@ import (
// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
// with apply.
+//
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+//
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
type StatefulSetApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the desired identities of pods in this set.
+ Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
}
// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
@@ -47,6 +60,27 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
return b
}
+// ExtractStatefulSetFrom extracts the applied configuration owned by fieldManager from
+// statefulSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// statefulSet must be an unmodified StatefulSet API object that was retrieved from the Kubernetes API.
+// ExtractStatefulSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStatefulSetFrom(statefulSet *appsv1beta1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
+ b := &StatefulSetApplyConfiguration{}
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(statefulSet.Name)
+ b.WithNamespace(statefulSet.Namespace)
+
+ b.WithKind("StatefulSet")
+ b.WithAPIVersion("apps/v1beta1")
+ return b, nil
+}
+
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "")
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "")
}
-// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractStatefulSetStatus extracts the applied configuration owned by fieldManager from
+// statefulSet for the status subresource.
func ExtractStatefulSetStatus(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "status")
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "status")
}
-func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
- b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(statefulSet.Name)
- b.WithNamespace(statefulSet.Namespace)
-
- b.WithKind("StatefulSet")
- b.WithAPIVersion("apps/v1beta1")
- return b, nil
-}
func (b StatefulSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
index 5a13584b..6811ce55 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetcondition.go
@@ -26,12 +26,19 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
+//
+// StatefulSetCondition describes the state of a statefulset at a certain point.
type StatefulSetConditionApplyConfiguration struct {
- Type *appsv1beta1.StatefulSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of statefulset condition.
+ Type *appsv1beta1.StatefulSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
index 2e3049e5..a2b1bd09 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetordinals.go
@@ -20,7 +20,18 @@ package v1beta1
// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
// with apply.
+//
+// StatefulSetOrdinals describes the policy used for replica ordinal assignment
+// in this StatefulSet.
type StatefulSetOrdinalsApplyConfiguration struct {
+ // start is the number representing the first replica's index. It may be used
+ // to number replicas from an alternate index (eg: 1-indexed) over the default
+ // 0-indexed names, or to orchestrate progressive movement of replicas from
+ // one StatefulSet to another.
+ // If set, replica indices will be in the range:
+ // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
+ // If unset, defaults to 0. Replica indices will be in the range:
+ // [0, .spec.replicas).
Start *int32 `json:"start,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
index f9b6fbd8..b2d69b66 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -24,9 +24,21 @@ import (
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
+//
+// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
+// created from the StatefulSet VolumeClaimTemplates.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
+ // whenDeleted specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is deleted. The default policy
+ // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
+ // `Delete` policy causes those PVCs to be deleted.
WhenDeleted *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ // whenScaled specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is scaled down. The default
+ // policy of `Retain` causes PVCs to not be affected by a scaledown. The
+ // `Delete` policy causes the associated PVCs for any excess pods above
+ // the replica count to be deleted.
+ WhenScaled *appsv1beta1.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
index 137c7243..3047440c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
@@ -26,18 +26,73 @@ import (
// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
// with apply.
+//
+// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
- ServiceName *string `json:"serviceName,omitempty"`
- PodManagementPolicy *appsv1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
- UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet. Each pod will be named with the format
+ // -. For example, a pod in a StatefulSet named
+ // "web" with index number "3" would be named "web-3".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName *string `json:"serviceName,omitempty"`
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ PodManagementPolicy *appsv1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // minReadySeconds is the minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
- Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
+ // ordinals controls the numbering of replica indices in a StatefulSet. The
+ // default ordinals behavior assigns a "0" index to the first replica and
+ // increments the index by one for each additional replica requested.
+ Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
index 27ae7540..9ffc085a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
@@ -20,17 +20,36 @@ package v1beta1
// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
// with apply.
+//
+// StatefulSetStatus represents the current state of a StatefulSet.
type StatefulSetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- CurrentRevision *string `json:"currentRevision,omitempty"`
- UpdateRevision *string `json:"updateRevision,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision *string `json:"currentRevision,omitempty"`
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision *string `json:"updateRevision,omitempty"`
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // conditions represent the latest available observations of a statefulset's current state.
+ Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // availableReplicas is the total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
index 24154f7a..48d62bf5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetupdatestrategy.go
@@ -24,8 +24,14 @@ import (
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
+//
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *appsv1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ Type *appsv1beta1.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
index 4c08b852..8ffacf61 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
@@ -30,11 +30,27 @@ import (
// ControllerRevisionApplyConfiguration represents a declarative configuration of the ControllerRevision type for use
// with apply.
+//
+// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the
+// release notes for more information.
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it can not be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both
+// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However,
+// it may be subject to name and representation changes in future releases, and clients should not
+// depend on its stability. It is primarily for internal use by controllers.
type ControllerRevisionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
- Revision *int64 `json:"revision,omitempty"`
+ // Data is the serialized representation of the state.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // Revision indicates the revision of the state represented by Data.
+ Revision *int64 `json:"revision,omitempty"`
}
// ControllerRevision constructs a declarative configuration of the ControllerRevision type for use with
@@ -48,29 +64,14 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
return b
}
-// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
-// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
-// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractControllerRevisionFrom extracts the applied configuration owned by fieldManager from
+// controllerRevision for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
-// ExtractControllerRevision provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractControllerRevisionFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "")
-}
-
-// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractControllerRevisionStatus(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
- return extractControllerRevision(controllerRevision, fieldManager, "status")
-}
-
-func extractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
+func ExtractControllerRevisionFrom(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +84,21 @@ func extractControllerRevision(controllerRevision *appsv1beta2.ControllerRevisio
b.WithAPIVersion("apps/v1beta2")
return b, nil
}
+
+// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
+// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
+// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
+// ExtractControllerRevision provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractControllerRevision(controllerRevision *appsv1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return ExtractControllerRevisionFrom(controllerRevision, fieldManager, "")
+}
+
func (b ControllerRevisionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
index b7599b3c..12b6150a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
@@ -29,11 +29,24 @@ import (
// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
// with apply.
+//
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
type DaemonSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
}
// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
@@ -47,6 +60,27 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
return b
}
+// ExtractDaemonSetFrom extracts the applied configuration owned by fieldManager from
+// daemonSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API.
+// ExtractDaemonSetFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDaemonSetFrom(daemonSet *appsv1beta2.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
+ b := &DaemonSetApplyConfiguration{}
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(daemonSet.Name)
+ b.WithNamespace(daemonSet.Namespace)
+
+ b.WithKind("DaemonSet")
+ b.WithAPIVersion("apps/v1beta2")
+ return b, nil
+}
+
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "")
}
-// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDaemonSetStatus extracts the applied configuration owned by fieldManager from
+// daemonSet for the status subresource.
func ExtractDaemonSetStatus(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "status")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "status")
}
-func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
- b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(daemonSet.Name)
- b.WithNamespace(daemonSet.Namespace)
-
- b.WithKind("DaemonSet")
- b.WithAPIVersion("apps/v1beta2")
- return b, nil
-}
func (b DaemonSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
index 0aa47cf0..5c25b815 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetcondition.go
@@ -26,12 +26,20 @@ import (
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
+//
+// TODO: Add valid condition types of a DaemonSet.
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetConditionApplyConfiguration struct {
- Type *appsv1beta2.DaemonSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of DaemonSet condition.
+ Type *appsv1beta2.DaemonSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
index 74d8bf51..a2808eb7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetspec.go
@@ -25,12 +25,32 @@ import (
// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
// with apply.
+//
+// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpecApplyConfiguration struct {
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // The only allowed template.spec.restartPolicy value is "Always".
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // The number of old history to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
}
// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
index 6b0fda89..c42eb276 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetstatus.go
@@ -20,17 +20,42 @@ package v1beta2
// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
// with apply.
+//
+// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatusApplyConfiguration struct {
- CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
- NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
- DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
- NumberReady *int32 `json:"numberReady,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
- NumberAvailable *int32 `json:"numberAvailable,omitempty"`
- NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
+ // Total number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running with a Ready Condition by passing the readinessProbe.
+ NumberReady *int32 `json:"numberReady,omitempty"`
+ // The most recent generation observed by the daemon set controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The total number of nodes that are running updated daemon pod
+ UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ NumberAvailable *int32 `json:"numberAvailable,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // Represents the latest available observations of a DaemonSet's current state.
+ Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
index 2cee58cf..2d8f2b6e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonsetupdatestrategy.go
@@ -24,8 +24,16 @@ import (
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
+//
+// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *appsv1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate.
+ Type *appsv1beta2.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
index bb6b6791..4add5ae2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
@@ -29,11 +29,18 @@ import (
// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
// with apply.
+//
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
type DeploymentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the Deployment.
+ Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the Deployment.
+ Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
}
// Deployment constructs a declarative configuration of the Deployment type for use with
@@ -47,6 +54,27 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
return b
}
+// ExtractDeploymentFrom extracts the applied configuration owned by fieldManager from
+// deployment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API.
+// ExtractDeploymentFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeploymentFrom(deployment *appsv1beta2.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
+ b := &DeploymentApplyConfiguration{}
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deployment.Name)
+ b.WithNamespace(deployment.Namespace)
+
+ b.WithKind("Deployment")
+ b.WithAPIVersion("apps/v1beta2")
+ return b, nil
+}
+
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +85,16 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDeployment(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "")
+ return ExtractDeploymentFrom(deployment, fieldManager, "")
}
-// ExtractDeploymentStatus is the same as ExtractDeployment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDeploymentStatus extracts the applied configuration owned by fieldManager from
+// deployment for the status subresource.
func ExtractDeploymentStatus(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "status")
+ return ExtractDeploymentFrom(deployment, fieldManager, "status")
}
-func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
- b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(deployment.Name)
- b.WithNamespace(deployment.Namespace)
-
- b.WithKind("Deployment")
- b.WithAPIVersion("apps/v1beta2")
- return b, nil
-}
func (b DeploymentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
index f404dd9d..5abb05d4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentcondition.go
@@ -26,13 +26,21 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
+//
+// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentConditionApplyConfiguration struct {
- Type *appsv1beta2.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of deployment condition.
+ Type *appsv1beta2.DeploymentConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // The last time this condition was updated.
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
index 1b55130c..91e37ebc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentspec.go
@@ -25,15 +25,37 @@ import (
// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
// with apply.
+//
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
type DeploymentSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- Paused *bool `json:"paused,omitempty"`
- ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // It must match the pod template's labels.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template describes the pods that will be created.
+ // The only allowed template.spec.restartPolicy value is "Always".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // Indicates that the deployment is paused.
+ Paused *bool `json:"paused,omitempty"`
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. Defaults to 600s.
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
index 554be024..139fe4cf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstatus.go
@@ -20,16 +20,34 @@ package v1beta2
// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
// with apply.
+//
+// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // The generation observed by the deployment controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // Represents the latest available observations of a deployment's current state.
+ Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
}
// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
index 6347a3a3..554f3cde 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deploymentstrategy.go
@@ -24,8 +24,16 @@ import (
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
+//
+// DeploymentStrategy describes how to replace existing pods with new ones.
type DeploymentStrategyApplyConfiguration struct {
- Type *appsv1beta2.DeploymentStrategyType `json:"type,omitempty"`
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type *appsv1beta2.DeploymentStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
index b289fdd4..ea35da04 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
@@ -29,11 +29,25 @@ import (
// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
// with apply.
+//
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
@@ -47,6 +61,27 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
return b
}
+// ExtractReplicaSetFrom extracts the applied configuration owned by fieldManager from
+// replicaSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
+// ExtractReplicaSetFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractReplicaSetFrom(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
+ b := &ReplicaSetApplyConfiguration{}
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(replicaSet.Name)
+ b.WithNamespace(replicaSet.Namespace)
+
+ b.WithKind("ReplicaSet")
+ b.WithAPIVersion("apps/v1beta2")
+ return b, nil
+}
+
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +92,16 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "")
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "")
}
-// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractReplicaSetStatus extracts the applied configuration owned by fieldManager from
+// replicaSet for the status subresource.
func ExtractReplicaSetStatus(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "status")
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "status")
}
-func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
- b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(replicaSet.Name)
- b.WithNamespace(replicaSet.Namespace)
-
- b.WithKind("ReplicaSet")
- b.WithAPIVersion("apps/v1beta2")
- return b, nil
-}
func (b ReplicaSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
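The new ExtractReplicaSetFrom helper generalizes the earlier extract functions by taking the subresource as an argument. A minimal sketch of the extract/modify-in-place/apply workflow it supports follows; the clientset wiring, namespace, and field-manager name are assumptions for illustration, not part of the vendored code.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	"k8s.io/client-go/kubernetes"
)

// scaleOwnedReplicas re-applies only the fields owned by fieldManager,
// changing spec.replicas without touching fields owned by other managers.
func scaleOwnedReplicas(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string) error {
	// Fetch the live object; it must be unmodified before extraction.
	rs, err := cs.AppsV1beta2().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract the apply configuration this manager owns on the main resource
	// (pass "status" or "scale" instead of "" to target a subresource).
	ac, err := appsv1beta2ac.ExtractReplicaSetFrom(rs, fieldManager, "")
	if err != nil {
		return err
	}
	// Modify in place, then apply back under the same field manager.
	if ac.Spec == nil {
		ac.WithSpec(appsv1beta2ac.ReplicaSetSpec())
	}
	ac.Spec.WithReplicas(3)
	_, err = cs.AppsV1beta2().ReplicaSets(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}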
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
index 3d8cd363..72bc1a65 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetcondition.go
@@ -26,12 +26,19 @@ import (
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
+//
+// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetConditionApplyConfiguration struct {
- Type *appsv1beta2.ReplicaSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of replica set condition.
+ Type *appsv1beta2.ReplicaSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
index 1d77b9e0..a8861263 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetspec.go
@@ -25,11 +25,27 @@ import (
// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
// with apply.
+//
+// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // Replicas is the number of desired pods.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // Selector is a label query over pods that should match the replica count.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
index 13004fde..5d0caaeb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicasetstatus.go
@@ -20,14 +20,27 @@ package v1beta2
// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
// with apply.
+//
+// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatusApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
+ FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling the DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Represents the latest available observations of a replica set's current state.
+ Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
index ad6021d3..5a4d3df4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedaemonset.go
@@ -24,9 +24,43 @@ import (
// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
// with apply.
+//
+// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSetApplyConfiguration struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0 if MaxSurge is 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given time. The update
+ // starts by stopping at most 30% of those DaemonSet pods and then brings
+ // up new DaemonSet pods in their place. Once the new pods are available,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of original number of DaemonSet pods are available at all times during
+ // the update.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of nodes with an existing available DaemonSet pod that
+ // can have an updated DaemonSet pod during an update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ // Default value is 0.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have a new pod created before the old pod is marked as deleted.
+ // The update starts by launching new pods on 30% of nodes. Once an updated
+ // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ // on that node is marked deleted. If the old pod becomes unavailable for any
+ // reason (Ready transitions to false, is evicted, or is drained) an updated
+ // pod is immediately created on that node without considering surge limits.
+ // Allowing surge implies the possibility that the resources consumed by the
+ // daemonset on any given node can double if the readiness check fails, and
+ // so resource intensive daemonsets should take into account that they may
+ // cause evictions during disruption.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
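Since maxUnavailable and maxSurge are IntOrString values, the generated builders take intstr values directly. A small sketch of the surge-only rollout described above (the 30% figure is illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

// daemonSetSurgeRollout surges updated pods onto at most 30% of nodes before
// old pods are removed; maxUnavailable and maxSurge must not both be 0.
func daemonSetSurgeRollout() *appsv1beta2ac.RollingUpdateDaemonSetApplyConfiguration {
	return appsv1beta2ac.RollingUpdateDaemonSet().
		WithMaxUnavailable(intstr.FromInt32(0)).
		WithMaxSurge(intstr.FromString("30%"))
}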
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
index b0cc3a4e..c87938a3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatedeployment.go
@@ -24,9 +24,32 @@ import (
// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
// with apply.
+//
+// Spec to control the desired behavior of rolling update.
type RollingUpdateDeploymentApplyConfiguration struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // Defaults to 25%.
+ // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
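The rounding directions matter: maxUnavailable rounds down while maxSurge rounds up. A worked sketch for 10 desired replicas with both fields at 30% (the numbers are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

// With 10 desired pods and both fields at 30%:
//   maxUnavailable = floor(10 * 0.30) = 3  -> at least 7 pods stay available
//   maxSurge       = ceil(10 * 0.30)  = 3  -> at most 13 pods exist at once
func deploymentRollout30Percent() *appsv1beta2ac.RollingUpdateDeploymentApplyConfiguration {
	return appsv1beta2ac.RollingUpdateDeployment().
		WithMaxUnavailable(intstr.FromString("30%")).
		WithMaxSurge(intstr.FromString("30%"))
}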
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
index 0046c264..8140b7ca 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/rollingupdatestatefulsetstrategy.go
@@ -24,8 +24,21 @@ import (
// RollingUpdateStatefulSetStrategyApplyConfiguration represents a declarative configuration of the RollingUpdateStatefulSetStrategy type for use
// with apply.
+//
+// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
type RollingUpdateStatefulSetStrategyApplyConfiguration struct {
- Partition *int32 `json:"partition,omitempty"`
+ // Partition indicates the ordinal at which the StatefulSet should be partitioned
+ // for updates. During a rolling update, all pods from ordinal Replicas-1 to
+ // Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched.
+ // This is helpful for performing a canary-based deployment. The default value is 0.
+ Partition *int32 `json:"partition,omitempty"`
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding up. This can not be 0.
+ // Defaults to 1. This field is beta-level and is enabled by default. The field applies to all pods in the range 0 to
+ // Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
+ // will be counted towards MaxUnavailable.
+ // This setting might not be effective for the OrderedReady podManagementPolicy. That policy ensures pods are created and become ready one at a time.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}
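Partition-based canaries follow directly from the ordinal rule above: with replicas=5 and partition=3, only ordinals 4 and 3 pick up a new revision while 2, 1 and 0 stay on the old one. A sketch using the strategy builders (values are illustrative):

package example

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

// canaryUpdateStrategy rolls out a new revision only to ordinals >= 3,
// leaving ordinals 0..2 untouched until the partition is lowered.
func canaryUpdateStrategy() *appsv1beta2ac.StatefulSetUpdateStrategyApplyConfiguration {
	return appsv1beta2ac.StatefulSetUpdateStrategy().
		WithType(appsv1beta2.RollingUpdateStatefulSetStrategyType).
		WithRollingUpdate(appsv1beta2ac.RollingUpdateStatefulSetStrategy().WithPartition(3))
}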
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
index 3942ed4b..ca62809f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
@@ -27,11 +27,16 @@ import (
// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
// with apply.
+//
+// Scale represents a scaling request for a resource.
type ScaleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *appsv1beta2.ScaleSpec `json:"spec,omitempty"`
- Status *appsv1beta2.ScaleStatus `json:"status,omitempty"`
+ // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *appsv1beta2.ScaleSpec `json:"spec,omitempty"`
+ // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
+ Status *appsv1beta2.ScaleStatus `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -42,6 +47,7 @@ func Scale() *ScaleApplyConfiguration {
b.WithAPIVersion("apps/v1beta2")
return b
}
+
func (b ScaleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
index d2d4e9cd..5649c118 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
@@ -29,11 +29,24 @@ import (
// StatefulSetApplyConfiguration represents a declarative configuration of the StatefulSet type for use
// with apply.
+//
+// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for
+// more information.
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+// - Network: A single stable DNS and hostname.
+// - Storage: As many VolumeClaims as requested.
+//
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
type StatefulSetApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the desired identities of pods in this set.
+ Spec *StatefulSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the current status of Pods in this StatefulSet. This data
+ // may be out of date by some window of time.
+ Status *StatefulSetStatusApplyConfiguration `json:"status,omitempty"`
}
// StatefulSet constructs a declarative configuration of the StatefulSet type for use with
@@ -47,6 +60,27 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
return b
}
+// ExtractStatefulSetFrom extracts the applied configuration owned by fieldManager from
+// statefulSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// statefulSet must be an unmodified StatefulSet API object that was retrieved from the Kubernetes API.
+// ExtractStatefulSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStatefulSetFrom(statefulSet *appsv1beta2.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
+ b := &StatefulSetApplyConfiguration{}
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(statefulSet.Name)
+ b.WithNamespace(statefulSet.Namespace)
+
+ b.WithKind("StatefulSet")
+ b.WithAPIVersion("apps/v1beta2")
+ return b, nil
+}
+
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,22 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "")
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "")
}
-// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractStatefulSetStatus(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
- return extractStatefulSet(statefulSet, fieldManager, "status")
+// ExtractStatefulSetScale extracts the applied configuration owned by fieldManager from
+// statefulSet for the scale subresource.
+func ExtractStatefulSetScale(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "scale")
}
-func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
- b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(statefulSet.Name)
- b.WithNamespace(statefulSet.Namespace)
-
- b.WithKind("StatefulSet")
- b.WithAPIVersion("apps/v1beta2")
- return b, nil
+// ExtractStatefulSetStatus extracts the applied configuration owned by fieldManager from
+// statefulSet for the status subresource.
+func ExtractStatefulSetStatus(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return ExtractStatefulSetFrom(statefulSet, fieldManager, "status")
}
+
func (b StatefulSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
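Alongside the generalized ExtractStatefulSetFrom, ExtractStatefulSetScale is new here; extracting the "scale" subresource yields only the fields a manager owns through scale operations. A minimal sketch (the clientset and manager name are assumptions):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	"k8s.io/client-go/kubernetes"
)

// ownedScaleReplicas reports the replica count, if any, that fieldManager
// owns on the StatefulSet via the scale subresource.
func ownedScaleReplicas(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string) (*int32, error) {
	sts, err := cs.AppsV1beta2().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	ac, err := appsv1beta2ac.ExtractStatefulSetScale(sts, fieldManager)
	if err != nil {
		return nil, err
	}
	if ac.Spec == nil {
		// This manager owns nothing through the scale subresource.
		return nil, nil
	}
	return ac.Spec.Replicas, nil
}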
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
index 50bef200..50a3c63d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetcondition.go
@@ -26,12 +26,19 @@ import (
// StatefulSetConditionApplyConfiguration represents a declarative configuration of the StatefulSetCondition type for use
// with apply.
+//
+// StatefulSetCondition describes the state of a statefulset at a certain point.
type StatefulSetConditionApplyConfiguration struct {
- Type *appsv1beta2.StatefulSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of statefulset condition.
+ Type *appsv1beta2.StatefulSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// StatefulSetConditionApplyConfiguration constructs a declarative configuration of the StatefulSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
index a899243a..ce0db0f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetordinals.go
@@ -20,7 +20,18 @@ package v1beta2
// StatefulSetOrdinalsApplyConfiguration represents a declarative configuration of the StatefulSetOrdinals type for use
// with apply.
+//
+// StatefulSetOrdinals describes the policy used for replica ordinal assignment
+// in this StatefulSet.
type StatefulSetOrdinalsApplyConfiguration struct {
+ // start is the number representing the first replica's index. It may be used
+ // to number replicas from an alternate index (eg: 1-indexed) over the default
+ // 0-indexed names, or to orchestrate progressive movement of replicas from
+ // one StatefulSet to another.
+ // If set, replica indices will be in the range:
+ // [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).
+ // If unset, defaults to 0. Replica indices will be in the range:
+ // [0, .spec.replicas).
Start *int32 `json:"start,omitempty"`
}
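For example, start=1 on a three-replica StatefulSet named "web" yields pods web-1, web-2 and web-3 (indices in [1, 4)). A one-line sketch with the builder:

package example

import appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"

// oneIndexedOrdinals numbers replicas from 1 instead of the default 0.
func oneIndexedOrdinals() *appsv1beta2ac.StatefulSetOrdinalsApplyConfiguration {
	return appsv1beta2ac.StatefulSetOrdinals().WithStart(1)
}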
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
index d4d139ae..8db02c32 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetpersistentvolumeclaimretentionpolicy.go
@@ -24,9 +24,21 @@ import (
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration represents a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use
// with apply.
+//
+// StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs
+// created from the StatefulSet VolumeClaimTemplates.
type StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration struct {
+ // WhenDeleted specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is deleted. The default policy
+ // of `Retain` causes PVCs to not be affected by StatefulSet deletion. The
+ // `Delete` policy causes those PVCs to be deleted.
WhenDeleted *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenDeleted,omitempty"`
- WhenScaled *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
+ // WhenScaled specifies what happens to PVCs created from StatefulSet
+ // VolumeClaimTemplates when the StatefulSet is scaled down. The default
+ // policy of `Retain` causes PVCs to not be affected by a scaledown. The
+ // `Delete` policy causes the associated PVCs for any excess pods above
+ // the replica count to be deleted.
+ WhenScaled *appsv1beta2.PersistentVolumeClaimRetentionPolicyType `json:"whenScaled,omitempty"`
}
// StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration constructs a declarative configuration of the StatefulSetPersistentVolumeClaimRetentionPolicy type for use with
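A common pairing is to keep claims across scale-downs but remove them with the StatefulSet itself. A sketch, assuming the v1beta2 API package exposes the usual Retain/Delete policy constants:

package example

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

// deleteOnDeleteRetainOnScale deletes claims when the StatefulSet is deleted
// but keeps them when it is merely scaled down.
func deleteOnDeleteRetainOnScale() *appsv1beta2ac.StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration {
	return appsv1beta2ac.StatefulSetPersistentVolumeClaimRetentionPolicy().
		WithWhenDeleted(appsv1beta2.DeletePersistentVolumeClaimRetentionPolicyType).
		WithWhenScaled(appsv1beta2.RetainPersistentVolumeClaimRetentionPolicyType)
}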
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
index 952ca0a8..ee24f132 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
@@ -26,18 +26,74 @@ import (
// StatefulSetSpecApplyConfiguration represents a declarative configuration of the StatefulSetSpec type for use
// with apply.
+//
+// A StatefulSetSpec is the specification of a StatefulSet.
type StatefulSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
- ServiceName *string `json:"serviceName,omitempty"`
- PodManagementPolicy *appsv1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
- UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // selector is a label query over pods that should match the replica count.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet. Each pod will be named with the format
+ // -. For example, a pod in a StatefulSet named
+ // "web" with index number "3" would be named "web-3".
+ // The only allowed template.spec.restartPolicy value is "Always".
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template, with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ VolumeClaimTemplates []corev1.PersistentVolumeClaimApplyConfiguration `json:"volumeClaimTemplates,omitempty"`
+ // serviceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName *string `json:"serviceName,omitempty"`
+ // podManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ PodManagementPolicy *appsv1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent
+ // volume claims created from volumeClaimTemplates. By default, all persistent
+ // volume claims are created as needed and retained until manually deleted. This
+ // policy allows the lifecycle to be altered, for example by deleting persistent
+ // volume claims when their stateful set is deleted, or when their pod is scaled
+ // down.
PersistentVolumeClaimRetentionPolicy *StatefulSetPersistentVolumeClaimRetentionPolicyApplyConfiguration `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
- Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
+ // ordinals controls the numbering of replica indices in a StatefulSet. The
+ // default ordinals behavior assigns a "0" index to the first replica and
+ // increments the index by one for each additional replica requested.
+ Ordinals *StatefulSetOrdinalsApplyConfiguration `json:"ordinals,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs a declarative configuration of the StatefulSetSpec type for use with
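Pulling the commented fields together, a minimal spec needs a selector that matches the template labels plus the name of the governing service. A sketch with assumed names ("web", "nginx"):

package example

import (
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// minimalWebSpec describes three pods labeled app=web, governed by the
// headless service "web"; the owning StatefulSet's name drives pod naming.
func minimalWebSpec() *appsv1beta2ac.StatefulSetSpecApplyConfiguration {
	labels := map[string]string{"app": "web"}
	return appsv1beta2ac.StatefulSetSpec().
		WithReplicas(3).
		WithServiceName("web").
		WithSelector(metav1ac.LabelSelector().WithMatchLabels(labels)).
		WithTemplate(corev1ac.PodTemplateSpec().
			WithLabels(labels).
			WithSpec(corev1ac.PodSpec().
				WithContainers(corev1ac.Container().
					WithName("nginx").
					WithImage("nginx:1.27"))))
}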
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
index a647cd7d..4f40460d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
@@ -20,17 +20,36 @@ package v1beta2
// StatefulSetStatusApplyConfiguration represents a declarative configuration of the StatefulSetStatus type for use
// with apply.
+//
+// StatefulSetStatus represents the current state of a StatefulSet.
type StatefulSetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- CurrentRevision *string `json:"currentRevision,omitempty"`
- UpdateRevision *string `json:"updateRevision,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // readyReplicas is the number of pods created by this StatefulSet controller with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision *string `json:"currentRevision,omitempty"`
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision *string `json:"updateRevision,omitempty"`
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // Represents the latest available observations of a statefulset's current state.
+ Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs a declarative configuration of the StatefulSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
index f93db4f7..421c9a3d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetupdatestrategy.go
@@ -24,8 +24,15 @@ import (
// StatefulSetUpdateStrategyApplyConfiguration represents a declarative configuration of the StatefulSetUpdateStrategy type for use
// with apply.
+//
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
type StatefulSetUpdateStrategyApplyConfiguration struct {
- Type *appsv1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ // Default is RollingUpdate.
+ Type *appsv1beta2.StatefulSetUpdateStrategyType `json:"type,omitempty"`
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
RollingUpdate *RollingUpdateStatefulSetStrategyApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
index 51ec6650..ed688f0a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/crossversionobjectreference.go
@@ -20,9 +20,14 @@ package v1
// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
// with apply.
+//
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReferenceApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // apiVersion is the API version of the referent
APIVersion *string `json:"apiVersion,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
index cbcbfb57..f3bd1438 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
@@ -29,11 +29,16 @@ import (
// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
// with apply.
+//
+// configuration of a horizontal pod autoscaler.
type HorizontalPodAutoscalerApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+ // spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current information about the autoscaler.
+ Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
}
// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
@@ -47,6 +52,27 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
return b
}
+// ExtractHorizontalPodAutoscalerFrom extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
+// ExtractHorizontalPodAutoscalerFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ b := &HorizontalPodAutoscalerApplyConfiguration{}
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(horizontalPodAutoscaler.Name)
+ b.WithNamespace(horizontalPodAutoscaler.Namespace)
+
+ b.WithKind("HorizontalPodAutoscaler")
+ b.WithAPIVersion("autoscaling/v1")
+ return b, nil
+}
+
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +83,16 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "")
}
-// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractHorizontalPodAutoscalerStatus extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the status subresource.
func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "status")
}
-func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(horizontalPodAutoscaler.Name)
- b.WithNamespace(horizontalPodAutoscaler.Namespace)
-
- b.WithKind("HorizontalPodAutoscaler")
- b.WithAPIVersion("autoscaling/v1")
- return b, nil
-}
func (b HorizontalPodAutoscalerApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
index 0ca2f84e..bf58371a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerspec.go
@@ -20,11 +20,23 @@ package v1
// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
// with apply.
+//
+// specification of a horizontal pod autoscaler.
type HorizontalPodAutoscalerSpecApplyConfiguration struct {
- ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
- MinReplicas *int32 `json:"minReplicas,omitempty"`
- MaxReplicas *int32 `json:"maxReplicas,omitempty"`
- TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"`
+ // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+ // and will set the desired number of pods by using its Scale subresource.
+ ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
+ // minReplicas is the lower limit for the number of replicas to which the autoscaler
+ // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+ // alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+ // metric is configured. Scaling is active as long as at least one metric value is
+ // available.
+ MinReplicas *int32 `json:"minReplicas,omitempty"`
+ // maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+ MaxReplicas *int32 `json:"maxReplicas,omitempty"`
+ // targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+ // if not specified the default autoscaling policy will be used.
+ TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty"`
}
// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
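The autoscaling/v1 spec only targets average CPU utilization. A sketch that scales an assumed Deployment "web" between 2 and 10 replicas at 70% CPU:

package example

import autoscalingv1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"

// webCPUAutoscaler targets 70% average requested CPU across 2 to 10 replicas
// of the apps/v1 Deployment named "web".
func webCPUAutoscaler() *autoscalingv1ac.HorizontalPodAutoscalerSpecApplyConfiguration {
	return autoscalingv1ac.HorizontalPodAutoscalerSpec().
		WithScaleTargetRef(autoscalingv1ac.CrossVersionObjectReference().
			WithAPIVersion("apps/v1").
			WithKind("Deployment").
			WithName("web")).
		WithMinReplicas(2).
		WithMaxReplicas(10).
		WithTargetCPUUtilizationPercentage(70)
}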
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
index 8575214e..0a0d8be7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscalerstatus.go
@@ -24,12 +24,21 @@ import (
// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
// with apply.
+//
+// current status of a horizontal pod autoscaler
type HorizontalPodAutoscalerStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
- CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
+ // observedGeneration is the most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods;
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty"`
+ // currentReplicas is the current number of replicas of pods managed by this autoscaler.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler.
+ DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
+ // currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU,
+ // e.g. 70 means that an average pod is now using 70% of its requested CPU.
+ CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty"`
}
// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
index d5f9d729..a491c5ac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
@@ -26,11 +26,16 @@ import (
// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
// with apply.
+//
+// Scale represents a scaling request for a resource.
type ScaleApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ScaleStatusApplyConfiguration `json:"status,omitempty"`
+ // spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
+ Status *ScaleStatusApplyConfiguration `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -41,6 +46,7 @@ func Scale() *ScaleApplyConfiguration {
b.WithAPIVersion("autoscaling/v1")
return b
}
+
func (b ScaleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
index 025004ba..e21d4dba 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
@@ -20,7 +20,10 @@ package v1
// ScaleSpecApplyConfiguration represents a declarative configuration of the ScaleSpec type for use
// with apply.
+//
+// ScaleSpec describes the attributes of a scale subresource.
type ScaleSpecApplyConfiguration struct {
+ // replicas is the desired number of instances for the scaled object.
Replicas *int32 `json:"replicas,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
index 51f96d23..fb5a5668 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
@@ -20,8 +20,15 @@ package v1
// ScaleStatusApplyConfiguration represents a declarative configuration of the ScaleStatus type for use
// with apply.
+//
+// ScaleStatus represents the current status of a scale subresource.
type ScaleStatusApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
+ // replicas is the actual number of observed instances of the scaled object.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // selector is the label query over pods that should match the replicas count. This is same
+ // as the label selector but in the string format to avoid introspection
+ // by clients. The string will be in the same format as the query-param syntax.
+ // More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
Selector *string `json:"selector,omitempty"`
}
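The selector string uses the same query-param syntax as label selectors elsewhere in the API. A small sketch producing one from a label set (the labels are illustrative):

package example

import "k8s.io/apimachinery/pkg/labels"

// scaleSelectorString renders a label set in the string form used by
// ScaleStatus.Selector, e.g. "app=web,tier=frontend".
func scaleSelectorString() string {
	return labels.Set{"app": "web", "tier": "frontend"}.AsSelector().String()
}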
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
index b6e071e8..6f3eecbe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricsource.go
@@ -24,10 +24,21 @@ import (
// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
// with apply.
+//
+// ContainerResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ContainerResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
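In autoscaling/v2 the target lives in a separate MetricTarget, and the container field narrows the metric to one container per pod. A sketch targeting 70% average CPU of an assumed container named "app":

package example

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	autoscalingv2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2"
)

// appContainerCPUMetric scales on the CPU of the "app" container only,
// ignoring sidecar usage in the same pods.
func appContainerCPUMetric() *autoscalingv2ac.ContainerResourceMetricSourceApplyConfiguration {
	return autoscalingv2ac.ContainerResourceMetricSource().
		WithName(corev1.ResourceCPU).
		WithContainer("app").
		WithTarget(autoscalingv2ac.MetricTarget().
			WithType(autoscalingv2.UtilizationMetricType).
			WithAverageUtilization(70))
}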
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
index 46bd2bac..1c964851 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/containerresourcemetricstatus.go
@@ -24,10 +24,19 @@ import (
// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
// with apply.
+//
+// ContainerResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing a single container in each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // current contains the current value for the given metric
+ Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
index 645f0985..062065b9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/crossversionobjectreference.go
@@ -20,9 +20,14 @@ package v2
// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
// with apply.
+//
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReferenceApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // apiVersion is the API version of the referent
APIVersion *string `json:"apiVersion,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
index a9c45b31..be289fd2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricsource.go
@@ -20,9 +20,15 @@ package v2
// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
// with apply.
+//
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example length of queue in cloud
+// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSourceApplyConfiguration struct {
+ // metric identifies the target metric by name and selector
Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
index 4280086f..000b0521 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/externalmetricstatus.go
@@ -20,8 +20,13 @@ package v2
// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
// with apply.
+//
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
type ExternalMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
index a2a3a5a7..e001653f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscaler.go
@@ -29,11 +29,20 @@ import (
// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
// with apply.
+//
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
type HorizontalPodAutoscalerApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata is the standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the specification for the behaviour of the autoscaler.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current information about the autoscaler.
+ Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
}
// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
@@ -47,6 +56,27 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
return b
}
+// ExtractHorizontalPodAutoscalerFrom extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
+// ExtractHorizontalPodAutoscalerFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ b := &HorizontalPodAutoscalerApplyConfiguration{}
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(horizontalPodAutoscaler.Name)
+ b.WithNamespace(horizontalPodAutoscaler.Namespace)
+
+ b.WithKind("HorizontalPodAutoscaler")
+ b.WithAPIVersion("autoscaling/v2")
+ return b, nil
+}
+
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +87,16 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "")
}
-// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractHorizontalPodAutoscalerStatus extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the status subresource.
func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "status")
}
-func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(horizontalPodAutoscaler.Name)
- b.WithNamespace(horizontalPodAutoscaler.Namespace)
-
- b.WithKind("HorizontalPodAutoscaler")
- b.WithAPIVersion("autoscaling/v2")
- return b, nil
-}
func (b HorizontalPodAutoscalerApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
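The new ExtractHorizontalPodAutoscalerFrom helper above makes the extract/modify-in-place/apply workflow available with an explicit subresource argument. A minimal sketch of that workflow (not part of this patch; the clientset wiring and field-manager name are illustrative assumptions):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2"
	"k8s.io/client-go/kubernetes"
)

// bumpMaxReplicas is a hypothetical helper: it extracts only the fields owned
// by fieldManager, raises maxReplicas in place, and re-applies the result.
func bumpMaxReplicas(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string, max int32) error {
	hpa, err := cs.AutoscalingV2().HorizontalPodAutoscalers(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// An empty subresource string extracts the main resource, per the doc comment above.
	ac, err := autoscalingv2ac.ExtractHorizontalPodAutoscalerFrom(hpa, fieldManager, "")
	if err != nil {
		return err
	}
	if ac.Spec == nil {
		ac.Spec = autoscalingv2ac.HorizontalPodAutoscalerSpec()
	}
	ac.Spec.WithMaxReplicas(max)
	_, err = cs.AutoscalingV2().HorizontalPodAutoscalers(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}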
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
index 05750cc2..faf70021 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerbehavior.go
@@ -20,8 +20,20 @@ package v2
// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use
// with apply.
+//
+// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target
+// in both Up and Down directions (scaleUp and scaleDown fields respectively).
type HorizontalPodAutoscalerBehaviorApplyConfiguration struct {
- ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
+ // scaleUp is scaling policy for scaling Up.
+ // If not set, the default value is the higher of:
+ // * increase no more than 4 pods per 60 seconds
+ // * double the number of pods per 60 seconds
+ // No stabilization is used.
+ ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
+ // scaleDown is scaling policy for scaling Down.
+ // If not set, the default value is to allow scaling down to minReplicas pods, with a
+ // 300 second stabilization window (i.e., the highest recommendation for
+ // the last 300sec is used).
ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
index 25ea3903..75bb4985 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalercondition.go
@@ -26,12 +26,22 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
+//
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *autoscalingv2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // type describes the current condition
+ Type *autoscalingv2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ // status is the status of the condition (True, False, Unknown)
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // lastTransitionTime is the last time the condition transitioned from
+ // one status to another
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // reason is the reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // message is a human-readable explanation containing details about
+ // the transition
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
index e34ababc..5b95ec54 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerspec.go
@@ -20,12 +20,34 @@ package v2
// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
// with apply.
+//
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpecApplyConfiguration struct {
- ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
- MinReplicas *int32 `json:"minReplicas,omitempty"`
- MaxReplicas *int32 `json:"maxReplicas,omitempty"`
- Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
- Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
+ // scaleTargetRef points to the target resource to scale, and is used to identify the pods for which
+ // metrics should be collected, as well as to actually change the replica count.
+ ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
+ // minReplicas is the lower limit for the number of replicas to which the autoscaler
+ // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+ // alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+ // metric is configured. Scaling is active as long as at least one metric value is
+ // available.
+ MinReplicas *int32 `json:"minReplicas,omitempty"`
+ // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+ // It cannot be less than minReplicas.
+ MaxReplicas *int32 `json:"maxReplicas,omitempty"`
+ // metrics contains the specifications used to calculate the
+ // desired replica count (the maximum replica count across all metrics will
+ // be used). The desired replica count is calculated by multiplying the
+ // ratio between the target value and the current value by the current
+ // number of pods. Ergo, metrics used must decrease as the pod count is
+ // increased, and vice-versa. See the individual metric source types for
+ // more information about how each type of metric must respond.
+ // If not set, the default metric will be set to 80% average CPU utilization.
+ Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
+ // behavior configures the scaling behavior of the target
+ // in both Up and Down directions (scaleUp and scaleDown fields respectively).
+ // If not set, the default HPAScalingRules for scale up and scale down are used.
+ Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
}
// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
index f1a2c3f4..c65115bc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/horizontalpodautoscalerstatus.go
@@ -24,13 +24,25 @@ import (
// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
// with apply.
+//
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
- CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
- Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
+ // observedGeneration is the most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
+ // currentReplicas is the current number of replicas of pods managed by this autoscaler,
+ // as last seen by the autoscaler.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+ // as last calculated by the autoscaler.
+ DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
+ // currentMetrics is the last read state of the metrics used by this autoscaler.
+ CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
+ // conditions is the set of conditions required for this autoscaler to scale its target,
+ // and indicates whether or not those conditions are met.
+ Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
}
// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
index f89185c5..49a22db7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingpolicy.go
@@ -24,10 +24,17 @@ import (
// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
// with apply.
+//
+// HPAScalingPolicy is a single policy which must hold true for a specified past interval.
type HPAScalingPolicyApplyConfiguration struct {
- Type *autoscalingv2.HPAScalingPolicyType `json:"type,omitempty"`
- Value *int32 `json:"value,omitempty"`
- PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
+ // type is used to specify the scaling policy.
+ Type *autoscalingv2.HPAScalingPolicyType `json:"type,omitempty"`
+ // value contains the amount of change which is permitted by the policy.
+ // It must be greater than zero
+ Value *int32 `json:"value,omitempty"`
+ // periodSeconds specifies the window of time for which the policy should hold true.
+ // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
}
// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
index 6fd0f25c..cebb11aa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/hpascalingrules.go
@@ -25,11 +25,47 @@ import (
// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
// with apply.
+//
+// HPAScalingRules configures the scaling behavior for one direction via
+// scaling Policy Rules and a configurable metric tolerance.
+//
+// Scaling Policy Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// They can limit the scaling velocity by specifying scaling policies.
+// They can prevent flapping by specifying the stabilization window, so that the
+// number of replicas is not set instantly; instead, the safest value from the stabilization
+// window is chosen.
+//
+// The tolerance is applied to the metric values and prevents scaling too
+// eagerly for small metric variations. (Note that setting a tolerance requires
+// the beta HPAConfigurableTolerance feature gate to be enabled.)
type HPAScalingRulesApplyConfiguration struct {
- StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
- SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
- Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
- Tolerance *resource.Quantity `json:"tolerance,omitempty"`
+ // stabilizationWindowSeconds is the number of seconds for which past recommendations should be
+ // considered while scaling up or scaling down.
+ // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour).
+ // If not set, use the default values:
+ // - For scale up: 0 (i.e. no stabilization is done).
+ // - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+ StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
+ // selectPolicy is used to specify which policy should be used.
+ // If not set, the default value Max is used.
+ SelectPolicy *autoscalingv2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
+ // policies is a list of potential scaling policies which can be used during scaling.
+ // If not set, use the default values:
+ // - For scale up: allow doubling the number of pods, or an absolute change of 4 pods in a 15s window.
+ // - For scale down: allow all pods to be removed in a 15s window.
+ Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
+ // tolerance is the tolerance on the ratio between the current and desired
+ // metric value under which no updates are made to the desired number of
+ // replicas (e.g. 0.01 for 1%). Must be greater than or equal to zero. If not
+ // set, the default cluster-wide tolerance is applied (by default 10%).
+ //
+ // For example, if autoscaling is configured with a memory consumption target of 100Mi,
+ // and scale-down and scale-up tolerances of 5% and 1% respectively, scaling will be
+ // triggered when the actual consumption falls below 95Mi or exceeds 101Mi.
+ //
+ // This is a beta field and requires the HPAConfigurableTolerance feature
+ // gate to be enabled.
+ Tolerance *resource.Quantity `json:"tolerance,omitempty"`
}
// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
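For reference, a small sketch (not part of this patch; the function name is illustrative) of spelling out the documented scale-down defaults explicitly with the v2 apply-configuration builders: a 300-second stabilization window and a policy allowing 100% of pods to be removed per 15-second window, with the Max select policy:

package example

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	autoscalingv2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2"
)

// explicitScaleDown mirrors the scale-down defaults described in the comments above.
func explicitScaleDown() *autoscalingv2ac.HPAScalingRulesApplyConfiguration {
	return autoscalingv2ac.HPAScalingRules().
		WithStabilizationWindowSeconds(300).
		WithSelectPolicy(autoscalingv2.MaxChangePolicySelect).
		WithPolicies(autoscalingv2ac.HPAScalingPolicy().
			WithType(autoscalingv2.PercentScalingPolicy).
			WithValue(100).
			WithPeriodSeconds(15))
}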
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
index 2f99f7d0..caa7d594 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricidentifier.go
@@ -24,8 +24,14 @@ import (
// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use
// with apply.
+//
+// MetricIdentifier defines the name and optionally selector for a metric
type MetricIdentifierApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name is the name of the given metric
+ Name *string `json:"name,omitempty"`
+ // selector is the label selector for the given metric.
+ // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+ // When unset, just the metric name will be used to gather metrics.
Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
index 282b84a4..5e26d8d5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricspec.go
@@ -24,13 +24,38 @@ import (
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
+//
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
type MetricSpecApplyConfiguration struct {
- Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It should be one of "ContainerResource", "External",
+ // "Object", "Pods" or "Resource", each mapping to a matching field in the object.
+ Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // containerResource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in
+ // each pod of the current scale target (e.g. CPU or memory). Such metrics are
+ // built in to Kubernetes, and have special scaling options on top of those
+ // available to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricSourceApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
}
// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
index f1204824..7c1a2cb0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricstatus.go
@@ -24,13 +24,37 @@ import (
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
+//
+// MetricStatus describes the last-read state of a single metric.
type MetricStatusApplyConfiguration struct {
- Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It will be one of "ContainerResource", "External",
+ // "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
+ Type *autoscalingv2.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // containerResource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricStatusApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
}
// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
index 13d2e936..cf1304a5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metrictarget.go
@@ -25,11 +25,21 @@ import (
// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
// with apply.
+//
+// MetricTarget defines the target value, average value, or average utilization of a specific metric
type MetricTargetApplyConfiguration struct {
- Type *autoscalingv2.MetricTargetType `json:"type,omitempty"`
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ // type represents whether the metric type is Utilization, Value, or AverageValue
+ Type *autoscalingv2.MetricTargetType `json:"type,omitempty"`
+ // value is the target value of the metric (as a quantity).
+ Value *resource.Quantity `json:"value,omitempty"`
+ // averageValue is the target value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // averageUtilization is the target value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ // Currently only valid for Resource metric source type
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
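As a usage note (illustrative sketch, not part of this patch): the Utilization target type documented above is typically paired with a Resource metric source, for example an 80% average CPU target:

package example

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	autoscalingv2ac "k8s.io/client-go/applyconfigurations/autoscaling/v2"
)

// cpuUtilizationSpec builds a Resource metric spec targeting 80% average CPU utilization.
func cpuUtilizationSpec() *autoscalingv2ac.MetricSpecApplyConfiguration {
	return autoscalingv2ac.MetricSpec().
		WithType(autoscalingv2.ResourceMetricSourceType).
		WithResource(autoscalingv2ac.ResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithTarget(autoscalingv2ac.MetricTarget().
				WithType(autoscalingv2.UtilizationMetricType).
				WithAverageUtilization(80)))
}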
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
index 59732548..9cd743d3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/metricvaluestatus.go
@@ -24,10 +24,18 @@ import (
// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use
// with apply.
+//
+// MetricValueStatus holds the current value for a metric
type MetricValueStatusApplyConfiguration struct {
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ // value is the current value of the metric (as a quantity).
+ Value *resource.Quantity `json:"value,omitempty"`
+ // averageValue is the current value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // averageUtilization is the current value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
index 2391fa5c..f2004b14 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricsource.go
@@ -20,10 +20,16 @@ package v2
// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
// with apply.
+//
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricSourceApplyConfiguration struct {
+ // describedObject specifies the description of the referenced object, such as kind, name, and apiVersion
DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
}
// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
index 9ffd0c18..ec5da782 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/objectmetricstatus.go
@@ -20,9 +20,15 @@ package v2
// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
// with apply.
+//
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
- Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
+ Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
+ // describedObject specifies the description of the referenced object, such as kind, name, and apiVersion
DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
index 28a35a2a..2dd3de71 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricsource.go
@@ -20,9 +20,16 @@ package v2
// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
// with apply.
+//
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
type PodsMetricSourceApplyConfiguration struct {
+ // metric identifies the target metric by name and selector
Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
index 4614282c..fcd623b7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/podsmetricstatus.go
@@ -20,8 +20,13 @@ package v2
// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
// with apply.
+//
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
type PodsMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
index ffc9042b..cfa2ec53 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricsource.go
@@ -24,8 +24,18 @@ import (
// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
// with apply.
+//
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // target specifies the target value for the given metric
Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
index 0fdbfcb5..bbff32f8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2/resourcemetricstatus.go
@@ -24,8 +24,16 @@ import (
// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
// with apply.
+//
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
index f41c5af1..392e491c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricsource.go
@@ -25,11 +25,27 @@ import (
// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
// with apply.
+//
+// ContainerResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ContainerResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty"`
- TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // targetAverageUtilization is the target value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty"`
+ // targetAverageValue is the target value of the average of the
+ // resource metric across all relevant pods, as a raw value (instead of as
+ // a percentage of the request), similar to the "pods" metric source type.
+ TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
index 4cd56eea..e69d2bad 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/containerresourcemetricstatus.go
@@ -25,11 +25,28 @@ import (
// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
// with apply.
+//
+// ContainerResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing a single container in each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty"`
- CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // currentAverageUtilization is the current value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods. It will only be
+ // present if `targetAverageValue` was set in the corresponding metric
+ // specification.
+ CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty"`
+ // currentAverageValue is the current value of the average of the
+ // resource metric across all relevant pods, as a raw value (instead of as
+ // a percentage of the request), similar to the "pods" metric source type.
+ // It will always be set, regardless of the corresponding metric specification.
+ CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
index f0326161..40c8e450 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/crossversionobjectreference.go
@@ -20,9 +20,14 @@ package v2beta1
// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
// with apply.
+//
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReferenceApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // Name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // API version of the referent
APIVersion *string `json:"apiVersion,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
index 8dce4529..3b072172 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricsource.go
@@ -25,11 +25,23 @@ import (
// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
// with apply.
+//
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example length of queue in cloud
+// messaging service, or QPS from loadbalancer running outside of cluster).
+// Exactly one "target" type should be set.
type ExternalMetricSourceApplyConfiguration struct {
- MetricName *string `json:"metricName,omitempty"`
- MetricSelector *v1.LabelSelectorApplyConfiguration `json:"metricSelector,omitempty"`
- TargetValue *resource.Quantity `json:"targetValue,omitempty"`
- TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
+ // metricName is the name of the metric in question.
+ MetricName *string `json:"metricName,omitempty"`
+ // metricSelector is used to identify a specific time series
+ // within a given metric.
+ MetricSelector *v1.LabelSelectorApplyConfiguration `json:"metricSelector,omitempty"`
+ // targetValue is the target value of the metric (as a quantity).
+ // Mutually exclusive with TargetAverageValue.
+ TargetValue *resource.Quantity `json:"targetValue,omitempty"`
+ // targetAverageValue is the target per-pod value of global metric (as a quantity).
+ // Mutually exclusive with TargetValue.
+ TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
}
// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
index 4034d7e5..c7e3629a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/externalmetricstatus.go
@@ -25,11 +25,20 @@ import (
// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
// with apply.
+//
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
type ExternalMetricStatusApplyConfiguration struct {
- MetricName *string `json:"metricName,omitempty"`
- MetricSelector *v1.LabelSelectorApplyConfiguration `json:"metricSelector,omitempty"`
- CurrentValue *resource.Quantity `json:"currentValue,omitempty"`
- CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
+ // metricName is the name of a metric used for autoscaling in
+ // metric system.
+ MetricName *string `json:"metricName,omitempty"`
+ // metricSelector is used to identify a specific time series
+ // within a given metric.
+ MetricSelector *v1.LabelSelectorApplyConfiguration `json:"metricSelector,omitempty"`
+ // currentValue is the current value of the metric (as a quantity)
+ CurrentValue *resource.Quantity `json:"currentValue,omitempty"`
+ // currentAverageValue is the current value of metric averaged over autoscaled pods.
+ CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
}
// ExternalMetricStatusApplyConfiguration constructs a declarative configuration of the ExternalMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
index e6ac8c95..8a89398f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -29,11 +29,20 @@ import (
// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
// with apply.
+//
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
type HorizontalPodAutoscalerApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata is the standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the specification for the behaviour of the autoscaler.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current information about the autoscaler.
+ Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
}
// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
@@ -47,6 +56,27 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
return b
}
+// ExtractHorizontalPodAutoscalerFrom extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
+// ExtractHorizontalPodAutoscalerFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ b := &HorizontalPodAutoscalerApplyConfiguration{}
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(horizontalPodAutoscaler.Name)
+ b.WithNamespace(horizontalPodAutoscaler.Namespace)
+
+ b.WithKind("HorizontalPodAutoscaler")
+ b.WithAPIVersion("autoscaling/v2beta1")
+ return b, nil
+}
+
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +87,16 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "")
}
-// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractHorizontalPodAutoscalerStatus extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the status subresource.
func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "status")
}
-func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(horizontalPodAutoscaler.Name)
- b.WithNamespace(horizontalPodAutoscaler.Namespace)
-
- b.WithKind("HorizontalPodAutoscaler")
- b.WithAPIVersion("autoscaling/v2beta1")
- return b, nil
-}
func (b HorizontalPodAutoscalerApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
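
A sketch of the extract/modify-in-place/apply workflow that the new ExtractHorizontalPodAutoscalerFrom helper is meant for; it is not part of this patch, the clientset, namespace, name and field-manager values are placeholders, and autoscaling/v2beta1 is used only because that is the package shown above (it has long been deprecated server-side).

package hpaexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
	"k8s.io/client-go/kubernetes"
)

// bumpMaxReplicas re-applies only the fields owned by fieldManager, with maxReplicas raised.
func bumpMaxReplicas(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string) error {
	// Extract* expects an unmodified object fetched from the API server.
	live, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// An empty subresource extracts the main resource; "status" would extract the status subresource.
	ac, err := acautoscalingv2beta1.ExtractHorizontalPodAutoscalerFrom(live, fieldManager, "")
	if err != nil {
		return err
	}
	if ac.Spec == nil {
		ac.WithSpec(acautoscalingv2beta1.HorizontalPodAutoscalerSpec())
	}
	ac.Spec.WithMaxReplicas(10) // modify in place, then apply back under the same manager
	_, err = cs.AutoscalingV2beta1().HorizontalPodAutoscalers(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}
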
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
index 445cd55a..302df35a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalercondition.go
@@ -26,12 +26,22 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
+//
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *autoscalingv2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // type describes the current condition
+ Type *autoscalingv2beta1.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ // status is the status of the condition (True, False, Unknown)
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // lastTransitionTime is the last time the condition transitioned from
+ // one status to another
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // reason is the reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // message is a human-readable explanation containing details about
+ // the transition
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
index 6f111cea..bbe6a8fe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerspec.go
@@ -20,11 +20,29 @@ package v2beta1
// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
// with apply.
+//
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpecApplyConfiguration struct {
+	// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+ // should be collected, as well as to actually change the replica count.
ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
- MinReplicas *int32 `json:"minReplicas,omitempty"`
- MaxReplicas *int32 `json:"maxReplicas,omitempty"`
- Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
+ // minReplicas is the lower limit for the number of replicas to which the autoscaler
+ // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+ // alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+ // metric is configured. Scaling is active as long as at least one metric value is
+ // available.
+ MinReplicas *int32 `json:"minReplicas,omitempty"`
+ // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+	// It cannot be less than minReplicas.
+ MaxReplicas *int32 `json:"maxReplicas,omitempty"`
+ // metrics contains the specifications for which to use to calculate the
+ // desired replica count (the maximum replica count across all metrics will
+	// be used). The desired replica count is calculated by multiplying the
+ // ratio between the target value and the current value by the current
+ // number of pods. Ergo, metrics used must decrease as the pod count is
+ // increased, and vice-versa. See the individual metric source types for
+ // more information about how each type of metric must respond.
+ Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
}
// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
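
A hedged sketch of how these spec fields are normally populated through the generated With* builders rather than struct literals; the Deployment name, replica bounds and the 70% CPU target are invented for illustration.

package hpaexample

import (
	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	corev1 "k8s.io/api/core/v1"
	acautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
)

// webHPA builds an apply configuration that targets a Deployment and scales on average CPU utilization.
func webHPA(ns string) *acautoscalingv2beta1.HorizontalPodAutoscalerApplyConfiguration {
	return acautoscalingv2beta1.HorizontalPodAutoscaler("web", ns).
		WithSpec(acautoscalingv2beta1.HorizontalPodAutoscalerSpec().
			WithScaleTargetRef(acautoscalingv2beta1.CrossVersionObjectReference().
				WithAPIVersion("apps/v1").
				WithKind("Deployment").
				WithName("web")).
			WithMinReplicas(2).
			WithMaxReplicas(8).
			WithMetrics(acautoscalingv2beta1.MetricSpec().
				WithType(autoscalingv2beta1.ResourceMetricSourceType).
				WithResource(acautoscalingv2beta1.ResourceMetricSource().
					WithName(corev1.ResourceCPU).
					WithTargetAverageUtilization(70))))
}
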
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
index 391b5772..831ad5cf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscalerstatus.go
@@ -24,13 +24,25 @@ import (
// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
// with apply.
+//
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
- CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
- Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
+ // observedGeneration is the most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
+ // currentReplicas is current number of replicas of pods managed by this autoscaler,
+ // as last seen by the autoscaler.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+ // as last calculated by the autoscaler.
+ DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
+ // currentMetrics is the last read state of the metrics used by this autoscaler.
+ CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
+ // conditions is the set of conditions required for this autoscaler to scale its target,
+ // and indicates whether or not those conditions are met.
+ Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
}
// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
index 3a5faa3b..d48043c3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricspec.go
@@ -24,13 +24,38 @@ import (
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
+//
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
type MetricSpecApplyConfiguration struct {
- Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It should be one of "ContainerResource",
+ // "External", "Object", "Pods" or "Resource", each mapping to a matching field in the object.
+ Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // container resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in
+ // each pod of the current scale target (e.g. CPU or memory). Such metrics are
+ // built in to Kubernetes, and have special scaling options on top of those
+ // available to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricSourceApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
}
// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
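
Since only the type field plus the one matching source field should be set, here is a hedged sketch of the External variant; the metric name and target value are made up and assume an external metrics adapter is serving them.

package hpaexample

import (
	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	acautoscalingv2beta1 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta1"
)

// queueDepthMetric scales on a global metric that is not tied to any Kubernetes object.
func queueDepthMetric() *acautoscalingv2beta1.MetricSpecApplyConfiguration {
	return acautoscalingv2beta1.MetricSpec().
		WithType(autoscalingv2beta1.ExternalMetricSourceType).
		WithExternal(acautoscalingv2beta1.ExternalMetricSource().
			WithMetricName("queue_depth").
			WithTargetAverageValue(resource.MustParse("30")))
}
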
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
index f281e182..7ec38582 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/metricstatus.go
@@ -24,13 +24,37 @@ import (
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
+//
+// MetricStatus describes the last-read state of a single metric.
type MetricStatusApplyConfiguration struct {
- Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It will be one of "ContainerResource",
+ // "External", "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
+ Type *autoscalingv2beta1.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // container resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricStatusApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
}
// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
index a9e2eead..103280e2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricsource.go
@@ -25,12 +25,23 @@ import (
// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
// with apply.
+//
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricSourceApplyConfiguration struct {
- Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
- MetricName *string `json:"metricName,omitempty"`
- TargetValue *resource.Quantity `json:"targetValue,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // target is the described Kubernetes object.
+ Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
+ // metricName is the name of the metric in question.
+ MetricName *string `json:"metricName,omitempty"`
+ // targetValue is the target value of the metric (as a quantity).
+ TargetValue *resource.Quantity `json:"targetValue,omitempty"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+ // When unset, just the metricName will be used to gather metrics.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // averageValue is the target value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
}
// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
index 4d3be8df..b58d0e28 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/objectmetricstatus.go
@@ -25,12 +25,23 @@ import (
// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
// with apply.
+//
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricStatusApplyConfiguration struct {
- Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
- MetricName *string `json:"metricName,omitempty"`
- CurrentValue *resource.Quantity `json:"currentValue,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // target is the described Kubernetes object.
+ Target *CrossVersionObjectReferenceApplyConfiguration `json:"target,omitempty"`
+ // metricName is the name of the metric in question.
+ MetricName *string `json:"metricName,omitempty"`
+ // currentValue is the current value of the metric (as a quantity).
+ CurrentValue *resource.Quantity `json:"currentValue,omitempty"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+ // When unset, just the metricName will be used to gather metrics.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // averageValue is the current value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
}
// ObjectMetricStatusApplyConfiguration constructs a declarative configuration of the ObjectMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
index cfcd752e..47ceaee5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricsource.go
@@ -25,10 +25,21 @@ import (
// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
// with apply.
+//
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
type PodsMetricSourceApplyConfiguration struct {
- MetricName *string `json:"metricName,omitempty"`
- TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // metricName is the name of the metric in question
+ MetricName *string `json:"metricName,omitempty"`
+ // targetAverageValue is the target value of the average of the
+ // metric across all relevant pods (as a quantity)
+ TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+ // When unset, just the metricName will be used to gather metrics.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
}
// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
index f7a7777f..140f0a22 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/podsmetricstatus.go
@@ -25,10 +25,19 @@ import (
// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
// with apply.
+//
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
type PodsMetricStatusApplyConfiguration struct {
- MetricName *string `json:"metricName,omitempty"`
- CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // metricName is the name of the metric in question
+ MetricName *string `json:"metricName,omitempty"`
+ // currentAverageValue is the current value of the average of the
+ // metric across all relevant pods (as a quantity)
+ CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+ // When unset, just the metricName will be used to gather metrics.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
}
// PodsMetricStatusApplyConfiguration constructs a declarative configuration of the PodsMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
index ad97d83c..0c3c849e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricsource.go
@@ -25,10 +25,25 @@ import (
// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
// with apply.
+//
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty"`
- TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // targetAverageUtilization is the target value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty"`
+ // targetAverageValue is the target value of the average of the
+ // resource metric across all relevant pods, as a raw value (instead of as
+ // a percentage of the request), similar to the "pods" metric source type.
+ TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty"`
}
// ResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ResourceMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
index 78fbeaad..61db2eae 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/resourcemetricstatus.go
@@ -25,10 +25,26 @@ import (
// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
// with apply.
+//
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty"`
- CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // currentAverageUtilization is the current value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods. It will only be
+ // present if `targetAverageValue` was set in the corresponding metric
+ // specification.
+ CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty"`
+ // currentAverageValue is the current value of the average of the
+ // resource metric across all relevant pods, as a raw value (instead of as
+ // a percentage of the request), similar to the "pods" metric source type.
+ // It will always be set, regardless of the corresponding metric specification.
+ CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty"`
}
// ResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ResourceMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
index 1050165e..babb8e8b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricsource.go
@@ -24,10 +24,21 @@ import (
// ContainerResourceMetricSourceApplyConfiguration represents a declarative configuration of the ContainerResourceMetricSource type for use
// with apply.
+//
+// ContainerResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ContainerResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricSourceApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricSource type for use with
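
In the v2beta2 family the target is expressed through a separate MetricTarget builder; a hedged sketch of a per-container CPU target follows, where the container name "app" and the 60% figure are invented.

package hpaexample

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	acautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

// appContainerCPUMetric scales on the CPU utilization of a single container rather than the whole pod.
func appContainerCPUMetric() *acautoscalingv2beta2.MetricSpecApplyConfiguration {
	return acautoscalingv2beta2.MetricSpec().
		WithType(autoscalingv2beta2.ContainerResourceMetricSourceType).
		WithContainerResource(acautoscalingv2beta2.ContainerResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithContainer("app").
			WithTarget(acautoscalingv2beta2.MetricTarget().
				WithType(autoscalingv2beta2.UtilizationMetricType).
				WithAverageUtilization(60)))
}
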
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
index 708f68bc..771e9fdf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/containerresourcemetricstatus.go
@@ -24,10 +24,19 @@ import (
// ContainerResourceMetricStatusApplyConfiguration represents a declarative configuration of the ContainerResourceMetricStatus type for use
// with apply.
+//
+// ContainerResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing a single container in each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ContainerResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
- Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
- Container *string `json:"container,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // current contains the current value for the given metric
+ Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
+ // container is the name of the container in the pods of the scaling target
+ Container *string `json:"container,omitempty"`
}
// ContainerResourceMetricStatusApplyConfiguration constructs a declarative configuration of the ContainerResourceMetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
index c281084b..df6708a1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/crossversionobjectreference.go
@@ -20,9 +20,14 @@ package v2beta2
// CrossVersionObjectReferenceApplyConfiguration represents a declarative configuration of the CrossVersionObjectReference type for use
// with apply.
+//
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
type CrossVersionObjectReferenceApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // apiVersion is the API version of the referent
APIVersion *string `json:"apiVersion,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
index d34ca114..6baca7f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricsource.go
@@ -20,9 +20,15 @@ package v2beta2
// ExternalMetricSourceApplyConfiguration represents a declarative configuration of the ExternalMetricSource type for use
// with apply.
+//
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example length of queue in cloud
+// messaging service, or QPS from loadbalancer running outside of cluster).
type ExternalMetricSourceApplyConfiguration struct {
+ // metric identifies the target metric by name and selector
Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
// ExternalMetricSourceApplyConfiguration constructs a declarative configuration of the ExternalMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
index be29e607..725f5635 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/externalmetricstatus.go
@@ -20,8 +20,13 @@ package v2beta2
// ExternalMetricStatusApplyConfiguration represents a declarative configuration of the ExternalMetricStatus type for use
// with apply.
+//
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
type ExternalMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
index 93cdd789..9a4588bc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -29,11 +29,20 @@ import (
// HorizontalPodAutoscalerApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscaler type for use
// with apply.
+//
+// HorizontalPodAutoscaler is the configuration for a horizontal pod
+// autoscaler, which automatically manages the replica count of any resource
+// implementing the scale subresource based on the metrics specified.
type HorizontalPodAutoscalerApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata is the standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the specification for the behaviour of the autoscaler.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *HorizontalPodAutoscalerSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current information about the autoscaler.
+ Status *HorizontalPodAutoscalerStatusApplyConfiguration `json:"status,omitempty"`
}
// HorizontalPodAutoscaler constructs a declarative configuration of the HorizontalPodAutoscaler type for use with
@@ -47,6 +56,27 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
return b
}
+// ExtractHorizontalPodAutoscalerFrom extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// horizontalPodAutoscaler must be an unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
+// ExtractHorizontalPodAutoscalerFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ b := &HorizontalPodAutoscalerApplyConfiguration{}
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(horizontalPodAutoscaler.Name)
+ b.WithNamespace(horizontalPodAutoscaler.Namespace)
+
+ b.WithKind("HorizontalPodAutoscaler")
+ b.WithAPIVersion("autoscaling/v2beta2")
+ return b, nil
+}
+
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +87,16 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "")
}
-// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractHorizontalPodAutoscalerStatus extracts the applied configuration owned by fieldManager from
+// horizontalPodAutoscaler for the status subresource.
func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+ return ExtractHorizontalPodAutoscalerFrom(horizontalPodAutoscaler, fieldManager, "status")
}
-func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
- b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(horizontalPodAutoscaler.Name)
- b.WithNamespace(horizontalPodAutoscaler.Namespace)
-
- b.WithKind("HorizontalPodAutoscaler")
- b.WithAPIVersion("autoscaling/v2beta2")
- return b, nil
-}
func (b HorizontalPodAutoscalerApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
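
The same helper covers subresources via its third argument. A hedged sketch of extracting and re-applying only the status fields owned by one manager; in a real cluster the HPA controller owns this status, so this is purely to show the subresource parameter, and the clientset and names are placeholders.

package hpaexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
	"k8s.io/client-go/kubernetes"
)

// recordDesiredReplicas sets status.desiredReplicas through the status subresource under fieldManager.
func recordDesiredReplicas(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string, desired int32) error {
	live, err := cs.AutoscalingV2beta2().HorizontalPodAutoscalers(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Equivalent to ExtractHorizontalPodAutoscalerStatus(live, fieldManager).
	ac, err := acautoscalingv2beta2.ExtractHorizontalPodAutoscalerFrom(live, fieldManager, "status")
	if err != nil {
		return err
	}
	if ac.Status == nil {
		ac.WithStatus(acautoscalingv2beta2.HorizontalPodAutoscalerStatus())
	}
	ac.Status.WithDesiredReplicas(desired)
	_, err = cs.AutoscalingV2beta2().HorizontalPodAutoscalers(ns).ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}
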
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
index e9b1a9fb..355e15ff 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerbehavior.go
@@ -20,8 +20,20 @@ package v2beta2
// HorizontalPodAutoscalerBehaviorApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerBehavior type for use
// with apply.
+//
+// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target
+// in both Up and Down directions (scaleUp and scaleDown fields respectively).
type HorizontalPodAutoscalerBehaviorApplyConfiguration struct {
- ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
+ // scaleUp is scaling policy for scaling Up.
+ // If not set, the default value is the higher of:
+ // * increase no more than 4 pods per 60 seconds
+ // * double the number of pods per 60 seconds
+ // No stabilization is used.
+ ScaleUp *HPAScalingRulesApplyConfiguration `json:"scaleUp,omitempty"`
+ // scaleDown is scaling policy for scaling Down.
+ // If not set, the default value is to allow to scale down to minReplicas pods, with a
+ // 300 second stabilization window (i.e., the highest recommendation for
+ // the last 300sec is used).
ScaleDown *HPAScalingRulesApplyConfiguration `json:"scaleDown,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
index f8886912..a23e18b1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalercondition.go
@@ -26,12 +26,22 @@ import (
// HorizontalPodAutoscalerConditionApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerCondition type for use
// with apply.
+//
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
type HorizontalPodAutoscalerConditionApplyConfiguration struct {
- Type *autoscalingv2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // type describes the current condition
+ Type *autoscalingv2beta2.HorizontalPodAutoscalerConditionType `json:"type,omitempty"`
+ // status is the status of the condition (True, False, Unknown)
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // lastTransitionTime is the last time the condition transitioned from
+ // one status to another
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // reason is the reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // message is a human-readable explanation containing details about
+ // the transition
+ Message *string `json:"message,omitempty"`
}
// HorizontalPodAutoscalerConditionApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
index 9629e4bd..50ee3fae 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerspec.go
@@ -20,12 +20,34 @@ package v2beta2
// HorizontalPodAutoscalerSpecApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerSpec type for use
// with apply.
+//
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
type HorizontalPodAutoscalerSpecApplyConfiguration struct {
- ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
- MinReplicas *int32 `json:"minReplicas,omitempty"`
- MaxReplicas *int32 `json:"maxReplicas,omitempty"`
- Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
- Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
+	// scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+ // should be collected, as well as to actually change the replica count.
+ ScaleTargetRef *CrossVersionObjectReferenceApplyConfiguration `json:"scaleTargetRef,omitempty"`
+ // minReplicas is the lower limit for the number of replicas to which the autoscaler
+ // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+ // alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+ // metric is configured. Scaling is active as long as at least one metric value is
+ // available.
+ MinReplicas *int32 `json:"minReplicas,omitempty"`
+ // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+	// It cannot be less than minReplicas.
+ MaxReplicas *int32 `json:"maxReplicas,omitempty"`
+ // metrics contains the specifications for which to use to calculate the
+ // desired replica count (the maximum replica count across all metrics will
+	// be used). The desired replica count is calculated by multiplying the
+ // ratio between the target value and the current value by the current
+ // number of pods. Ergo, metrics used must decrease as the pod count is
+ // increased, and vice-versa. See the individual metric source types for
+ // more information about how each type of metric must respond.
+ // If not set, the default metric will be set to 80% average CPU utilization.
+ Metrics []MetricSpecApplyConfiguration `json:"metrics,omitempty"`
+ // behavior configures the scaling behavior of the target
+ // in both Up and Down directions (scaleUp and scaleDown fields respectively).
+ // If not set, the default HPAScalingRules for scale up and scale down are used.
+ Behavior *HorizontalPodAutoscalerBehaviorApplyConfiguration `json:"behavior,omitempty"`
}
// HorizontalPodAutoscalerSpecApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
index 1eee6450..ffbdc1c3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscalerstatus.go
@@ -24,13 +24,25 @@ import (
// HorizontalPodAutoscalerStatusApplyConfiguration represents a declarative configuration of the HorizontalPodAutoscalerStatus type for use
// with apply.
+//
+// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
type HorizontalPodAutoscalerStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
- CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
- DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
- CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
- Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
+ // observedGeneration is the most recent generation observed by this autoscaler.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
+ // used by the autoscaler to control how often the number of pods is changed.
+ LastScaleTime *v1.Time `json:"lastScaleTime,omitempty"`
+ // currentReplicas is current number of replicas of pods managed by this autoscaler,
+ // as last seen by the autoscaler.
+ CurrentReplicas *int32 `json:"currentReplicas,omitempty"`
+ // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
+ // as last calculated by the autoscaler.
+ DesiredReplicas *int32 `json:"desiredReplicas,omitempty"`
+ // currentMetrics is the last read state of the metrics used by this autoscaler.
+ CurrentMetrics []MetricStatusApplyConfiguration `json:"currentMetrics,omitempty"`
+ // conditions is the set of conditions required for this autoscaler to scale its target,
+ // and indicates whether or not those conditions are met.
+ Conditions []HorizontalPodAutoscalerConditionApplyConfiguration `json:"conditions,omitempty"`
}
// HorizontalPodAutoscalerStatusApplyConfiguration constructs a declarative configuration of the HorizontalPodAutoscalerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
index 2bbbbdde..ca07910b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingpolicy.go
@@ -24,10 +24,17 @@ import (
// HPAScalingPolicyApplyConfiguration represents a declarative configuration of the HPAScalingPolicy type for use
// with apply.
+//
+// HPAScalingPolicy is a single policy which must hold true for a specified past interval.
type HPAScalingPolicyApplyConfiguration struct {
- Type *autoscalingv2beta2.HPAScalingPolicyType `json:"type,omitempty"`
- Value *int32 `json:"value,omitempty"`
- PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
+ // type is used to specify the scaling policy.
+ Type *autoscalingv2beta2.HPAScalingPolicyType `json:"type,omitempty"`
+ // value contains the amount of change which is permitted by the policy.
+ // It must be greater than zero
+ Value *int32 `json:"value,omitempty"`
+ // periodSeconds specifies the window of time for which the policy should hold true.
+ // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
}
// HPAScalingPolicyApplyConfiguration constructs a declarative configuration of the HPAScalingPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
index 92aa449a..a87dc3d3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/hpascalingrules.go
@@ -24,10 +24,27 @@ import (
// HPAScalingRulesApplyConfiguration represents a declarative configuration of the HPAScalingRules type for use
// with apply.
+//
+// HPAScalingRules configures the scaling behavior for one direction.
+// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// They can limit the scaling velocity by specifying scaling policies.
+// They can prevent flapping by specifying the stabilization window, so that the
+// number of replicas is not set instantly, instead, the safest value from the stabilization
+// window is chosen.
type HPAScalingRulesApplyConfiguration struct {
- StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
- SelectPolicy *autoscalingv2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
- Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
+ // stabilizationWindowSeconds is the number of seconds for which past recommendations should be
+ // considered while scaling up or scaling down.
+ // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour).
+ // If not set, use the default values:
+ // - For scale up: 0 (i.e. no stabilization is done).
+ // - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+ StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds,omitempty"`
+ // selectPolicy is used to specify which policy should be used.
+ // If not set, the default value MaxPolicySelect is used.
+ SelectPolicy *autoscalingv2beta2.ScalingPolicySelect `json:"selectPolicy,omitempty"`
+ // policies is a list of potential scaling polices which can be used during scaling.
+	// policies is a list of potential scaling policies which can be used during scaling.
+ Policies []HPAScalingPolicyApplyConfiguration `json:"policies,omitempty"`
}
// HPAScalingRulesApplyConfiguration constructs a declarative configuration of the HPAScalingRules type for use with
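
The behavior, rules and policy builders above compose as shown in this hedged sketch of a conservative scale-down; the 600-second window and the "remove at most 2 pods per 60 seconds" policy are arbitrary values, not recommendations.

package hpaexample

import (
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	acautoscalingv2beta2 "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

// conservativeScaleDown lengthens the stabilization window and caps scale-down velocity.
func conservativeScaleDown() *acautoscalingv2beta2.HorizontalPodAutoscalerBehaviorApplyConfiguration {
	return acautoscalingv2beta2.HorizontalPodAutoscalerBehavior().
		WithScaleDown(acautoscalingv2beta2.HPAScalingRules().
			WithStabilizationWindowSeconds(600).
			WithSelectPolicy(autoscalingv2beta2.MinPolicySelect).
			WithPolicies(acautoscalingv2beta2.HPAScalingPolicy().
				WithType(autoscalingv2beta2.PodsScalingPolicy).
				WithValue(2).
				WithPeriodSeconds(60)))
}
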
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
index e8b2abb0..9fd064cb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricidentifier.go
@@ -24,8 +24,14 @@ import (
// MetricIdentifierApplyConfiguration represents a declarative configuration of the MetricIdentifier type for use
// with apply.
+//
+// MetricIdentifier defines the name and optionally selector for a metric
type MetricIdentifierApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name is the name of the given metric
+ Name *string `json:"name,omitempty"`
+ // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+ // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+ // When unset, just the metricName will be used to gather metrics.
Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
index 3da1617c..92eb2fa6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricspec.go
@@ -24,13 +24,38 @@ import (
// MetricSpecApplyConfiguration represents a declarative configuration of the MetricSpec type for use
// with apply.
+//
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
type MetricSpecApplyConfiguration struct {
- Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It should be one of "ContainerResource", "External",
+ // "Object", "Pods" or "Resource", each mapping to a matching field in the object.
+ Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricSourceApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricSourceApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricSourceApplyConfiguration `json:"resource,omitempty"`
+ // container resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in
+ // each pod of the current scale target (e.g. CPU or memory). Such metrics are
+ // built in to Kubernetes, and have special scaling options on top of those
+ // available to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricSourceApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricSourceApplyConfiguration `json:"external,omitempty"`
}
// MetricSpecApplyConfiguration constructs a declarative configuration of the MetricSpec type for use with
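Only the source matching `type` should be populated, as the field comments above note. A small sketch of a CPU-utilization metric under that constraint, again assuming the generated MetricSpec, ResourceMetricSource, and MetricTarget constructors:

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	corev1 "k8s.io/api/core/v1"
	acautoscaling "k8s.io/client-go/applyconfigurations/autoscaling/v2beta2"
)

func main() {
	// Resource metric: target 80% average CPU utilization across the pods
	// of the scale target.
	cpuMetric := acautoscaling.MetricSpec().
		WithType(autoscalingv2beta2.ResourceMetricSourceType).
		WithResource(acautoscaling.ResourceMetricSource().
			WithName(corev1.ResourceCPU).
			WithTarget(acautoscaling.MetricTarget().
				WithType(autoscalingv2beta2.UtilizationMetricType).
				WithAverageUtilization(80)))

	fmt.Println(*cpuMetric.Type, *cpuMetric.Resource.Name)
}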
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
index b528bd76..16dc6781 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricstatus.go
@@ -24,13 +24,37 @@ import (
// MetricStatusApplyConfiguration represents a declarative configuration of the MetricStatus type for use
// with apply.
+//
+// MetricStatus describes the last-read state of a single metric.
type MetricStatusApplyConfiguration struct {
- Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
- Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
- Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
- Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // type is the type of metric source. It will be one of "ContainerResource", "External",
+ // "Object", "Pods" or "Resource", each corresponds to a matching field in the object.
+ Type *autoscalingv2beta2.MetricSourceType `json:"type,omitempty"`
+ // object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ Object *ObjectMetricStatusApplyConfiguration `json:"object,omitempty"`
+ // pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ Pods *PodsMetricStatusApplyConfiguration `json:"pods,omitempty"`
+ // resource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
+ Resource *ResourceMetricStatusApplyConfiguration `json:"resource,omitempty"`
+ // containerResource refers to a resource metric (such as those specified in
+ // requests and limits) known to Kubernetes describing a single container in each pod in the
+ // current scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics using the "pods" source.
ContainerResource *ContainerResourceMetricStatusApplyConfiguration `json:"containerResource,omitempty"`
- External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
+ // external refers to a global metric that is not associated
+ // with any Kubernetes object. It allows autoscaling based on information
+ // coming from components running outside of cluster
+ // (for example length of queue in cloud messaging service, or
+ // QPS from loadbalancer running outside of cluster).
+ External *ExternalMetricStatusApplyConfiguration `json:"external,omitempty"`
}
// MetricStatusApplyConfiguration constructs a declarative configuration of the MetricStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
index 286856d8..eedcd59d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metrictarget.go
@@ -25,11 +25,21 @@ import (
// MetricTargetApplyConfiguration represents a declarative configuration of the MetricTarget type for use
// with apply.
+//
+// MetricTarget defines the target value, average value, or average utilization of a specific metric
type MetricTargetApplyConfiguration struct {
- Type *autoscalingv2beta2.MetricTargetType `json:"type,omitempty"`
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ // type represents whether the metric type is Utilization, Value, or AverageValue
+ Type *autoscalingv2beta2.MetricTargetType `json:"type,omitempty"`
+ // value is the target value of the metric (as a quantity).
+ Value *resource.Quantity `json:"value,omitempty"`
+ // averageValue is the target value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // averageUtilization is the target value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ // Currently only valid for Resource metric source type
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricTargetApplyConfiguration constructs a declarative configuration of the MetricTarget type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
index cc409fc2..49141abc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/metricvaluestatus.go
@@ -24,10 +24,18 @@ import (
// MetricValueStatusApplyConfiguration represents a declarative configuration of the MetricValueStatus type for use
// with apply.
+//
+// MetricValueStatus holds the current value for a metric
type MetricValueStatusApplyConfiguration struct {
- Value *resource.Quantity `json:"value,omitempty"`
- AverageValue *resource.Quantity `json:"averageValue,omitempty"`
- AverageUtilization *int32 `json:"averageUtilization,omitempty"`
+ // value is the current value of the metric (as a quantity).
+ Value *resource.Quantity `json:"value,omitempty"`
+ // averageValue is the current value of the average of the
+ // metric across all relevant pods (as a quantity)
+ AverageValue *resource.Quantity `json:"averageValue,omitempty"`
+ // averageUtilization is the current value of the average of the
+ // resource metric across all relevant pods, represented as a percentage of
+ // the requested value of the resource for the pods.
+ AverageUtilization *int32 `json:"averageUtilization,omitempty"`
}
// MetricValueStatusApplyConfiguration constructs a declarative configuration of the MetricValueStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
index 17b492fa..e99e081c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricsource.go
@@ -20,10 +20,15 @@ package v2beta2
// ObjectMetricSourceApplyConfiguration represents a declarative configuration of the ObjectMetricSource type for use
// with apply.
+//
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricSourceApplyConfiguration struct {
DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
}
// ObjectMetricSourceApplyConfiguration constructs a declarative configuration of the ObjectMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
index e87417f2..4d5d0167 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/objectmetricstatus.go
@@ -20,8 +20,13 @@ package v2beta2
// ObjectMetricStatusApplyConfiguration represents a declarative configuration of the ObjectMetricStatus type for use
// with apply.
+//
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
type ObjectMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
DescribedObject *CrossVersionObjectReferenceApplyConfiguration `json:"describedObject,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
index 6ecbb180..11dd2f6e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricsource.go
@@ -20,9 +20,16 @@ package v2beta2
// PodsMetricSourceApplyConfiguration represents a declarative configuration of the PodsMetricSource type for use
// with apply.
+//
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
type PodsMetricSourceApplyConfiguration struct {
+ // metric identifies the target metric by name and selector
Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
- Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
+ // target specifies the target value for the given metric
+ Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
// PodsMetricSourceApplyConfiguration constructs a declarative configuration of the PodsMetricSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
index cd102972..929c02d4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/podsmetricstatus.go
@@ -20,8 +20,13 @@ package v2beta2
// PodsMetricStatusApplyConfiguration represents a declarative configuration of the PodsMetricStatus type for use
// with apply.
+//
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
type PodsMetricStatusApplyConfiguration struct {
- Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // metric identifies the target metric by name and selector
+ Metric *MetricIdentifierApplyConfiguration `json:"metric,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
index c482d75f..ce7e7e5f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricsource.go
@@ -24,8 +24,18 @@ import (
// ResourceMetricSourceApplyConfiguration represents a declarative configuration of the ResourceMetricSource type for use
// with apply.
+//
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
type ResourceMetricSourceApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // target specifies the target value for the given metric
Target *MetricTargetApplyConfiguration `json:"target,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
index eb13e90b..9a6f3919 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/resourcemetricstatus.go
@@ -24,8 +24,16 @@ import (
// ResourceMetricStatusApplyConfiguration represents a declarative configuration of the ResourceMetricStatus type for use
// with apply.
+//
+// ResourceMetricStatus indicates the current value of a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source.
type ResourceMetricStatusApplyConfiguration struct {
- Name *v1.ResourceName `json:"name,omitempty"`
+ // name is the name of the resource in question.
+ Name *v1.ResourceName `json:"name,omitempty"`
+ // current contains the current value for the given metric
Current *MetricValueStatusApplyConfiguration `json:"current,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
index 623b183c..2fbd4896 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
@@ -29,11 +29,19 @@ import (
// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use
// with apply.
+//
+// CronJob represents the configuration of a single cron job.
type CronJobApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of a cron job, including the schedule.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
+ // Current status of a cron job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
}
// CronJob constructs a declarative configuration of the CronJob type for use with
@@ -47,6 +55,27 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
return b
}
+// ExtractCronJobFrom extracts the applied configuration owned by fieldManager from
+// cronJob for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// cronJob must be an unmodified CronJob API object that was retrieved from the Kubernetes API.
+// ExtractCronJobFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCronJobFrom(cronJob *batchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
+ b := &CronJobApplyConfiguration{}
+ err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(cronJob.Name)
+ b.WithNamespace(cronJob.Namespace)
+
+ b.WithKind("CronJob")
+ b.WithAPIVersion("batch/v1")
+ return b, nil
+}
+
// ExtractCronJob extracts the applied configuration owned by fieldManager from
// cronJob. If no managedFields are found in cronJob for fieldManager, a
// CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +86,16 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// ExtractCronJob provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractCronJob(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
- return extractCronJob(cronJob, fieldManager, "")
+ return ExtractCronJobFrom(cronJob, fieldManager, "")
}
-// ExtractCronJobStatus is the same as ExtractCronJob except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractCronJobStatus extracts the applied configuration owned by fieldManager from
+// cronJob for the status subresource.
func ExtractCronJobStatus(cronJob *batchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
- return extractCronJob(cronJob, fieldManager, "status")
+ return ExtractCronJobFrom(cronJob, fieldManager, "status")
}
-func extractCronJob(cronJob *batchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
- b := &CronJobApplyConfiguration{}
- err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(cronJob.Name)
- b.WithNamespace(cronJob.Namespace)
-
- b.WithKind("CronJob")
- b.WithAPIVersion("batch/v1")
- return b, nil
-}
func (b CronJobApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
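The new ExtractCronJobFrom above is the general entry point behind the extract/modify-in-place/apply workflow. A hedged sketch of that workflow using a hypothetical suspendCronJob helper; the Get/Apply calls follow the usual typed-clientset pattern:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

// suspendCronJob is an illustrative helper: it extracts the fields owned by
// fieldManager, flips spec.suspend in place, and server-side-applies the result.
func suspendCronJob(ctx context.Context, cs kubernetes.Interface, ns, name, fieldManager string) error {
	cj, err := cs.BatchV1().CronJobs(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := batchv1ac.ExtractCronJob(cj, fieldManager)
	if err != nil {
		return err
	}
	if cfg.Spec == nil {
		cfg.WithSpec(batchv1ac.CronJobSpec())
	}
	cfg.Spec.WithSuspend(true)
	_, err = cs.BatchV1().CronJobs(ns).Apply(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager})
	return err
}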
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
index f53d140d..f76d58ba 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobspec.go
@@ -24,15 +24,42 @@ import (
// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
// with apply.
+//
+// CronJobSpec describes what the job execution will look like and when it will actually run.
type CronJobSpecApplyConfiguration struct {
- Schedule *string `json:"schedule,omitempty"`
- TimeZone *string `json:"timeZone,omitempty"`
- StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
- ConcurrencyPolicy *batchv1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
- Suspend *bool `json:"suspend,omitempty"`
- JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
- SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
- FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
+ // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ Schedule *string `json:"schedule,omitempty"`
+ // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
+ // If not specified, this will default to the time zone of the kube-controller-manager process.
+ // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone
+ // database by the API server during CronJob validation and the controller manager during execution.
+ // If no system-wide time zone database can be found a bundled version of the database is used instead.
+ // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host
+ // configuration, the controller will stop creating new Jobs and will create a system event with the
+ // reason UnknownTimeZone.
+ // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
+ TimeZone *string `json:"timeZone,omitempty"`
+ // Optional deadline in seconds for starting the job if it misses scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
+ // Specifies how to treat concurrent executions of a Job.
+ // Valid values are:
+ //
+ // - "Allow" (default): allows CronJobs to run concurrently;
+ // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+ // - "Replace": cancels currently running job and replaces it with a new one
+ ConcurrencyPolicy *batchv1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+ // This flag tells the controller to suspend subsequent executions, it does
+ // not apply to already started executions. Defaults to false.
+ Suspend *bool `json:"suspend,omitempty"`
+ // Specifies the job that will be created when executing a CronJob.
+ JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
+ // The number of successful finished jobs to retain. Value must be non-negative integer.
+ // Defaults to 3.
+ SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
+ // The number of failed finished jobs to retain. Value must be non-negative integer.
+ // Defaults to 1.
+ FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
}
// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with
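Taking the defaults described above at face value, a CronJob spec apply configuration might be assembled like this (schedule, time zone, and limits are illustrative values; the required jobTemplate is omitted for brevity):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func main() {
	// Nightly run at 03:00 UTC; skip a run if the previous one is still going,
	// and keep only a short history of finished Jobs.
	spec := batchv1ac.CronJobSpec().
		WithSchedule("0 3 * * *").
		WithTimeZone("Etc/UTC").
		WithConcurrencyPolicy(batchv1.ForbidConcurrent).
		WithStartingDeadlineSeconds(600).
		WithSuccessfulJobsHistoryLimit(3).
		WithFailedJobsHistoryLimit(1)

	fmt.Println(*spec.Schedule, *spec.ConcurrencyPolicy)
}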
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
index d29d9e89..664b104a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjobstatus.go
@@ -25,10 +25,15 @@ import (
// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use
// with apply.
+//
+// CronJobStatus represents the current state of a cron job.
type CronJobStatusApplyConfiguration struct {
- Active []corev1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
- LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
- LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
+ // A list of pointers to currently running jobs.
+ Active []corev1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
+ // Information about when the job was last successfully scheduled.
+ LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
+ // Information about when the job last successfully completed.
+ LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
}
// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
index 8aeec8f3..02d59f0e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
@@ -29,11 +29,19 @@ import (
// JobApplyConfiguration represents a declarative configuration of the Job type for use
// with apply.
+//
+// Job represents the configuration of a single job.
type JobApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
- Status *JobStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of a job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
+ // Current status of a job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *JobStatusApplyConfiguration `json:"status,omitempty"`
}
// Job constructs a declarative configuration of the Job type for use with
@@ -47,6 +55,27 @@ func Job(name, namespace string) *JobApplyConfiguration {
return b
}
+// ExtractJobFrom extracts the applied configuration owned by fieldManager from
+// job for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// job must be an unmodified Job API object that was retrieved from the Kubernetes API.
+// ExtractJobFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractJobFrom(job *batchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) {
+ b := &JobApplyConfiguration{}
+ err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(job.Name)
+ b.WithNamespace(job.Namespace)
+
+ b.WithKind("Job")
+ b.WithAPIVersion("batch/v1")
+ return b, nil
+}
+
// ExtractJob extracts the applied configuration owned by fieldManager from
// job. If no managedFields are found in job for fieldManager, a
// JobApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +86,16 @@ func Job(name, namespace string) *JobApplyConfiguration {
// ExtractJob provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractJob(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
- return extractJob(job, fieldManager, "")
+ return ExtractJobFrom(job, fieldManager, "")
}
-// ExtractJobStatus is the same as ExtractJob except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractJobStatus extracts the applied configuration owned by fieldManager from
+// job for the status subresource.
func ExtractJobStatus(job *batchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
- return extractJob(job, fieldManager, "status")
+ return ExtractJobFrom(job, fieldManager, "status")
}
-func extractJob(job *batchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) {
- b := &JobApplyConfiguration{}
- err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(job.Name)
- b.WithNamespace(job.Namespace)
-
- b.WithKind("Job")
- b.WithAPIVersion("batch/v1")
- return b, nil
-}
func (b JobApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
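ExtractJobFrom generalizes ExtractJob and ExtractJobStatus by taking the subresource explicitly. A sketch using a hypothetical ownedJobStatusFields helper to inspect which status fields a given field manager owns:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	"k8s.io/client-go/kubernetes"
)

// ownedJobStatusFields is an illustrative helper: passing "" would extract the
// main resource, while passing "status" extracts the status subresource.
func ownedJobStatusFields(ctx context.Context, cs kubernetes.Interface, ns, name, manager string) error {
	job, err := cs.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	statusCfg, err := batchv1ac.ExtractJobFrom(job, manager, "status")
	if err != nil {
		return err
	}
	// Fields not owned by this manager are absent from the extracted configuration.
	fmt.Printf("status fields owned by %s: %+v\n", manager, statusCfg.Status)
	return nil
}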
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
index fb3c65ab..66487972 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobcondition.go
@@ -26,13 +26,21 @@ import (
// JobConditionApplyConfiguration represents a declarative configuration of the JobCondition type for use
// with apply.
+//
+// JobCondition describes current state of a job.
type JobConditionApplyConfiguration struct {
- Type *batchv1.JobConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of job condition, Complete or Failed.
+ Type *batchv1.JobConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition was checked.
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // (brief) reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // Human readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// JobConditionApplyConfiguration constructs a declarative configuration of the JobCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
index 2104fe11..81064df7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobspec.go
@@ -26,23 +26,140 @@ import (
// JobSpecApplyConfiguration represents a declarative configuration of the JobSpec type for use
// with apply.
+//
+// JobSpec describes what the job execution will look like.
type JobSpecApplyConfiguration struct {
- Parallelism *int32 `json:"parallelism,omitempty"`
- Completions *int32 `json:"completions,omitempty"`
- ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
- PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"`
- SuccessPolicy *SuccessPolicyApplyConfiguration `json:"successPolicy,omitempty"`
- BackoffLimit *int32 `json:"backoffLimit,omitempty"`
- BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"`
- MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- ManualSelector *bool `json:"manualSelector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
- CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"`
- Suspend *bool `json:"suspend,omitempty"`
- PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"`
- ManagedBy *string `json:"managedBy,omitempty"`
+ // Specifies the maximum desired number of pods the job should
+ // run at any given time. The actual number of pods running in steady state will
+ // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
+ // i.e. when the work left to do is less than max parallelism.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ Parallelism *int32 `json:"parallelism,omitempty"`
+ // Specifies the desired number of successfully finished pods the
+ // job should be run with. Setting to null means that the success of any
+ // pod signals the success of all pods, and allows parallelism to have any positive
+ // value. Setting to 1 means that parallelism is limited to 1 and the success of that
+ // pod signals the success of the job.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ Completions *int32 `json:"completions,omitempty"`
+ // Specifies the duration in seconds relative to the startTime that the job
+ // may be continuously active before the system tries to terminate it; value
+ // must be positive integer. If a Job is suspended (at creation or through an
+ // update), this timer will effectively be stopped and reset when the Job is
+ // resumed again.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+ // Specifies the policy of handling failed pods. In particular, it allows to
+ // specify the set of actions and conditions which need to be
+ // satisfied to take the associated action.
+ // If empty, the default behaviour applies - the counter of failed pods,
+ // represented by the job's .status.failed field, is incremented and it is
+ // checked against the backoffLimit. This field cannot be used in combination
+ // with restartPolicy=OnFailure.
+ PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"`
+ // successPolicy specifies the policy when the Job can be declared as succeeded.
+ // If empty, the default behavior applies - the Job is declared as succeeded
+ // only when the number of succeeded pods equals to the completions.
+ // When the field is specified, it must be immutable and works only for the Indexed Jobs.
+ // Once the Job meets the SuccessPolicy, the lingering pods are terminated.
+ SuccessPolicy *SuccessPolicyApplyConfiguration `json:"successPolicy,omitempty"`
+ // Specifies the number of retries before marking this job failed.
+ // Defaults to 6, unless backoffLimitPerIndex (only Indexed Job) is specified.
+ // When backoffLimitPerIndex is specified, backoffLimit defaults to 2147483647.
+ BackoffLimit *int32 `json:"backoffLimit,omitempty"`
+ // Specifies the limit for the number of retries within an
+ // index before marking this index as failed. When enabled the number of
+ // failures per index is kept in the pod's
+ // batch.kubernetes.io/job-index-failure-count annotation. It can only
+ // be set when Job's completionMode=Indexed, and the Pod's restart
+ // policy is Never. The field is immutable.
+ BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"`
+ // Specifies the maximal number of failed indexes before marking the Job as
+ // failed, when backoffLimitPerIndex is set. Once the number of failed
+ // indexes exceeds this number the entire Job is marked as Failed and its
+ // execution is terminated. When left as null the job continues execution of
+ // all of its indexes and is marked with the `Complete` Job condition.
+ // It can only be specified when backoffLimitPerIndex is set.
+ // It can be null or up to completions. It is required and must be
+ // less than or equal to 10^4 when completions is greater than 10^5.
+ MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"`
+ // A label query over pods that should match the pod count.
+ // Normally, the system sets this field for you.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // manualSelector controls generation of pod labels and pod selectors.
+ // Leave `manualSelector` unset unless you are certain what you are doing.
+ // When false or unset, the system picks labels unique to this job
+ // and appends those labels to the pod template. When true,
+ // the user is responsible for picking unique labels and specifying
+ // the selector. Failure to pick a unique label may cause this
+ // and other jobs to not function correctly. However, you may see
+ // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
+ // API.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector
+ ManualSelector *bool `json:"manualSelector,omitempty"`
+ // Describes the pod that will be created when executing a job.
+ // The only allowed template.spec.restartPolicy values are "Never" or "OnFailure".
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // ttlSecondsAfterFinished limits the lifetime of a Job that has finished
+ // execution (either Complete or Failed). If this field is set,
+ // ttlSecondsAfterFinished after the Job finishes, it is eligible to be
+ // automatically deleted. When the Job is being deleted, its lifecycle
+ // guarantees (e.g. finalizers) will be honored. If this field is unset,
+ // the Job won't be automatically deleted. If this field is set to zero,
+ // the Job becomes eligible to be deleted immediately after it finishes.
+ TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
+ // completionMode specifies how Pod completions are tracked. It can be
+ // `NonIndexed` (default) or `Indexed`.
+ //
+ // `NonIndexed` means that the Job is considered complete when there have
+ // been .spec.completions successfully completed Pods. Each Pod completion is
+ // homologous to each other.
+ //
+ // `Indexed` means that the Pods of a
+ // Job get an associated completion index from 0 to (.spec.completions - 1),
+ // available in the annotation batch.kubernetes.io/job-completion-index.
+ // The Job is considered complete when there is one successfully completed Pod
+ // for each index.
+ // When value is `Indexed`, .spec.completions must be specified and
+ // `.spec.parallelism` must be less than or equal to 10^5.
+ // In addition, the Pod name takes the form
+ // `$(job-name)-$(index)-$(random-string)`,
+ // the Pod hostname takes the form `$(job-name)-$(index)`.
+ //
+ // More completion modes can be added in the future.
+ // If the Job controller observes a mode that it doesn't recognize, which
+ // is possible during upgrades due to version skew, the controller
+ // skips updates for the Job.
+ CompletionMode *batchv1.CompletionMode `json:"completionMode,omitempty"`
+ // suspend specifies whether the Job controller should create Pods or not. If
+ // a Job is created with suspend set to true, no Pods are created by the Job
+ // controller. If a Job is suspended after creation (i.e. the flag goes from
+ // false to true), the Job controller will delete all active Pods associated
+ // with this Job. Users must design their workload to gracefully handle this.
+ // Suspending a Job will reset the StartTime field of the Job, effectively
+ // resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ Suspend *bool `json:"suspend,omitempty"`
+ // podReplacementPolicy specifies when to create replacement Pods.
+ // Possible values are:
+ // - TerminatingOrFailed means that we recreate pods
+ // when they are terminating (has a metadata.deletionTimestamp) or failed.
+ // - Failed means to wait until a previously created Pod is fully terminated (has phase
+ // Failed or Succeeded) before creating a replacement Pod.
+ //
+ // When using podFailurePolicy, Failed is the only allowed value.
+ // TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use.
+ PodReplacementPolicy *batchv1.PodReplacementPolicy `json:"podReplacementPolicy,omitempty"`
+ // ManagedBy field indicates the controller that manages a Job. The k8s Job
+ // controller reconciles jobs which don't have this field at all or the field
+ // value is the reserved string `kubernetes.io/job-controller`, but skips
+ // reconciling Jobs with a custom value for this field.
+ // The value must be a valid domain-prefixed path (e.g. acme.io/foo) -
+ // all characters before the first "/" must be a valid subdomain as defined
+ // by RFC 1123. All characters trailing the first "/" must be valid HTTP Path
+ // characters as defined by RFC 3986. The value cannot exceed 63 characters.
+ // This field is immutable.
+ ManagedBy *string `json:"managedBy,omitempty"`
}
// JobSpecApplyConfiguration constructs a declarative configuration of the JobSpec type for use with
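Pulling several of the fields documented above together, a sketch of an Indexed Job spec built from the generated batch/v1 and core/v1 apply-configuration builders (image, command, and counts are illustrative):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Indexed Job: 4 completions, 2 pods at a time, up to 6 retries,
	// cleaned up one hour after it finishes.
	spec := batchv1ac.JobSpec().
		WithCompletionMode(batchv1.IndexedCompletion).
		WithCompletions(4).
		WithParallelism(2).
		WithBackoffLimit(6).
		WithTTLSecondsAfterFinished(3600).
		WithTemplate(corev1ac.PodTemplateSpec().
			WithSpec(corev1ac.PodSpec().
				WithRestartPolicy(corev1.RestartPolicyNever).
				WithContainers(corev1ac.Container().
					WithName("worker").
					WithImage("busybox:1.36").
					WithCommand("sh", "-c", "echo index=$JOB_COMPLETION_INDEX"))))

	fmt.Println(*spec.CompletionMode, *spec.Completions)
}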
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
index 071a0153..0e11fd9b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
@@ -24,18 +24,92 @@ import (
// JobStatusApplyConfiguration represents a declarative configuration of the JobStatus type for use
// with apply.
+//
+// JobStatus represents the current state of a Job.
type JobStatusApplyConfiguration struct {
- Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"`
- StartTime *metav1.Time `json:"startTime,omitempty"`
- CompletionTime *metav1.Time `json:"completionTime,omitempty"`
- Active *int32 `json:"active,omitempty"`
- Succeeded *int32 `json:"succeeded,omitempty"`
- Failed *int32 `json:"failed,omitempty"`
- Terminating *int32 `json:"terminating,omitempty"`
- CompletedIndexes *string `json:"completedIndexes,omitempty"`
- FailedIndexes *string `json:"failedIndexes,omitempty"`
+ // The latest available observations of an object's current state. When a Job
+ // fails, one of the conditions will have type "Failed" and status true. When
+ // a Job is suspended, one of the conditions will have type "Suspended" and
+ // status true; when the Job is resumed, the status of this condition will
+ // become false. When a Job is completed, one of the conditions will have
+ // type "Complete" and status true.
+ //
+ // A job is considered finished when it is in a terminal condition, either
+ // "Complete" or "Failed". A Job cannot have both the "Complete" and "Failed" conditions.
+ // Additionally, it cannot be in the "Complete" and "FailureTarget" conditions.
+ // The "Complete", "Failed" and "FailureTarget" conditions cannot be disabled.
+ //
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/
+ Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Represents time when the job controller started processing a job. When a
+ // Job is created in the suspended state, this field is not set until the
+ // first time it is resumed. This field is reset every time a Job is resumed
+ // from suspension. It is represented in RFC3339 form and is in UTC.
+ //
+ // Once set, the field can only be removed when the job is suspended.
+ // The field cannot be modified while the job is unsuspended or finished.
+ StartTime *metav1.Time `json:"startTime,omitempty"`
+ // Represents time when the job was completed. It is not guaranteed to
+ // be set in happens-before order across separate operations.
+ // It is represented in RFC3339 form and is in UTC.
+ // The completion time is set when the job finishes successfully, and only then.
+ // The value cannot be updated or removed. The value indicates the same or
+ // later point in time as the startTime field.
+ CompletionTime *metav1.Time `json:"completionTime,omitempty"`
+ // The number of pending and running pods which are not terminating (without
+ // a deletionTimestamp).
+ // The value is zero for finished jobs.
+ Active *int32 `json:"active,omitempty"`
+ // The number of pods which reached phase Succeeded.
+ // The value increases monotonically for a given spec. However, it may
+ // decrease in reaction to scale down of elastic indexed jobs.
+ Succeeded *int32 `json:"succeeded,omitempty"`
+ // The number of pods which reached phase Failed.
+ // The value increases monotonically.
+ Failed *int32 `json:"failed,omitempty"`
+ // The number of pods which are terminating (in phase Pending or Running
+ // and have a deletionTimestamp).
+ //
+ // This field is beta-level. The job controller populates the field when
+ // the feature gate JobPodReplacementPolicy is enabled (enabled by default).
+ Terminating *int32 `json:"terminating,omitempty"`
+ // completedIndexes holds the completed indexes when .spec.completionMode =
+ // "Indexed" in a text format. The indexes are represented as decimal integers
+ // separated by commas. The numbers are listed in increasing order. Three or
+ // more consecutive numbers are compressed and represented by the first and
+ // last element of the series, separated by a hyphen.
+ // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
+ // represented as "1,3-5,7".
+ CompletedIndexes *string `json:"completedIndexes,omitempty"`
+ // FailedIndexes holds the failed indexes when spec.backoffLimitPerIndex is set.
+ // The indexes are represented in the text format analogous as for the
+ // `completedIndexes` field, ie. they are kept as decimal integers
+ // separated by commas. The numbers are listed in increasing order. Three or
+ // more consecutive numbers are compressed and represented by the first and
+ // last element of the series, separated by a hyphen.
+ // For example, if the failed indexes are 1, 3, 4, 5 and 7, they are
+ // represented as "1,3-5,7".
+ // The set of failed indexes cannot overlap with the set of completed indexes.
+ FailedIndexes *string `json:"failedIndexes,omitempty"`
+ // uncountedTerminatedPods holds the UIDs of Pods that have terminated but
+ // the job controller hasn't yet accounted for in the status counters.
+ //
+ // The job controller creates pods with a finalizer. When a pod terminates
+ // (succeeded or failed), the controller does three steps to account for it
+ // in the job status:
+ //
+ // 1. Add the pod UID to the arrays in this field.
+ // 2. Remove the pod finalizer.
+ // 3. Remove the pod UID from the arrays while increasing the corresponding
+ // counter.
+ //
+ // Old jobs might not be tracked using this field, in which case the field
+ // remains null.
+ // The structure is empty for finished jobs.
UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"`
- Ready *int32 `json:"ready,omitempty"`
+ // The number of active pods which have a Ready condition and are not
+ // terminating (without a deletionTimestamp).
+ Ready *int32 `json:"ready,omitempty"`
}
// JobStatusApplyConfiguration constructs a declarative configuration of the JobStatus type for use with
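The condition semantics described above (a Job is finished once a "Complete" or "Failed" condition has status True) reduce to a small check against the batch/v1 API types; a sketch:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// jobFinished reports whether the Job reached a terminal condition, and which one.
func jobFinished(job *batchv1.Job) (bool, batchv1.JobConditionType) {
	for _, c := range job.Status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue {
			return true, c.Type
		}
	}
	return false, ""
}

func main() {
	// A freshly created Job has no terminal condition yet.
	fmt.Println(jobFinished(&batchv1.Job{}))
}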
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
index 149c5e8f..7fb67d2d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobtemplatespec.go
@@ -26,9 +26,15 @@ import (
// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use
// with apply.
+//
+// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpecApplyConfiguration struct {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *JobSpecApplyConfiguration `json:"spec,omitempty"`
}
// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
index 05a68b3c..27b3e0a1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicy.go
@@ -20,7 +20,14 @@ package v1
// PodFailurePolicyApplyConfiguration represents a declarative configuration of the PodFailurePolicy type for use
// with apply.
+//
+// PodFailurePolicy describes how failed pods influence the backoffLimit.
type PodFailurePolicyApplyConfiguration struct {
+ // A list of pod failure policy rules. The rules are evaluated in order.
+ // Once a rule matches a Pod failure, the remaining rules are ignored.
+ // When no rule matches the Pod failure, the default handling applies - the
+ // counter of pod failures is incremented and it is checked against
+ // the backoffLimit. At most 20 elements are allowed.
Rules []PodFailurePolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
index aa4dfc4c..b72cf684 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonexitcodesrequirement.go
@@ -24,10 +24,38 @@ import (
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use
// with apply.
+//
+// PodFailurePolicyOnExitCodesRequirement describes the requirement for handling
+// a failed pod based on its container exit codes. In particular, it looks up the
+// .state.terminated.exitCode for each app container and init container status,
+// represented by the .status.containerStatuses and .status.initContainerStatuses
+// fields in the Pod status, respectively. Containers completed with success
+// (exit code 0) are excluded from the requirement check.
type PodFailurePolicyOnExitCodesRequirementApplyConfiguration struct {
- ContainerName *string `json:"containerName,omitempty"`
- Operator *batchv1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"`
- Values []int32 `json:"values,omitempty"`
+ // Restricts the check for exit codes to the container with the
+ // specified name. When null, the rule applies to all containers.
+ // When specified, it should match one of the container or initContainer
+ // names in the pod template.
+ ContainerName *string `json:"containerName,omitempty"`
+ // Represents the relationship between the container exit code(s) and the
+ // specified values. Containers completed with success (exit code 0) are
+ // excluded from the requirement check. Possible values are:
+ //
+ // - In: the requirement is satisfied if at least one container exit code
+ // (might be multiple if there are multiple containers not restricted
+ // by the 'containerName' field) is in the set of specified values.
+ // - NotIn: the requirement is satisfied if at least one container exit code
+ // (might be multiple if there are multiple containers not restricted
+ // by the 'containerName' field) is not in the set of specified values.
+ // Additional values may be added in the future. Clients should
+ // react to an unknown operator by assuming the requirement is not satisfied.
+ Operator *batchv1.PodFailurePolicyOnExitCodesOperator `json:"operator,omitempty"`
+ // Specifies the set of values. Each returned container exit code (might be
+ // multiple in case of multiple containers) is checked against this set of
+ // values with respect to the operator. The list of values must be ordered
+ // and must not contain duplicates. Value '0' cannot be used for the In operator.
+ // At least one element is required. At most 255 elements are allowed.
+ Values []int32 `json:"values,omitempty"`
}
// PodFailurePolicyOnExitCodesRequirementApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnExitCodesRequirement type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
index 6459a6e5..34f1a092 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyonpodconditionspattern.go
@@ -24,9 +24,17 @@ import (
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration represents a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use
// with apply.
+//
+// PodFailurePolicyOnPodConditionsPattern describes a pattern for matching
+// an actual pod condition type.
type PodFailurePolicyOnPodConditionsPatternApplyConfiguration struct {
- Type *corev1.PodConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Specifies the required Pod condition type. To match a pod condition
+ // it is required that the specified type equals the pod condition type.
+ Type *corev1.PodConditionType `json:"type,omitempty"`
+ // Specifies the required Pod condition status. To match a pod condition
+ // it is required that the specified status equals the pod condition status.
+ // Defaults to True.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
}
// PodFailurePolicyOnPodConditionsPatternApplyConfiguration constructs a declarative configuration of the PodFailurePolicyOnPodConditionsPattern type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
index 847ec7c9..d3a2e69f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/podfailurepolicyrule.go
@@ -24,9 +24,29 @@ import (
// PodFailurePolicyRuleApplyConfiguration represents a declarative configuration of the PodFailurePolicyRule type for use
// with apply.
+//
+// PodFailurePolicyRule describes how a pod failure is handled when the requirements are met.
+// One of onExitCodes and onPodConditions, but not both, can be used in each rule.
type PodFailurePolicyRuleApplyConfiguration struct {
- Action *batchv1.PodFailurePolicyAction `json:"action,omitempty"`
- OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"`
+ // Specifies the action taken on a pod failure when the requirements are satisfied.
+ // Possible values are:
+ //
+ // - FailJob: indicates that the pod's job is marked as Failed and all
+ // running pods are terminated.
+ // - FailIndex: indicates that the pod's index is marked as Failed and will
+ // not be restarted.
+ // - Ignore: indicates that the counter towards the .backoffLimit is not
+ // incremented and a replacement pod is created.
+ // - Count: indicates that the pod is handled in the default way - the
+ // counter towards the .backoffLimit is incremented.
+ // Additional values may be added in the future. Clients should
+ // react to an unknown action by skipping the rule.
+ Action *batchv1.PodFailurePolicyAction `json:"action,omitempty"`
+ // Represents the requirement on the container exit codes.
+ OnExitCodes *PodFailurePolicyOnExitCodesRequirementApplyConfiguration `json:"onExitCodes,omitempty"`
+ // Represents the requirement on the pod conditions. The requirement is represented
+ // as a list of pod condition patterns. The requirement is satisfied if at
+ // least one pattern matches an actual pod condition. At most 20 elements are allowed.
OnPodConditions []PodFailurePolicyOnPodConditionsPatternApplyConfiguration `json:"onPodConditions,omitempty"`
}
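A minimal sketch (not part of the vendored change) of how the rule documented above is typically assembled with the generated With* builders; the container name "main" and exit code 42 are placeholder values, and attaching the rule to a Job spec is only hinted at in a comment.

package main

import (
    "encoding/json"
    "fmt"

    batchv1 "k8s.io/api/batch/v1"
    batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func main() {
    // Ignore failures whose only signal is exit code 42 in the "main" container,
    // so they do not count towards .spec.backoffLimit (both values are placeholders).
    rule := batchv1ac.PodFailurePolicyRule().
        WithAction(batchv1.PodFailurePolicyActionIgnore).
        WithOnExitCodes(batchv1ac.PodFailurePolicyOnExitCodesRequirement().
            WithContainerName("main").
            WithOperator(batchv1.PodFailurePolicyOnExitCodesOpIn).
            WithValues(42))

    // A full Job spec would attach this via JobSpec().WithPodFailurePolicy(PodFailurePolicy().WithRules(rule)).
    out, _ := json.Marshal(rule)
    fmt.Println(string(out))
}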
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
index a3f4f39e..53ecd3eb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go
@@ -20,7 +20,15 @@ package v1
// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use
// with apply.
+//
+// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
type SuccessPolicyApplyConfiguration struct {
+ // rules represents the list of alternative rules for declaring the Jobs
+ // as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
+ // the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
+ // The terminal state for such a Job has the "Complete" condition.
+ // Additionally, these rules are evaluated in order; once the Job meets one of the rules,
+ // other rules are ignored. At most 20 elements are allowed.
Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
index 2b5e3d91..430054ac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go
@@ -20,9 +20,33 @@ package v1
// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use
// with apply.
+//
+// SuccessPolicyRule describes a rule for declaring a Job as succeeded.
+// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
type SuccessPolicyRuleApplyConfiguration struct {
+ // succeededIndexes specifies the set of indexes
+ // which need to be contained in the actual set of the succeeded indexes for the Job.
+ // The list of indexes must be within 0 to ".spec.completions-1" and
+ // must not contain duplicates. At least one element is required.
+ // The indexes are represented as intervals separated by commas.
+ // The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen.
+ // A pair represents the first and last element of a contiguous series,
+ // separated by a hyphen.
+ // For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
+ // represented as "1,3-5,7".
+ // When this field is null, this field doesn't default to any value
+ // and is never evaluated at any time.
SucceededIndexes *string `json:"succeededIndexes,omitempty"`
- SucceededCount *int32 `json:"succeededCount,omitempty"`
+ // succeededCount specifies the minimal required size of the actual set of the succeeded indexes
+ // for the Job. When succeededCount is used along with succeededIndexes, the check is
+ // constrained only to the set of indexes specified by succeededIndexes.
+ // For example, given that succeededIndexes is "1-4", succeededCount is "3",
+ // and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
+ // because only the "1" and "3" indexes are considered by that rule.
+ // When this field is null, this doesn't default to any value and
+ // is never evaluated at any time.
+ // When specified it needs to be a positive integer.
+ SucceededCount *int32 `json:"succeededCount,omitempty"`
}
// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with
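A small sketch, with a made-up interval and count, of how the succeededIndexes/succeededCount pair described above combines in a success policy built through the generated constructors.

package main

import (
    "encoding/json"
    "fmt"

    batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

func main() {
    // Declare the Job succeeded once any 3 of the indexes 0-4 have completed.
    policy := batchv1ac.SuccessPolicy().WithRules(
        batchv1ac.SuccessPolicyRule().
            WithSucceededIndexes("0-4").
            WithSucceededCount(3))

    // A full Job spec would attach this via JobSpec().WithSuccessPolicy(policy).
    out, _ := json.Marshal(policy)
    fmt.Println(string(out))
}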
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
index ff6b57b8..0b9199d2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
@@ -24,9 +24,14 @@ import (
// UncountedTerminatedPodsApplyConfiguration represents a declarative configuration of the UncountedTerminatedPods type for use
// with apply.
+//
+// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
+// been accounted for in Job status counters.
type UncountedTerminatedPodsApplyConfiguration struct {
+ // succeeded holds UIDs of succeeded Pods.
Succeeded []types.UID `json:"succeeded,omitempty"`
- Failed []types.UID `json:"failed,omitempty"`
+ // failed holds UIDs of failed Pods.
+ Failed []types.UID `json:"failed,omitempty"`
}
// UncountedTerminatedPodsApplyConfiguration constructs a declarative configuration of the UncountedTerminatedPods type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
index 89b181cd..f0ab32de 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
@@ -29,11 +29,19 @@ import (
// CronJobApplyConfiguration represents a declarative configuration of the CronJob type for use
// with apply.
+//
+// CronJob represents the configuration of a single cron job.
type CronJobApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of a cron job, including the schedule.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *CronJobSpecApplyConfiguration `json:"spec,omitempty"`
+ // Current status of a cron job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *CronJobStatusApplyConfiguration `json:"status,omitempty"`
}
// CronJob constructs a declarative configuration of the CronJob type for use with
@@ -47,6 +55,27 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
return b
}
+// ExtractCronJobFrom extracts the applied configuration owned by fieldManager from
+// cronJob for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// cronJob must be an unmodified CronJob API object that was retrieved from the Kubernetes API.
+// ExtractCronJobFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCronJobFrom(cronJob *batchv1beta1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
+ b := &CronJobApplyConfiguration{}
+ err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(cronJob.Name)
+ b.WithNamespace(cronJob.Namespace)
+
+ b.WithKind("CronJob")
+ b.WithAPIVersion("batch/v1beta1")
+ return b, nil
+}
+
// ExtractCronJob extracts the applied configuration owned by fieldManager from
// cronJob. If no managedFields are found in cronJob for fieldManager, a
// CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +86,16 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// ExtractCronJob provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
- return extractCronJob(cronJob, fieldManager, "")
+ return ExtractCronJobFrom(cronJob, fieldManager, "")
}
-// ExtractCronJobStatus is the same as ExtractCronJob except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractCronJobStatus extracts the applied configuration owned by fieldManager from
+// cronJob for the status subresource.
func ExtractCronJobStatus(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
- return extractCronJob(cronJob, fieldManager, "status")
+ return ExtractCronJobFrom(cronJob, fieldManager, "status")
}
-func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
- b := &CronJobApplyConfiguration{}
- err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(cronJob.Name)
- b.WithNamespace(cronJob.Namespace)
-
- b.WithKind("CronJob")
- b.WithAPIVersion("batch/v1beta1")
- return b, nil
-}
func (b CronJobApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
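The added ExtractCronJobFrom enables the extract/modify-in-place/apply workflow mentioned in the comments; the sketch below, with a hypothetical suspendOwnedFields helper and a stand-in CronJob object, shows one plausible shape of that flow (a real caller would Get the object from the API server and send the result back via the typed client's Apply with the same field manager).

package main

import (
    "encoding/json"
    "fmt"

    batchv1beta1 "k8s.io/api/batch/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    batchv1beta1ac "k8s.io/client-go/applyconfigurations/batch/v1beta1"
)

// suspendOwnedFields extracts only the fields of cj owned by fieldManager, sets
// spec.suspend=true on that configuration, and returns it; a caller would send it
// back through the typed client's CronJobs(ns).Apply using the same field manager.
func suspendOwnedFields(cj *batchv1beta1.CronJob, fieldManager string) (*batchv1beta1ac.CronJobApplyConfiguration, error) {
    // An empty subresource selects the main resource (equivalent to ExtractCronJob).
    ac, err := batchv1beta1ac.ExtractCronJobFrom(cj, fieldManager, "")
    if err != nil {
        return nil, err
    }
    if ac.Spec == nil {
        ac.WithSpec(batchv1beta1ac.CronJobSpec())
    }
    ac.Spec.WithSuspend(true)
    return ac, nil
}

func main() {
    // A stand-in object; in practice cj comes from a Get against the API server.
    cj := &batchv1beta1.CronJob{ObjectMeta: metav1.ObjectMeta{Name: "report", Namespace: "default"}}
    ac, err := suspendOwnedFields(cj, "example-manager")
    if err != nil {
        panic(err)
    }
    out, _ := json.Marshal(ac)
    fmt.Println(string(out))
}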
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
index 30604ac7..0a15bc89 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobspec.go
@@ -24,15 +24,44 @@ import (
// CronJobSpecApplyConfiguration represents a declarative configuration of the CronJobSpec type for use
// with apply.
+//
+// CronJobSpec describes what the job execution will look like and when it will actually run.
type CronJobSpecApplyConfiguration struct {
- Schedule *string `json:"schedule,omitempty"`
- TimeZone *string `json:"timeZone,omitempty"`
- StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
- ConcurrencyPolicy *batchv1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
- Suspend *bool `json:"suspend,omitempty"`
- JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
- SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
- FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
+ // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
+ Schedule *string `json:"schedule,omitempty"`
+ // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones.
+ // If not specified, this will default to the time zone of the kube-controller-manager process.
+ // The set of valid time zone names and the time zone offset is loaded from the system-wide time zone
+ // database by the API server during CronJob validation and the controller manager during execution.
+ // If no system-wide time zone database can be found a bundled version of the database is used instead.
+ // If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host
+ // configuration, the controller will stop creating new Jobs and will create a system event with the
+ // reason UnknownTimeZone.
+ // More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones
+ TimeZone *string `json:"timeZone,omitempty"`
+ // Optional deadline in seconds for starting the job if it misses scheduled
+ // time for any reason. Missed job executions will be counted as failed ones.
+ StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty"`
+ // Specifies how to treat concurrent executions of a Job.
+ // Valid values are:
+ //
+ // - "Allow" (default): allows CronJobs to run concurrently;
+ // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet;
+ // - "Replace": cancels currently running job and replaces it with a new one
+ ConcurrencyPolicy *batchv1beta1.ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"`
+ // This flag tells the controller to suspend subsequent executions; it does
+ // not apply to already started executions. Defaults to false.
+ Suspend *bool `json:"suspend,omitempty"`
+ // Specifies the job that will be created when executing a CronJob.
+ JobTemplate *JobTemplateSpecApplyConfiguration `json:"jobTemplate,omitempty"`
+ // The number of successful finished jobs to retain.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 3.
+ SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"`
+ // The number of failed finished jobs to retain.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 1.
+ FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"`
}
// CronJobSpecApplyConfiguration constructs a declarative configuration of the CronJobSpec type for use with
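A brief sketch of the CronJobSpec fields documented above, using the generated builders; the schedule, time zone, and history limits are illustrative values, and the job template is deliberately omitted.

package main

import (
    "encoding/json"
    "fmt"

    batchv1beta1 "k8s.io/api/batch/v1beta1"
    batchv1beta1ac "k8s.io/client-go/applyconfigurations/batch/v1beta1"
)

func main() {
    // A nightly schedule in a fixed time zone, no overlapping runs, trimmed history.
    // The job template is omitted; a real spec would add WithJobTemplate(...).
    spec := batchv1beta1ac.CronJobSpec().
        WithSchedule("0 2 * * *").
        WithTimeZone("Etc/UTC").
        WithStartingDeadlineSeconds(300).
        WithConcurrencyPolicy(batchv1beta1.ForbidConcurrent).
        WithSuccessfulJobsHistoryLimit(3).
        WithFailedJobsHistoryLimit(1)

    out, _ := json.Marshal(spec)
    fmt.Println(string(out))
}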
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
index 335f9e0d..caf58aa6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjobstatus.go
@@ -25,10 +25,15 @@ import (
// CronJobStatusApplyConfiguration represents a declarative configuration of the CronJobStatus type for use
// with apply.
+//
+// CronJobStatus represents the current state of a cron job.
type CronJobStatusApplyConfiguration struct {
- Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
- LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
- LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
+ // A list of pointers to currently running jobs.
+ Active []v1.ObjectReferenceApplyConfiguration `json:"active,omitempty"`
+ // Information about when the job was last successfully scheduled.
+ LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty"`
+ // Information about when the job last successfully completed.
+ LastSuccessfulTime *metav1.Time `json:"lastSuccessfulTime,omitempty"`
}
// CronJobStatusApplyConfiguration constructs a declarative configuration of the CronJobStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
index 5f0fc492..3f6347ac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/jobtemplatespec.go
@@ -27,9 +27,15 @@ import (
// JobTemplateSpecApplyConfiguration represents a declarative configuration of the JobTemplateSpec type for use
// with apply.
+//
+// JobTemplateSpec describes the data a Job should have when created from a template.
type JobTemplateSpecApplyConfiguration struct {
+ // Standard object's metadata of the jobs created from this template.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the job.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *batchv1.JobSpecApplyConfiguration `json:"spec,omitempty"`
}
// JobTemplateSpecApplyConfiguration constructs a declarative configuration of the JobTemplateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
index e78702cb..0769c9f2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
@@ -29,11 +29,27 @@ import (
// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use
// with apply.
+//
+// CertificateSigningRequest objects provide a mechanism to obtain x509 certificates
+// by submitting a certificate signing request, and having it asynchronously approved and issued.
+//
+// Kubelets use this API to obtain:
+// 1. client certificates to authenticate to kube-apiserver (with the "kubernetes.io/kube-apiserver-client-kubelet" signerName).
+// 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the "kubernetes.io/kubelet-serving" signerName).
+//
+// This API can be used to request client certificates to authenticate to kube-apiserver
+// (with the "kubernetes.io/kube-apiserver-client" signerName),
+// or to obtain certificates from custom non-Kubernetes signers.
type CertificateSigningRequestApplyConfiguration struct {
metav1.TypeMetaApplyConfiguration `json:",inline"`
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
+ // spec contains the certificate request, and is immutable after creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
+ // Other fields are derived by Kubernetes and cannot be modified by users.
+ Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
+ // status contains information about whether the request is approved or denied,
+ // and the certificate issued by the signer, or the failure condition indicating signer failure.
+ Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
}
// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with
@@ -46,6 +62,26 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
return b
}
+// ExtractCertificateSigningRequestFrom extracts the applied configuration owned by fieldManager from
+// certificateSigningRequest for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// certificateSigningRequest must be an unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API.
+// ExtractCertificateSigningRequestFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCertificateSigningRequestFrom(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
+ b := &CertificateSigningRequestApplyConfiguration{}
+ err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(certificateSigningRequest.Name)
+
+ b.WithKind("CertificateSigningRequest")
+ b.WithAPIVersion("certificates.k8s.io/v1")
+ return b, nil
+}
+
// ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from
// certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a
// CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +92,22 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// ExtractCertificateSigningRequest provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
- return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
+ return ExtractCertificateSigningRequestFrom(certificateSigningRequest, fieldManager, "")
}
-// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
- return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
+// ExtractCertificateSigningRequestApproval extracts the applied configuration owned by fieldManager from
+// certificateSigningRequest for the approval subresource.
+func ExtractCertificateSigningRequestApproval(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return ExtractCertificateSigningRequestFrom(certificateSigningRequest, fieldManager, "approval")
}
-func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
- b := &CertificateSigningRequestApplyConfiguration{}
- err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(certificateSigningRequest.Name)
-
- b.WithKind("CertificateSigningRequest")
- b.WithAPIVersion("certificates.k8s.io/v1")
- return b, nil
+// ExtractCertificateSigningRequestStatus extracts the applied configuration owned by fieldManager from
+// certificateSigningRequest for the status subresource.
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return ExtractCertificateSigningRequestFrom(certificateSigningRequest, fieldManager, "status")
}
+
func (b CertificateSigningRequestApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
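The added ExtractCertificateSigningRequestApproval targets the /approval subresource; the hypothetical approvedByManager helper below sketches one way to check whether a given field manager owns an Approved condition (a stand-in CSR object replaces a real Get from the API server).

package main

import (
    "fmt"

    certificatesv1 "k8s.io/api/certificates/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    certificatesv1ac "k8s.io/client-go/applyconfigurations/certificates/v1"
)

// approvedByManager reports whether fieldManager owns an "Approved" condition on
// the CSR, i.e. one it wrote through the /approval subresource.
func approvedByManager(csr *certificatesv1.CertificateSigningRequest, fieldManager string) (bool, error) {
    ac, err := certificatesv1ac.ExtractCertificateSigningRequestApproval(csr, fieldManager)
    if err != nil {
        return false, err
    }
    if ac.Status == nil {
        return false, nil
    }
    for _, cond := range ac.Status.Conditions {
        if cond.Type != nil && *cond.Type == certificatesv1.CertificateApproved {
            return true, nil
        }
    }
    return false, nil
}

func main() {
    // A stand-in object; in practice the CSR comes from a Get against the API server.
    csr := &certificatesv1.CertificateSigningRequest{ObjectMeta: metav1.ObjectMeta{Name: "example-csr"}}
    fmt.Println(approvedByManager(csr, "example-approver"))
}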
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
index a6dedcb5..f4f23f05 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestcondition.go
@@ -26,13 +26,38 @@ import (
// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
// with apply.
+//
+// CertificateSigningRequestCondition describes a condition of a CertificateSigningRequest object
type CertificateSigningRequestConditionApplyConfiguration struct {
- Type *certificatesv1.RequestConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // type of the condition. Known conditions are "Approved", "Denied", and "Failed".
+ //
+ // An "Approved" condition is added via the /approval subresource,
+ // indicating the request was approved and should be issued by the signer.
+ //
+ // A "Denied" condition is added via the /approval subresource,
+ // indicating the request was denied and should not be issued by the signer.
+ //
+ // A "Failed" condition is added via the /status subresource,
+ // indicating the signer failed to issue the certificate.
+ //
+ // Approved and Denied conditions are mutually exclusive.
+ // Approved, Denied, and Failed conditions cannot be removed once added.
+ //
+ // Only one condition of a given type is allowed.
+ Type *certificatesv1.RequestConditionType `json:"type,omitempty"`
+ // status of the condition, one of True, False, Unknown.
+ // Approved, Denied, and Failed conditions may not be "False" or "Unknown".
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // reason indicates a brief reason for the request state
+ Reason *string `json:"reason,omitempty"`
+ // message contains a human readable message with details about the request state
+ Message *string `json:"message,omitempty"`
+ // lastUpdateTime is the time of the last update to this condition
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // lastTransitionTime is the time the condition last transitioned from one status to another.
+ // If unset, when a new condition type is added or an existing condition's status is changed,
+ // the server defaults this to the current time.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
index 82da53c9..983ee001 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
@@ -24,15 +24,80 @@ import (
// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
+//
+// CertificateSigningRequestSpec contains the certificate request.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
- Usages []certificatesv1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]certificatesv1.ExtraValue `json:"extra,omitempty"`
+ // request contains an x509 certificate signing request encoded in a "CERTIFICATE REQUEST" PEM block.
+ // When serialized as JSON or YAML, the data is additionally base64-encoded.
+ Request []byte `json:"request,omitempty"`
+ // signerName indicates the requested signer, and is a qualified name.
+ //
+ // List/watch requests for CertificateSigningRequests can filter on this field using a "spec.signerName=NAME" fieldSelector.
+ //
+ // Well-known Kubernetes signers are:
+ // 1. "kubernetes.io/kube-apiserver-client": issues client certificates that can be used to authenticate to kube-apiserver.
+ // Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the "csrsigning" controller in kube-controller-manager.
+ // 2. "kubernetes.io/kube-apiserver-client-kubelet": issues client certificates that kubelets use to authenticate to kube-apiserver.
+ // Requests for this signer can be auto-approved by the "csrapproving" controller in kube-controller-manager, and can be issued by the "csrsigning" controller in kube-controller-manager.
+ // 3. "kubernetes.io/kubelet-serving" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.
+ // Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the "csrsigning" controller in kube-controller-manager.
+ //
+ // More details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers
+ //
+ // Custom signerNames can also be specified. The signer defines:
+ // 1. Trust distribution: how trust (CA bundles) are distributed.
+ // 2. Permitted subjects: and behavior when a disallowed subject is requested.
+ // 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.
+ // 4. Required, permitted, or forbidden key usages / extended key usages.
+ // 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.
+ // 6. Whether or not requests for CA certificates are allowed.
+ SignerName *string `json:"signerName,omitempty"`
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore and
+ // notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ // usages specifies a set of key usages requested in the issued certificate.
+ //
+ // Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth".
+ //
+ // Requests for TLS serving certificates typically request: "key encipherment", "digital signature", "server auth".
+ //
+ // Valid values are:
+ // "signing", "digital signature", "content commitment",
+ // "key encipherment", "key agreement", "data encipherment",
+ // "cert sign", "crl sign", "encipher only", "decipher only", "any",
+ // "server auth", "client auth",
+ // "code signing", "email protection", "s/mime",
+ // "ipsec end system", "ipsec tunnel", "ipsec user",
+ // "timestamping", "ocsp signing", "microsoft sgc", "netscape sgc"
+ Usages []certificatesv1.KeyUsage `json:"usages,omitempty"`
+ // username contains the name of the user that created the CertificateSigningRequest.
+ // Populated by the API server on creation and immutable.
+ Username *string `json:"username,omitempty"`
+ // uid contains the uid of the user that created the CertificateSigningRequest.
+ // Populated by the API server on creation and immutable.
+ UID *string `json:"uid,omitempty"`
+ // groups contains group membership of the user that created the CertificateSigningRequest.
+ // Populated by the API server on creation and immutable.
+ Groups []string `json:"groups,omitempty"`
+ // extra contains extra attributes of the user that created the CertificateSigningRequest.
+ // Populated by the API server on creation and immutable.
+ Extra map[string]certificatesv1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
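A sketch of a client-certificate request built with the generated builders, reflecting the signerName, expirationSeconds, and usages guidance above; the CSR PEM content is a placeholder, and WithRequest is assumed to take its bytes variadically like the other generated slice setters.

package main

import (
    "encoding/json"
    "fmt"

    certificatesv1 "k8s.io/api/certificates/v1"
    certificatesv1ac "k8s.io/client-go/applyconfigurations/certificates/v1"
)

func main() {
    // csrPEM stands in for a real "CERTIFICATE REQUEST" PEM block produced elsewhere.
    csrPEM := []byte("-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n")

    // A client-certificate request against the built-in signer, valid for one hour.
    csr := certificatesv1ac.CertificateSigningRequest("example-client-cert").
        WithSpec(certificatesv1ac.CertificateSigningRequestSpec().
            WithRequest(csrPEM...).
            WithSignerName("kubernetes.io/kube-apiserver-client").
            WithExpirationSeconds(3600).
            WithUsages(certificatesv1.UsageDigitalSignature,
                certificatesv1.UsageKeyEncipherment,
                certificatesv1.UsageClientAuth))

    out, _ := json.Marshal(csr)
    fmt.Println(string(out))
}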
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
index 897f6d1e..1eb79965 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequeststatus.go
@@ -20,9 +20,39 @@ package v1
// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use
// with apply.
+//
+// CertificateSigningRequestStatus contains conditions used to indicate
+// approved/denied/failed status of the request, and the issued certificate.
type CertificateSigningRequestStatusApplyConfiguration struct {
- Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
- Certificate []byte `json:"certificate,omitempty"`
+ // conditions applied to the request. Known conditions are "Approved", "Denied", and "Failed".
+ Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
+ // certificate is populated with an issued certificate by the signer after an Approved condition is present.
+ // This field is set via the /status subresource. Once populated, this field is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type "Denied" is added and this field remains empty.
+ // If the signer cannot issue the certificate, a condition of type "Failed" is added and this field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificate must contain one or more PEM blocks.
+ // 2. All PEM blocks must have the "CERTIFICATE" label, contain no headers, and the encoded data
+ // must be a BER-encoded ASN.1 Certificate structure as described in section 4 of RFC5280.
+ // 3. Non-PEM content may appear before or after the "CERTIFICATE" PEM blocks and is unvalidated,
+ // to allow for explanatory text as described in section 5.2 of RFC7468.
+ //
+ // If more than one PEM block is present, and the definition of the requested spec.signerName
+ // does not indicate otherwise, the first block is the issued certificate,
+ // and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes.
+ //
+ // The certificate is encoded in PEM format.
+ //
+ // When serialized as JSON or YAML, the data is additionally base64-encoded, so it consists of:
+ //
+ // base64(
+ // -----BEGIN CERTIFICATE-----
+ // ...
+ // -----END CERTIFICATE-----
+ // )
+ Certificate []byte `json:"certificate,omitempty"`
}
// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
index 82c2efc2..9c26cdb9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundle.go
@@ -29,10 +29,28 @@ import (
// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use
// with apply.
+//
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can be optionally associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
type ClusterTrustBundleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata contains the object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the signer (if any) and trust anchors.
+ Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"`
}
// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with
@@ -45,29 +63,14 @@ func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration {
return b
}
-// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from
-// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a
-// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterTrustBundleFrom extracts the applied configuration owned by fieldManager from
+// clusterTrustBundle for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API.
-// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterTrustBundleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
- return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "")
-}
-
-// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
- return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status")
-}
-
-func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) {
+func ExtractClusterTrustBundleFrom(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) {
b := &ClusterTrustBundleApplyConfiguration{}
err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.ClusterTrustBundle"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +82,21 @@ func extractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterT
b.WithAPIVersion("certificates.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from
+// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a
+// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterTrustBundle must be an unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API.
+// ExtractClusterTrustBundle provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1alpha1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
+ return ExtractClusterTrustBundleFrom(clusterTrustBundle, fieldManager, "")
+}
+
func (b ClusterTrustBundleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
index 7bb36f70..14ba99c6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/clustertrustbundlespec.go
@@ -20,8 +20,39 @@ package v1alpha1
// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use
// with apply.
+//
+// ClusterTrustBundleSpec contains the signer and trust anchors.
type ClusterTrustBundleSpecApplyConfiguration struct {
- SignerName *string `json:"signerName,omitempty"`
+ // signerName indicates the associated signer, if any.
+ //
+ // In order to create or update a ClusterTrustBundle that sets signerName,
+ // you must have the following cluster-scoped permission:
+ // group=certificates.k8s.io resource=signers resourceName=<the signer name>
+ // verb=attest.
+ //
+ // If signerName is not empty, then the ClusterTrustBundle object must be
+ // named with the signer name as a prefix (translating slashes to colons).
+ // For example, for the signer name `example.com/foo`, valid
+ // ClusterTrustBundle object names include `example.com:foo:abc` and
+ // `example.com:foo:v1`.
+ //
+ // If signerName is empty, then the ClusterTrustBundle object's name must
+ // not have such a prefix.
+ //
+ // List/watch requests for ClusterTrustBundles can filter on this field
+ // using a `spec.signerName=NAME` field selector.
+ SignerName *string `json:"signerName,omitempty"`
+ // trustBundle contains the individual X.509 trust anchors for this
+ // bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+ //
+ // The data must consist only of PEM certificate blocks that parse as valid
+ // X.509 certificates. Each certificate must include a basic constraints
+ // extension with the CA bit set. The API server will reject objects that
+ // contain duplicate certificates, or that use PEM block headers.
+ //
+ // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+ // deduplicate certificate blocks in this file according to their own logic,
+ // as well as to drop PEM block headers and inter-block data.
TrustBundle *string `json:"trustBundle,omitempty"`
}
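A short sketch of the naming convention described above: a bundle for a hypothetical signer "example.com/foo" is named with the "example.com:foo:" prefix (slashes translated to colons); the trust-anchor PEM content is a placeholder.

package main

import (
    "encoding/json"
    "fmt"

    certificatesv1alpha1ac "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
)

func main() {
    // trustAnchors stands in for a PEM bundle of CA certificates.
    trustAnchors := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

    // A bundle for the signer "example.com/foo" must carry the "example.com:foo:"
    // name prefix, with the slash in the signer name translated to a colon.
    bundle := certificatesv1alpha1ac.ClusterTrustBundle("example.com:foo:v1").
        WithSpec(certificatesv1alpha1ac.ClusterTrustBundleSpec().
            WithSignerName("example.com/foo").
            WithTrustBundle(trustAnchors))

    out, _ := json.Marshal(bundle)
    fmt.Println(string(out))
}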
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
index 49009d3b..05e16d20 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
@@ -29,11 +29,17 @@ import (
// CertificateSigningRequestApplyConfiguration represents a declarative configuration of the CertificateSigningRequest type for use
// with apply.
+//
+// Describes a certificate signing request
type CertificateSigningRequestApplyConfiguration struct {
v1.TypeMetaApplyConfiguration `json:",inline"`
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
- Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
+ // spec contains the certificate request, and is immutable after creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
+ // Other fields are derived by Kubernetes and cannot be modified by users.
+ Spec *CertificateSigningRequestSpecApplyConfiguration `json:"spec,omitempty"`
+ // Derived information about the request.
+ Status *CertificateSigningRequestStatusApplyConfiguration `json:"status,omitempty"`
}
// CertificateSigningRequest constructs a declarative configuration of the CertificateSigningRequest type for use with
@@ -46,6 +52,26 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
return b
}
+// ExtractCertificateSigningRequestFrom extracts the applied configuration owned by fieldManager from
+// certificateSigningRequest for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// certificateSigningRequest must be an unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API.
+// ExtractCertificateSigningRequestFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCertificateSigningRequestFrom(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
+ b := &CertificateSigningRequestApplyConfiguration{}
+ err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(certificateSigningRequest.Name)
+
+ b.WithKind("CertificateSigningRequest")
+ b.WithAPIVersion("certificates.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from
// certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a
// CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +82,16 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// ExtractCertificateSigningRequest provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
- return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
+ return ExtractCertificateSigningRequestFrom(certificateSigningRequest, fieldManager, "")
}
-// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractCertificateSigningRequestStatus extracts the applied configuration owned by fieldManager from
+// certificateSigningRequest for the status subresource.
func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
- return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
+ return ExtractCertificateSigningRequestFrom(certificateSigningRequest, fieldManager, "status")
}
-func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
- b := &CertificateSigningRequestApplyConfiguration{}
- err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(certificateSigningRequest.Name)
-
- b.WithKind("CertificateSigningRequest")
- b.WithAPIVersion("certificates.k8s.io/v1beta1")
- return b, nil
-}
func (b CertificateSigningRequestApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
index a845ec40..f88fc10c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestcondition.go
@@ -27,12 +27,23 @@ import (
// CertificateSigningRequestConditionApplyConfiguration represents a declarative configuration of the CertificateSigningRequestCondition type for use
// with apply.
type CertificateSigningRequestConditionApplyConfiguration struct {
- Type *certificatesv1beta1.RequestConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // type of the condition. Known conditions include "Approved", "Denied", and "Failed".
+ Type *certificatesv1beta1.RequestConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ // Approved, Denied, and Failed conditions may not be "False" or "Unknown".
+ // Defaults to "True".
+ // If unset, should be treated as "True".
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // brief reason for the request state
+ Reason *string `json:"reason,omitempty"`
+ // human readable message with details about the request state
+ Message *string `json:"message,omitempty"`
+ // timestamp for the last update to this condition
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // lastTransitionTime is the time the condition last transitioned from one status to another.
+ // If unset, when a new condition type is added or an existing condition's status is changed,
+ // the server defaults this to the current time.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CertificateSigningRequestConditionApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
index ee4016c7..f5186620 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
@@ -24,15 +24,84 @@ import (
// CertificateSigningRequestSpecApplyConfiguration represents a declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
+//
+// CertificateSigningRequestSpec contains the certificate request.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
- Usages []certificatesv1beta1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]certificatesv1beta1.ExtraValue `json:"extra,omitempty"`
+ // Base64-encoded PKCS#10 CSR data
+ Request []byte `json:"request,omitempty"`
+ // Requested signer for the request. It is a qualified name in the form:
+ // `scope-hostname.io/name`.
+ // If empty, it will be defaulted:
+ // 1. If it's a kubelet client certificate, it is assigned
+ // "kubernetes.io/kube-apiserver-client-kubelet".
+ // 2. If it's a kubelet serving certificate, it is assigned
+ // "kubernetes.io/kubelet-serving".
+ // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown".
+ // Distribution of trust for signers happens out of band.
+ // You can select on this field using `spec.signerName`.
+ SignerName *string `json:"signerName,omitempty"`
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore
+ // and notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ // allowedUsages specifies a set of usage contexts the key will be
+ // valid for.
+ // See:
+ // https://tools.ietf.org/html/rfc5280#section-4.2.1.3
+ // https://tools.ietf.org/html/rfc5280#section-4.2.1.12
+ //
+ // Valid values are:
+ // "signing",
+ // "digital signature",
+ // "content commitment",
+ // "key encipherment",
+ // "key agreement",
+ // "data encipherment",
+ // "cert sign",
+ // "crl sign",
+ // "encipher only",
+ // "decipher only",
+ // "any",
+ // "server auth",
+ // "client auth",
+ // "code signing",
+ // "email protection",
+ // "s/mime",
+ // "ipsec end system",
+ // "ipsec tunnel",
+ // "ipsec user",
+ // "timestamping",
+ // "ocsp signing",
+ // "microsoft sgc",
+ // "netscape sgc"
+ Usages []certificatesv1beta1.KeyUsage `json:"usages,omitempty"`
+ // Information about the requesting user.
+ // See user.Info interface for details.
+ Username *string `json:"username,omitempty"`
+ // UID information about the requesting user.
+ // See user.Info interface for details.
+ UID *string `json:"uid,omitempty"`
+ // Group information about the requesting user.
+ // See user.Info interface for details.
+ Groups []string `json:"groups,omitempty"`
+ // Extra information about the requesting user.
+ // See user.Info interface for details.
+ Extra map[string]certificatesv1beta1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestSpec type for use with
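
Note: the spec fields documented above are set through the generated With* builders. A minimal sketch (not part of this diff) of building a v1beta1 CSR apply configuration; the object name, signer, and usages are illustrative, and pemCSR is assumed to hold PEM-encoded PKCS#10 data.

package example

import (
	certv1beta1 "k8s.io/api/certificates/v1beta1"
	acv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
)

// buildCSR maps the documented CertificateSigningRequestSpec fields onto the
// generated builders. The signer and expiration are example values only.
func buildCSR(pemCSR []byte) *acv1beta1.CertificateSigningRequestApplyConfiguration {
	return acv1beta1.CertificateSigningRequest("example-client-cert").
		WithSpec(acv1beta1.CertificateSigningRequestSpec().
			WithRequest(pemCSR...).
			WithSignerName("kubernetes.io/kube-apiserver-client").
			WithExpirationSeconds(3600). // 600 (10 minutes) is the documented minimum
			WithUsages(certv1beta1.UsageDigitalSignature, certv1beta1.UsageClientAuth))
}
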
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
index f82e8aed..f2de2570 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequeststatus.go
@@ -21,8 +21,10 @@ package v1beta1
// CertificateSigningRequestStatusApplyConfiguration represents a declarative configuration of the CertificateSigningRequestStatus type for use
// with apply.
type CertificateSigningRequestStatusApplyConfiguration struct {
- Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
- Certificate []byte `json:"certificate,omitempty"`
+ // Conditions applied to the request, such as approval or denial.
+ Conditions []CertificateSigningRequestConditionApplyConfiguration `json:"conditions,omitempty"`
+ // If the request was approved, the controller will place the issued certificate here.
+ Certificate []byte `json:"certificate,omitempty"`
}
// CertificateSigningRequestStatusApplyConfiguration constructs a declarative configuration of the CertificateSigningRequestStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go
index dc0dab1a..7f8be00c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundle.go
@@ -29,10 +29,28 @@ import (
// ClusterTrustBundleApplyConfiguration represents a declarative configuration of the ClusterTrustBundle type for use
// with apply.
+//
+// ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors
+// (root certificates).
+//
+// ClusterTrustBundle objects are considered to be readable by any authenticated
+// user in the cluster, because they can be mounted by pods using the
+// `clusterTrustBundle` projection. All service accounts have read access to
+// ClusterTrustBundles by default. Users who only have namespace-level access
+// to a cluster can read ClusterTrustBundles by impersonating a serviceaccount
+// that they have access to.
+//
+// It can be optionally associated with a particular signer, in which case it
+// contains one valid set of trust anchors for that signer. Signers may have
+// multiple associated ClusterTrustBundles; each is an independent set of trust
+// anchors for that signer. Admission control is used to enforce that only users
+// with permissions on the signer can create or modify the corresponding bundle.
type ClusterTrustBundleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata contains the object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the signer (if any) and trust anchors.
+ Spec *ClusterTrustBundleSpecApplyConfiguration `json:"spec,omitempty"`
}
// ClusterTrustBundle constructs a declarative configuration of the ClusterTrustBundle type for use with
@@ -45,29 +63,14 @@ func ClusterTrustBundle(name string) *ClusterTrustBundleApplyConfiguration {
return b
}
-// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from
-// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a
-// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterTrustBundleFrom extracts the applied configuration owned by fieldManager from
+// clusterTrustBundle for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterTrustBundle must be a unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API.
-// ExtractClusterTrustBundle provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterTrustBundleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1beta1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
- return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "")
-}
-
-// ExtractClusterTrustBundleStatus is the same as ExtractClusterTrustBundle except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterTrustBundleStatus(clusterTrustBundle *certificatesv1beta1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
- return extractClusterTrustBundle(clusterTrustBundle, fieldManager, "status")
-}
-
-func extractClusterTrustBundle(clusterTrustBundle *certificatesv1beta1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) {
+func ExtractClusterTrustBundleFrom(clusterTrustBundle *certificatesv1beta1.ClusterTrustBundle, fieldManager string, subresource string) (*ClusterTrustBundleApplyConfiguration, error) {
b := &ClusterTrustBundleApplyConfiguration{}
err := managedfields.ExtractInto(clusterTrustBundle, internal.Parser().Type("io.k8s.api.certificates.v1beta1.ClusterTrustBundle"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +82,21 @@ func extractClusterTrustBundle(clusterTrustBundle *certificatesv1beta1.ClusterTr
b.WithAPIVersion("certificates.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractClusterTrustBundle extracts the applied configuration owned by fieldManager from
+// clusterTrustBundle. If no managedFields are found in clusterTrustBundle for fieldManager, a
+// ClusterTrustBundleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterTrustBundle must be an unmodified ClusterTrustBundle API object that was retrieved from the Kubernetes API.
+// ExtractClusterTrustBundle provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterTrustBundle(clusterTrustBundle *certificatesv1beta1.ClusterTrustBundle, fieldManager string) (*ClusterTrustBundleApplyConfiguration, error) {
+ return ExtractClusterTrustBundleFrom(clusterTrustBundle, fieldManager, "")
+}
+
func (b ClusterTrustBundleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
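
Note: this hunk replaces the unexported extract helper with the exported ExtractClusterTrustBundleFrom, which takes an explicit subresource argument. A hedged sketch of the extract/modify-in-place/apply round trip it enables, assuming a typed clientset that exposes CertificatesV1beta1().ClusterTrustBundles(); the object name and field manager are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	acv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// rotateBundle extracts the fields owned by "trust-rotator" (an empty
// subresource selects the main resource), swaps in a new PEM bundle,
// and applies it back.
func rotateBundle(ctx context.Context, cs kubernetes.Interface, newPEM string) error {
	live, err := cs.CertificatesV1beta1().ClusterTrustBundles().Get(ctx, "example.com:foo:v1", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := acv1beta1.ExtractClusterTrustBundleFrom(live, "trust-rotator", "")
	if err != nil {
		return err
	}
	// Re-declare only the fields this field manager should own.
	ac.WithSpec(acv1beta1.ClusterTrustBundleSpec().
		WithSignerName("example.com/foo").
		WithTrustBundle(newPEM))
	_, err = cs.CertificatesV1beta1().ClusterTrustBundles().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "trust-rotator", Force: true})
	return err
}
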
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go
index 157a9efa..a130051b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/clustertrustbundlespec.go
@@ -20,8 +20,39 @@ package v1beta1
// ClusterTrustBundleSpecApplyConfiguration represents a declarative configuration of the ClusterTrustBundleSpec type for use
// with apply.
+//
+// ClusterTrustBundleSpec contains the signer and trust anchors.
type ClusterTrustBundleSpecApplyConfiguration struct {
- SignerName *string `json:"signerName,omitempty"`
+ // signerName indicates the associated signer, if any.
+ //
+ // In order to create or update a ClusterTrustBundle that sets signerName,
+ // you must have the following cluster-scoped permission:
+ // group=certificates.k8s.io resource=signers resourceName=<the signer name>
+ // verb=attest.
+ //
+ // If signerName is not empty, then the ClusterTrustBundle object must be
+ // named with the signer name as a prefix (translating slashes to colons).
+ // For example, for the signer name `example.com/foo`, valid
+ // ClusterTrustBundle object names include `example.com:foo:abc` and
+ // `example.com:foo:v1`.
+ //
+ // If signerName is empty, then the ClusterTrustBundle object's name must
+ // not have such a prefix.
+ //
+ // List/watch requests for ClusterTrustBundles can filter on this field
+ // using a `spec.signerName=NAME` field selector.
+ SignerName *string `json:"signerName,omitempty"`
+ // trustBundle contains the individual X.509 trust anchors for this
+ // bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.
+ //
+ // The data must consist only of PEM certificate blocks that parse as valid
+ // X.509 certificates. Each certificate must include a basic constraints
+ // extension with the CA bit set. The API server will reject objects that
+ // contain duplicate certificates, or that use PEM block headers.
+ //
+ // Users of ClusterTrustBundles, including Kubelet, are free to reorder and
+ // deduplicate certificate blocks in this file according to their own logic,
+ // as well as to drop PEM block headers and inter-block data.
TrustBundle *string `json:"trustBundle,omitempty"`
}
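
Note: the signerName documentation above mentions the `spec.signerName` field selector. A small hedged sketch (not part of this diff) of listing bundles for one signer, assuming a typed clientset with the v1beta1 ClusterTrustBundles client; the signer name is illustrative.

package example

import (
	"context"

	certv1beta1 "k8s.io/api/certificates/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listBundlesForSigner lists every ClusterTrustBundle associated with the
// given signer, e.g. "example.com/foo", via the spec.signerName selector.
func listBundlesForSigner(ctx context.Context, cs kubernetes.Interface, signer string) (*certv1beta1.ClusterTrustBundleList, error) {
	return cs.CertificatesV1beta1().ClusterTrustBundles().List(ctx, metav1.ListOptions{
		FieldSelector: "spec.signerName=" + signer,
	})
}
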
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequest.go
similarity index 85%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequest.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequest.go
index df6d15bf..44717255 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequest.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
- certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -29,11 +29,19 @@ import (
// PodCertificateRequestApplyConfiguration represents a declarative configuration of the PodCertificateRequest type for use
// with apply.
+//
+// PodCertificateRequest encodes a pod requesting a certificate from a given
+// signer.
+//
+// Kubelets use this API to implement podCertificate projected volumes
type PodCertificateRequestApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata contains the object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodCertificateRequestSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodCertificateRequestStatusApplyConfiguration `json:"status,omitempty"`
+ // spec contains the details about the certificate being requested.
+ Spec *PodCertificateRequestSpecApplyConfiguration `json:"spec,omitempty"`
+ // status contains the issued certificate, and a standard set of conditions.
+ Status *PodCertificateRequestStatusApplyConfiguration `json:"status,omitempty"`
}
// PodCertificateRequest constructs a declarative configuration of the PodCertificateRequest type for use with
@@ -43,10 +51,31 @@ func PodCertificateRequest(name, namespace string) *PodCertificateRequestApplyCo
b.WithName(name)
b.WithNamespace(namespace)
b.WithKind("PodCertificateRequest")
- b.WithAPIVersion("certificates.k8s.io/v1alpha1")
+ b.WithAPIVersion("certificates.k8s.io/v1beta1")
return b
}
+// ExtractPodCertificateRequestFrom extracts the applied configuration owned by fieldManager from
+// podCertificateRequest for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// podCertificateRequest must be an unmodified PodCertificateRequest API object that was retrieved from the Kubernetes API.
+// ExtractPodCertificateRequestFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPodCertificateRequestFrom(podCertificateRequest *certificatesv1beta1.PodCertificateRequest, fieldManager string, subresource string) (*PodCertificateRequestApplyConfiguration, error) {
+ b := &PodCertificateRequestApplyConfiguration{}
+ err := managedfields.ExtractInto(podCertificateRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.PodCertificateRequest"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(podCertificateRequest.Name)
+ b.WithNamespace(podCertificateRequest.Namespace)
+
+ b.WithKind("PodCertificateRequest")
+ b.WithAPIVersion("certificates.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractPodCertificateRequest extracts the applied configuration owned by fieldManager from
// podCertificateRequest. If no managedFields are found in podCertificateRequest for fieldManager, a
// PodCertificateRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +86,16 @@ func PodCertificateRequest(name, namespace string) *PodCertificateRequestApplyCo
// ExtractPodCertificateRequest provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractPodCertificateRequest(podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, fieldManager string) (*PodCertificateRequestApplyConfiguration, error) {
- return extractPodCertificateRequest(podCertificateRequest, fieldManager, "")
+func ExtractPodCertificateRequest(podCertificateRequest *certificatesv1beta1.PodCertificateRequest, fieldManager string) (*PodCertificateRequestApplyConfiguration, error) {
+ return ExtractPodCertificateRequestFrom(podCertificateRequest, fieldManager, "")
}
-// ExtractPodCertificateRequestStatus is the same as ExtractPodCertificateRequest except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPodCertificateRequestStatus(podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, fieldManager string) (*PodCertificateRequestApplyConfiguration, error) {
- return extractPodCertificateRequest(podCertificateRequest, fieldManager, "status")
+// ExtractPodCertificateRequestStatus extracts the applied configuration owned by fieldManager from
+// podCertificateRequest for the status subresource.
+func ExtractPodCertificateRequestStatus(podCertificateRequest *certificatesv1beta1.PodCertificateRequest, fieldManager string) (*PodCertificateRequestApplyConfiguration, error) {
+ return ExtractPodCertificateRequestFrom(podCertificateRequest, fieldManager, "status")
}
-func extractPodCertificateRequest(podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, fieldManager string, subresource string) (*PodCertificateRequestApplyConfiguration, error) {
- b := &PodCertificateRequestApplyConfiguration{}
- err := managedfields.ExtractInto(podCertificateRequest, internal.Parser().Type("io.k8s.api.certificates.v1alpha1.PodCertificateRequest"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(podCertificateRequest.Name)
- b.WithNamespace(podCertificateRequest.Namespace)
-
- b.WithKind("PodCertificateRequest")
- b.WithAPIVersion("certificates.k8s.io/v1alpha1")
- return b, nil
-}
func (b PodCertificateRequestApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequestspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequestspec.go
similarity index 53%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequestspec.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequestspec.go
index 2ceb9bb2..234420c4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequestspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequestspec.go
@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
types "k8s.io/apimachinery/pkg/types"
@@ -24,17 +24,89 @@ import (
// PodCertificateRequestSpecApplyConfiguration represents a declarative configuration of the PodCertificateRequestSpec type for use
// with apply.
+//
+// PodCertificateRequestSpec describes the certificate request. All fields are
+// immutable after creation.
type PodCertificateRequestSpecApplyConfiguration struct {
- SignerName *string `json:"signerName,omitempty"`
- PodName *string `json:"podName,omitempty"`
- PodUID *types.UID `json:"podUID,omitempty"`
- ServiceAccountName *string `json:"serviceAccountName,omitempty"`
- ServiceAccountUID *types.UID `json:"serviceAccountUID,omitempty"`
- NodeName *types.NodeName `json:"nodeName,omitempty"`
- NodeUID *types.UID `json:"nodeUID,omitempty"`
- MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty"`
- PKIXPublicKey []byte `json:"pkixPublicKey,omitempty"`
- ProofOfPossession []byte `json:"proofOfPossession,omitempty"`
+ // signerName indicates the requested signer.
+ //
+ // All signer names beginning with `kubernetes.io` are reserved for use by
+ // the Kubernetes project. There is currently one well-known signer
+ // documented by the Kubernetes project,
+ // `kubernetes.io/kube-apiserver-client-pod`, which will issue client
+ // certificates understood by kube-apiserver. It is currently
+ // unimplemented.
+ SignerName *string `json:"signerName,omitempty"`
+ // podName is the name of the pod into which the certificate will be mounted.
+ PodName *string `json:"podName,omitempty"`
+ // podUID is the UID of the pod into which the certificate will be mounted.
+ PodUID *types.UID `json:"podUID,omitempty"`
+ // serviceAccountName is the name of the service account the pod is running as.
+ ServiceAccountName *string `json:"serviceAccountName,omitempty"`
+ // serviceAccountUID is the UID of the service account the pod is running as.
+ ServiceAccountUID *types.UID `json:"serviceAccountUID,omitempty"`
+ // nodeName is the name of the node the pod is assigned to.
+ NodeName *types.NodeName `json:"nodeName,omitempty"`
+ // nodeUID is the UID of the node the pod is assigned to.
+ NodeUID *types.UID `json:"nodeUID,omitempty"`
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty"`
+ // pkixPublicKey is the PKIX-serialized public key the signer will issue the
+ // certificate to.
+ //
+ // The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
+ // or ED25519. Note that this list may be expanded in the future.
+ //
+ // Signer implementations do not need to support all key types supported by
+ // kube-apiserver and kubelet. If a signer does not support the key type
+ // used for a given PodCertificateRequest, it must deny the request by
+ // setting a status.conditions entry with a type of "Denied" and a reason of
+ // "UnsupportedKeyType". It may also suggest a key type that it does support
+ // in the message field.
+ PKIXPublicKey []byte `json:"pkixPublicKey,omitempty"`
+ // proofOfPossession proves that the requesting kubelet holds the private
+ // key corresponding to pkixPublicKey.
+ //
+ // It is constructed by signing the ASCII bytes of the pod's UID using
+ // `pkixPublicKey`.
+ //
+ // kube-apiserver validates the proof of possession during creation of the
+ // PodCertificateRequest.
+ //
+ // If the key is an RSA key, then the signature is over the ASCII bytes of
+ // the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
+ // function crypto/rsa.SignPSS with nil options).
+ //
+ // If the key is an ECDSA key, then the signature is as described by [SEC 1,
+ // Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
+ // golang library function crypto/ecdsa.SignASN1)
+ //
+ // If the key is an ED25519 key, then the signature is as described by the
+ // [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
+ // the golang library crypto/ed25519.Sign).
+ ProofOfPossession []byte `json:"proofOfPossession,omitempty"`
+ // unverifiedUserAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ UnverifiedUserAnnotations map[string]string `json:"unverifiedUserAnnotations,omitempty"`
}
// PodCertificateRequestSpecApplyConfiguration constructs a declarative configuration of the PodCertificateRequestSpec type for use with
@@ -126,3 +198,17 @@ func (b *PodCertificateRequestSpecApplyConfiguration) WithProofOfPossession(valu
}
return b
}
+
+// WithUnverifiedUserAnnotations puts the entries into the UnverifiedUserAnnotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the UnverifiedUserAnnotations field,
+// overwriting existing map entries in the UnverifiedUserAnnotations field with the same key.
+func (b *PodCertificateRequestSpecApplyConfiguration) WithUnverifiedUserAnnotations(entries map[string]string) *PodCertificateRequestSpecApplyConfiguration {
+ if b.UnverifiedUserAnnotations == nil && len(entries) > 0 {
+ b.UnverifiedUserAnnotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.UnverifiedUserAnnotations[k] = v
+ }
+ return b
+}
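
Note: WithUnverifiedUserAnnotations above is the generated map builder for the new field. A minimal hedged sketch of using it together with the other documented spec fields; the signer name and annotation key are illustrative (keys must be domain-prefixed per the field documentation).

package example

import acv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"

// podCertSpec builds a PodCertificateRequestSpec apply configuration that
// passes extra, unvalidated hints to a hypothetical signer implementation.
func podCertSpec() *acv1beta1.PodCertificateRequestSpecApplyConfiguration {
	return acv1beta1.PodCertificateRequestSpec().
		WithSignerName("example.com/pod-signer").
		WithMaxExpirationSeconds(86400). // 86400 (24 hours) is the documented kube-apiserver default
		WithUnverifiedUserAnnotations(map[string]string{
			"example.com/profile": "server-tls",
		})
}
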
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequeststatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequeststatus.go
similarity index 56%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequeststatus.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequeststatus.go
index ed5f52e7..ae3e9569 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1alpha1/podcertificaterequeststatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/podcertificaterequeststatus.go
@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,12 +25,60 @@ import (
// PodCertificateRequestStatusApplyConfiguration represents a declarative configuration of the PodCertificateRequestStatus type for use
// with apply.
+//
+// PodCertificateRequestStatus describes the status of the request, and holds
+// the certificate data if the request is issued.
type PodCertificateRequestStatusApplyConfiguration struct {
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
- CertificateChain *string `json:"certificateChain,omitempty"`
- NotBefore *metav1.Time `json:"notBefore,omitempty"`
- BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty"`
- NotAfter *metav1.Time `json:"notAfter,omitempty"`
+ // conditions applied to the request.
+ //
+ // The types "Issued", "Denied", and "Failed" have special handling. At
+ // most one of these conditions may be present, and they must have status
+ // "True".
+ //
+ // If the request is denied with `Reason=UnsupportedKeyType`, the signer may
+ // suggest a key type that will work in the message field.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // certificateChain is populated with an issued certificate by the signer.
+ // This field is set via the /status subresource. Once populated, this field
+ // is immutable.
+ //
+ // If the certificate signing request is denied, a condition of type
+ // "Denied" is added and this field remains empty. If the signer cannot
+ // issue the certificate, a condition of type "Failed" is added and this
+ // field remains empty.
+ //
+ // Validation requirements:
+ // 1. certificateChain must consist of one or more PEM-formatted certificates.
+ // 2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
+ // described in section 4 of RFC5280.
+ //
+ // If more than one block is present, and the definition of the requested
+ // spec.signerName does not indicate otherwise, the first block is the
+ // issued certificate, and subsequent blocks should be treated as
+ // intermediate certificates and presented in TLS handshakes. When
+ // projecting the chain into a pod volume, kubelet will drop any data
+ // in-between the PEM blocks, as well as any PEM block headers.
+ CertificateChain *string `json:"certificateChain,omitempty"`
+ // notBefore is the time at which the certificate becomes valid. The value
+ // must be the same as the notBefore value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ NotBefore *metav1.Time `json:"notBefore,omitempty"`
+ // beginRefreshAt is the time at which the kubelet should begin trying to
+ // refresh the certificate. This field is set via the /status subresource,
+ // and must be set at the same time as certificateChain. Once populated,
+ // this field is immutable.
+ //
+ // This field is only a hint. Kubelet may start refreshing before or after
+ // this time if necessary.
+ BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty"`
+ // notAfter is the time at which the certificate expires. The value must be
+ // the same as the notAfter value in the leaf certificate in
+ // certificateChain. This field is set via the /status subresource. Once
+ // populated, it is immutable. The signer must set this field at the same
+ // time it sets certificateChain.
+ NotAfter *metav1.Time `json:"notAfter,omitempty"`
}
// PodCertificateRequestStatusApplyConfiguration constructs a declarative configuration of the PodCertificateRequestStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
index 19183456..de183397 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
@@ -29,10 +29,15 @@ import (
// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use
// with apply.
+//
+// Lease defines a lease concept.
type LeaseApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
}
// Lease constructs a declarative configuration of the Lease type for use with
@@ -46,29 +51,14 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
return b
}
-// ExtractLease extracts the applied configuration owned by fieldManager from
-// lease. If no managedFields are found in lease for fieldManager, a
-// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractLeaseFrom extracts the applied configuration owned by fieldManager from
+// lease for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// lease must be a unmodified Lease API object that was retrieved from the Kubernetes API.
-// ExtractLease provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractLeaseFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractLease(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
- return extractLease(lease, fieldManager, "")
-}
-
-// ExtractLeaseStatus is the same as ExtractLease except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractLeaseStatus(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
- return extractLease(lease, fieldManager, "status")
-}
-
-func extractLease(lease *coordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
+func ExtractLeaseFrom(lease *coordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
b := &LeaseApplyConfiguration{}
err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +71,21 @@ func extractLease(lease *coordinationv1.Lease, fieldManager string, subresource
b.WithAPIVersion("coordination.k8s.io/v1")
return b, nil
}
+
+// ExtractLease extracts the applied configuration owned by fieldManager from
+// lease. If no managedFields are found in lease for fieldManager, a
+// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// lease must be an unmodified Lease API object that was retrieved from the Kubernetes API.
+// ExtractLease provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractLease(lease *coordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return ExtractLeaseFrom(lease, fieldManager, "")
+}
+
func (b LeaseApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
index d0099872..4dca58e2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/leasespec.go
@@ -25,14 +25,33 @@ import (
// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use
// with apply.
+//
+// LeaseSpec is a specification of a Lease.
type LeaseSpecApplyConfiguration struct {
- HolderIdentity *string `json:"holderIdentity,omitempty"`
- LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"`
- AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty"`
- RenewTime *metav1.MicroTime `json:"renewTime,omitempty"`
- LeaseTransitions *int32 `json:"leaseTransitions,omitempty"`
- Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
- PreferredHolder *string `json:"preferredHolder,omitempty"`
+ // holderIdentity contains the identity of the holder of a current lease.
+ // If Coordinated Leader Election is used, the holder identity must be
+ // equal to the elected LeaseCandidate.metadata.name field.
+ HolderIdentity *string `json:"holderIdentity,omitempty"`
+ // leaseDurationSeconds is a duration that candidates for a lease need
+ // to wait to force acquire it. This is measured against the time of last
+ // observed renewTime.
+ LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"`
+ // acquireTime is a time when the current lease was acquired.
+ AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty"`
+ // renewTime is a time when the current holder of a lease has last
+ // updated the lease.
+ RenewTime *metav1.MicroTime `json:"renewTime,omitempty"`
+ // leaseTransitions is the number of transitions of a lease between
+ // holders.
+ LeaseTransitions *int32 `json:"leaseTransitions,omitempty"`
+ // Strategy indicates the strategy for picking the leader for coordinated leader election.
+ // If the field is not specified, there is no active coordination for this lease.
+ // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
+ Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
+ // PreferredHolder signals to a lease holder that the lease has a
+ // more optimal holder and should be given up.
+ // This field can only be set if Strategy is also set.
+ PreferredHolder *string `json:"preferredHolder,omitempty"`
}
// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with
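
Note: a hedged sketch (not part of this diff) of setting the documented LeaseSpec fields and applying the result with server-side apply; the lease name, namespace, and holder identity are illustrative.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordac "k8s.io/client-go/applyconfigurations/coordination/v1"
	"k8s.io/client-go/kubernetes"
)

// renewLease records holder as the current owner of the "my-controller" lease
// and refreshes renewTime, using the holder identity as the field manager.
func renewLease(ctx context.Context, cs kubernetes.Interface, holder string) error {
	lease := coordac.Lease("my-controller", "kube-system").
		WithSpec(coordac.LeaseSpec().
			WithHolderIdentity(holder).
			WithLeaseDurationSeconds(15).
			WithRenewTime(metav1.NewMicroTime(time.Now())))
	_, err := cs.CoordinationV1().Leases("kube-system").Apply(ctx, lease,
		metav1.ApplyOptions{FieldManager: holder, Force: true})
	return err
}
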
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
index e3d9b5ab..73150cb4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go
@@ -29,10 +29,16 @@ import (
// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use
// with apply.
+//
+// LeaseCandidate defines a candidate for a Lease object.
+// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
type LeaseCandidateApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"`
}
// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with
@@ -46,29 +52,14 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration {
return b
}
-// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from
-// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a
-// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractLeaseCandidateFrom extracts the applied configuration owned by fieldManager from
+// leaseCandidate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API.
-// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractLeaseCandidateFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
- return extractLeaseCandidate(leaseCandidate, fieldManager, "")
-}
-
-// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
- return extractLeaseCandidate(leaseCandidate, fieldManager, "status")
-}
-
-func extractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
+func ExtractLeaseCandidateFrom(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
b := &LeaseCandidateApplyConfiguration{}
err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1alpha2.LeaseCandidate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate,
b.WithAPIVersion("coordination.k8s.io/v1alpha2")
return b, nil
}
+
+// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from
+// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a
+// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// leaseCandidate must be an unmodified LeaseCandidate API object that was retrieved from the Kubernetes API.
+// ExtractLeaseCandidate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractLeaseCandidate(leaseCandidate *coordinationv1alpha2.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
+ return ExtractLeaseCandidateFrom(leaseCandidate, fieldManager, "")
+}
+
func (b LeaseCandidateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
index f52aaab2..44a2db06 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go
@@ -25,13 +25,37 @@ import (
// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use
// with apply.
+//
+// LeaseCandidateSpec is a specification of a LeaseCandidate.
type LeaseCandidateSpecApplyConfiguration struct {
- LeaseName *string `json:"leaseName,omitempty"`
- PingTime *v1.MicroTime `json:"pingTime,omitempty"`
- RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
- BinaryVersion *string `json:"binaryVersion,omitempty"`
- EmulationVersion *string `json:"emulationVersion,omitempty"`
- Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
+ // LeaseName is the name of the lease for which this candidate is contending.
+ // This field is immutable.
+ LeaseName *string `json:"leaseName,omitempty"`
+ // PingTime is the last time that the server has requested the LeaseCandidate
+ // to renew. It is only done during leader election to check if any
+ // LeaseCandidates have become ineligible. When PingTime is updated, the
+ // LeaseCandidate will respond by updating RenewTime.
+ PingTime *v1.MicroTime `json:"pingTime,omitempty"`
+ // RenewTime is the time that the LeaseCandidate was last updated.
+ // Any time a Lease needs to do leader election, the PingTime field
+ // is updated to signal to the LeaseCandidate that they should update
+ // the RenewTime.
+ // Old LeaseCandidate objects are also garbage collected if hours have passed
+ // since the last renew. The PingTime field is updated regularly to prevent
+ // garbage collection for still active LeaseCandidates.
+ RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
+ // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
+ // This field is required.
+ BinaryVersion *string `json:"binaryVersion,omitempty"`
+ // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
+ // EmulationVersion must be less than or equal to BinaryVersion.
+ // This field is required when strategy is "OldestEmulationVersion"
+ EmulationVersion *string `json:"emulationVersion,omitempty"`
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
+ Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
}
// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with
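
Note: a hedged sketch (not part of this diff) of a LeaseCandidate spec for coordinated leader election, using the fields documented above; the names, version strings, and strategy are illustrative.

package example

import (
	coordinationv1 "k8s.io/api/coordination/v1"
	coordacv1alpha2 "k8s.io/client-go/applyconfigurations/coordination/v1alpha2"
)

// candidate declares this instance as a contender for the "my-controller"
// lease; emulationVersion is required for the OldestEmulationVersion strategy.
func candidate() *coordacv1alpha2.LeaseCandidateApplyConfiguration {
	return coordacv1alpha2.LeaseCandidate("my-controller-instance-a", "kube-system").
		WithSpec(coordacv1alpha2.LeaseCandidateSpec().
			WithLeaseName("my-controller").
			WithBinaryVersion("1.35.0").
			WithEmulationVersion("1.34.0").
			WithStrategy(coordinationv1.OldestEmulationVersion))
}
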
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
index 377d8f49..38263059 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
@@ -29,10 +29,15 @@ import (
// LeaseApplyConfiguration represents a declarative configuration of the Lease type for use
// with apply.
+//
+// Lease defines a lease concept.
type LeaseApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *LeaseSpecApplyConfiguration `json:"spec,omitempty"`
}
// Lease constructs a declarative configuration of the Lease type for use with
@@ -46,29 +51,14 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
return b
}
-// ExtractLease extracts the applied configuration owned by fieldManager from
-// lease. If no managedFields are found in lease for fieldManager, a
-// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractLeaseFrom extracts the applied configuration owned by fieldManager from
+// lease for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// lease must be a unmodified Lease API object that was retrieved from the Kubernetes API.
-// ExtractLease provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractLeaseFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractLease(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
- return extractLease(lease, fieldManager, "")
-}
-
-// ExtractLeaseStatus is the same as ExtractLease except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractLeaseStatus(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
- return extractLease(lease, fieldManager, "status")
-}
-
-func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
+func ExtractLeaseFrom(lease *coordinationv1beta1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
b := &LeaseApplyConfiguration{}
err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1beta1.Lease"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +71,21 @@ func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subreso
b.WithAPIVersion("coordination.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractLease extracts the applied configuration owned by fieldManager from
+// lease. If no managedFields are found in lease for fieldManager, a
+// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// lease must be an unmodified Lease API object that was retrieved from the Kubernetes API.
+// ExtractLease provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractLease(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return ExtractLeaseFrom(lease, fieldManager, "")
+}
+
func (b LeaseApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go
index 57c0c859..4670d9d2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidate.go
@@ -29,10 +29,16 @@ import (
// LeaseCandidateApplyConfiguration represents a declarative configuration of the LeaseCandidate type for use
// with apply.
+//
+// LeaseCandidate defines a candidate for a Lease object.
+// Candidates are created such that coordinated leader election will pick the best leader from the list of candidates.
type LeaseCandidateApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec contains the specification of the Lease.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *LeaseCandidateSpecApplyConfiguration `json:"spec,omitempty"`
}
// LeaseCandidate constructs a declarative configuration of the LeaseCandidate type for use with
@@ -46,29 +52,14 @@ func LeaseCandidate(name, namespace string) *LeaseCandidateApplyConfiguration {
return b
}
-// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from
-// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a
-// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractLeaseCandidateFrom extracts the applied configuration owned by fieldManager from
+// leaseCandidate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// leaseCandidate must be a unmodified LeaseCandidate API object that was retrieved from the Kubernetes API.
-// ExtractLeaseCandidate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractLeaseCandidateFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractLeaseCandidate(leaseCandidate *coordinationv1beta1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
- return extractLeaseCandidate(leaseCandidate, fieldManager, "")
-}
-
-// ExtractLeaseCandidateStatus is the same as ExtractLeaseCandidate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractLeaseCandidateStatus(leaseCandidate *coordinationv1beta1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
- return extractLeaseCandidate(leaseCandidate, fieldManager, "status")
-}
-
-func extractLeaseCandidate(leaseCandidate *coordinationv1beta1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
+func ExtractLeaseCandidateFrom(leaseCandidate *coordinationv1beta1.LeaseCandidate, fieldManager string, subresource string) (*LeaseCandidateApplyConfiguration, error) {
b := &LeaseCandidateApplyConfiguration{}
err := managedfields.ExtractInto(leaseCandidate, internal.Parser().Type("io.k8s.api.coordination.v1beta1.LeaseCandidate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractLeaseCandidate(leaseCandidate *coordinationv1beta1.LeaseCandidate, f
b.WithAPIVersion("coordination.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractLeaseCandidate extracts the applied configuration owned by fieldManager from
+// leaseCandidate. If no managedFields are found in leaseCandidate for fieldManager, a
+// LeaseCandidateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// leaseCandidate must be an unmodified LeaseCandidate API object that was retrieved from the Kubernetes API.
+// ExtractLeaseCandidate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractLeaseCandidate(leaseCandidate *coordinationv1beta1.LeaseCandidate, fieldManager string) (*LeaseCandidateApplyConfiguration, error) {
+ return ExtractLeaseCandidateFrom(leaseCandidate, fieldManager, "")
+}
+
func (b LeaseCandidateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
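The regenerated comments above describe client-go's extract/modify-in-place/apply workflow, with the new `ExtractLeaseCandidateFrom` variant taking an explicit subresource. Below is a minimal sketch of that workflow, shown with the long-stable coordination/v1 `Lease` helpers rather than the v1beta1 `LeaseCandidate` ones (the generated pattern is identical); the clientset wiring, namespace, and the `skyhook-operator` field-manager name are placeholders, not code from this repository:

```go
// Package leaseexample sketches the extract/modify-in-place/apply workflow.
// All names here are illustrative assumptions.
package leaseexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordinationv1ac "k8s.io/client-go/applyconfigurations/coordination/v1"
	"k8s.io/client-go/kubernetes"
)

func renewLease(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	// Read the live object, then extract only the fields this field manager owns.
	lease, err := cs.CoordinationV1().Leases(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := coordinationv1ac.ExtractLease(lease, "skyhook-operator")
	if err != nil {
		return err
	}

	// Modify in place and re-apply; fields owned by other managers stay untouched.
	ac.WithSpec(coordinationv1ac.LeaseSpec().WithRenewTime(metav1.NowMicro()))
	_, err = cs.CoordinationV1().Leases(ns).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "skyhook-operator",
		Force:        true,
	})
	return err
}
```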
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go
index c3ea12c8..6b146dc4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasecandidatespec.go
@@ -25,13 +25,39 @@ import (
// LeaseCandidateSpecApplyConfiguration represents a declarative configuration of the LeaseCandidateSpec type for use
// with apply.
+//
+// LeaseCandidateSpec is a specification of a Lease.
type LeaseCandidateSpecApplyConfiguration struct {
- LeaseName *string `json:"leaseName,omitempty"`
- PingTime *v1.MicroTime `json:"pingTime,omitempty"`
- RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
- BinaryVersion *string `json:"binaryVersion,omitempty"`
- EmulationVersion *string `json:"emulationVersion,omitempty"`
- Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
+ // LeaseName is the name of the lease for which this candidate is contending.
+ // The limits on this field are the same as on Lease.name. Multiple lease candidates
+ // may reference the same Lease.name.
+ // This field is immutable.
+ LeaseName *string `json:"leaseName,omitempty"`
+ // PingTime is the last time that the server has requested the LeaseCandidate
+ // to renew. It is only done during leader election to check if any
+ // LeaseCandidates have become ineligible. When PingTime is updated, the
+ // LeaseCandidate will respond by updating RenewTime.
+ PingTime *v1.MicroTime `json:"pingTime,omitempty"`
+ // RenewTime is the time that the LeaseCandidate was last updated.
+ // Any time a Lease needs to do leader election, the PingTime field
+ // is updated to signal to the LeaseCandidate that they should update
+ // the RenewTime.
+ // Old LeaseCandidate objects are also garbage collected if it has been hours
+ // since the last renewal. The PingTime field is updated regularly to prevent
+ // garbage collection of still-active LeaseCandidates.
+ RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
+ // BinaryVersion is the binary version. It must be in a semver format without leading `v`.
+ // This field is required.
+ BinaryVersion *string `json:"binaryVersion,omitempty"`
+ // EmulationVersion is the emulation version. It must be in a semver format without leading `v`.
+ // EmulationVersion must be less than or equal to BinaryVersion.
+ // This field is required when strategy is "OldestEmulationVersion"
+ EmulationVersion *string `json:"emulationVersion,omitempty"`
+ // Strategy is the strategy that coordinated leader election will use for picking the leader.
+ // If multiple candidates for the same Lease return different strategies, the strategy provided
+ // by the candidate with the latest BinaryVersion will be used. If there is still conflict,
+ // this is a user error and coordinated leader election will not operate the Lease until resolved.
+ Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
}
// LeaseCandidateSpecApplyConfiguration constructs a declarative configuration of the LeaseCandidateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
index 8c7fddfc..db40b835 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/leasespec.go
@@ -25,14 +25,31 @@ import (
// LeaseSpecApplyConfiguration represents a declarative configuration of the LeaseSpec type for use
// with apply.
+//
+// LeaseSpec is a specification of a Lease.
type LeaseSpecApplyConfiguration struct {
- HolderIdentity *string `json:"holderIdentity,omitempty"`
- LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"`
- AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"`
- RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
- LeaseTransitions *int32 `json:"leaseTransitions,omitempty"`
- Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
- PreferredHolder *string `json:"preferredHolder,omitempty"`
+ // holderIdentity contains the identity of the holder of a current lease.
+ // If Coordinated Leader Election is used, the holder identity must be
+ // equal to the elected LeaseCandidate.metadata.name field.
+ HolderIdentity *string `json:"holderIdentity,omitempty"`
+ // leaseDurationSeconds is a duration that candidates for a lease need
+ // to wait to force acquire it. This is measured against the time of the last
+ // observed renewTime.
+ LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty"`
+ // acquireTime is a time when the current lease was acquired.
+ AcquireTime *v1.MicroTime `json:"acquireTime,omitempty"`
+ // renewTime is a time when the current holder of a lease has last
+ // updated the lease.
+ RenewTime *v1.MicroTime `json:"renewTime,omitempty"`
+ // leaseTransitions is the number of transitions of a lease between
+ // holders.
+ LeaseTransitions *int32 `json:"leaseTransitions,omitempty"`
+ // Strategy indicates the strategy for picking the leader for coordinated leader election
+ // (Alpha) Using this field requires the CoordinatedLeaderElection feature gate to be enabled.
+ Strategy *coordinationv1.CoordinatedLeaseStrategy `json:"strategy,omitempty"`
+ // PreferredHolder signals to a lease holder that the lease has a
+ // more optimal holder and should be given up.
+ PreferredHolder *string `json:"preferredHolder,omitempty"`
}
// LeaseSpecApplyConfiguration constructs a declarative configuration of the LeaseSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
index 45484f14..6ee627b8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/affinity.go
@@ -20,9 +20,14 @@ package v1
// AffinityApplyConfiguration represents a declarative configuration of the Affinity type for use
// with apply.
+//
+// Affinity is a group of affinity scheduling rules.
type AffinityApplyConfiguration struct {
- NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"`
- PodAffinity *PodAffinityApplyConfiguration `json:"podAffinity,omitempty"`
+ // Describes node affinity scheduling rules for the pod.
+ NodeAffinity *NodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"`
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ PodAffinity *PodAffinityApplyConfiguration `json:"podAffinity,omitempty"`
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
PodAntiAffinity *PodAntiAffinityApplyConfiguration `json:"podAntiAffinity,omitempty"`
}
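For context on how these generated builders are consumed, here is a small illustrative helper that assembles the `Affinity` block above with required node affinity; the label key and architecture values are examples only, not anything defined in this repository:

```go
// Package affinityexample shows composing an Affinity apply configuration.
package affinityexample

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// archAffinity pins a pod to the listed CPU architectures via required node affinity.
func archAffinity(arches ...string) *corev1ac.AffinityApplyConfiguration {
	return corev1ac.Affinity().
		WithNodeAffinity(corev1ac.NodeAffinity().
			WithRequiredDuringSchedulingIgnoredDuringExecution(corev1ac.NodeSelector().
				WithNodeSelectorTerms(corev1ac.NodeSelectorTerm().
					WithMatchExpressions(corev1ac.NodeSelectorRequirement().
						WithKey("kubernetes.io/arch").
						WithOperator(corev1.NodeSelectorOpIn).
						WithValues(arches...)))))
}
```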
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
index 3f7de21b..27a4289a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go
@@ -24,9 +24,20 @@ import (
// AppArmorProfileApplyConfiguration represents a declarative configuration of the AppArmorProfile type for use
// with apply.
+//
+// AppArmorProfile defines a pod or container's AppArmor settings.
type AppArmorProfileApplyConfiguration struct {
- Type *corev1.AppArmorProfileType `json:"type,omitempty"`
- LocalhostProfile *string `json:"localhostProfile,omitempty"`
+ // type indicates which kind of AppArmor profile will be applied.
+ // Valid options are:
+ // Localhost - a profile pre-loaded on the node.
+ // RuntimeDefault - the container runtime's default profile.
+ // Unconfined - no AppArmor enforcement.
+ Type *corev1.AppArmorProfileType `json:"type,omitempty"`
+ // localhostProfile indicates a profile loaded on the node that should be used.
+ // The profile must be preconfigured on the node to work.
+ // Must match the loaded name of the profile.
+ // Must be set if and only if type is "Localhost".
+ LocalhostProfile *string `json:"localhostProfile,omitempty"`
}
// AppArmorProfileApplyConfiguration constructs a declarative configuration of the AppArmorProfile type for use with
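A hedged sketch of the two `AppArmorProfile` shapes the new field comments describe: the runtime default profile, and a named localhost profile that must already be loaded on the node. The profile name is a placeholder:

```go
// Package apparmorexample shows the two common AppArmorProfile configurations.
package apparmorexample

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func runtimeDefaultProfile() *corev1ac.AppArmorProfileApplyConfiguration {
	return corev1ac.AppArmorProfile().
		WithType(corev1.AppArmorProfileTypeRuntimeDefault)
}

func localhostProfile(name string) *corev1ac.AppArmorProfileApplyConfiguration {
	// localhostProfile is only valid (and required) when type is "Localhost".
	return corev1ac.AppArmorProfile().
		WithType(corev1.AppArmorProfileTypeLocalhost).
		WithLocalhostProfile(name)
}
```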
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
index 2c76161a..a3e95629 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/attachedvolume.go
@@ -24,9 +24,13 @@ import (
// AttachedVolumeApplyConfiguration represents a declarative configuration of the AttachedVolume type for use
// with apply.
+//
+// AttachedVolume describes a volume attached to a node
type AttachedVolumeApplyConfiguration struct {
- Name *corev1.UniqueVolumeName `json:"name,omitempty"`
- DevicePath *string `json:"devicePath,omitempty"`
+ // Name of the attached volume
+ Name *corev1.UniqueVolumeName `json:"name,omitempty"`
+ // DevicePath represents the device path where the volume should be available
+ DevicePath *string `json:"devicePath,omitempty"`
}
// AttachedVolumeApplyConfiguration constructs a declarative configuration of the AttachedVolume type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
index d0878696..04f206f8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/awselasticblockstorevolumesource.go
@@ -20,11 +20,31 @@ package v1
// AWSElasticBlockStoreVolumeSourceApplyConfiguration represents a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use
// with apply.
+//
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSourceApplyConfiguration struct {
- VolumeID *string `json:"volumeID,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- Partition *int32 `json:"partition,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ VolumeID *string `json:"volumeID,omitempty"`
+ // fsType is the filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // partition is the partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ Partition *int32 `json:"partition,omitempty"`
+ // readOnly value true will force the readOnly setting in VolumeMounts.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// AWSElasticBlockStoreVolumeSourceApplyConfiguration constructs a declarative configuration of the AWSElasticBlockStoreVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
index d4d20dfa..bb046426 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurediskvolumesource.go
@@ -24,13 +24,24 @@ import (
// AzureDiskVolumeSourceApplyConfiguration represents a declarative configuration of the AzureDiskVolumeSource type for use
// with apply.
+//
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSourceApplyConfiguration struct {
- DiskName *string `json:"diskName,omitempty"`
- DataDiskURI *string `json:"diskURI,omitempty"`
+ // diskName is the Name of the data disk in the blob storage
+ DiskName *string `json:"diskName,omitempty"`
+ // diskURI is the URI of data disk in the blob storage
+ DataDiskURI *string `json:"diskURI,omitempty"`
+ // cachingMode is the Host Caching mode: None, Read Only, Read Write.
CachingMode *corev1.AzureDataDiskCachingMode `json:"cachingMode,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Kind *corev1.AzureDataDiskKind `json:"kind,omitempty"`
+ // fsType is Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // kind expected values are Shared: multiple blob disks per storage account; Dedicated: single blob disk per storage account; Managed: azure managed data disk (only in managed availability set). Defaults to shared.
+ Kind *corev1.AzureDataDiskKind `json:"kind,omitempty"`
}
// AzureDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureDiskVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
index 70a6b17b..db55eb1e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilepersistentvolumesource.go
@@ -20,10 +20,18 @@ package v1
// AzureFilePersistentVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFilePersistentVolumeSource type for use
// with apply.
+//
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFilePersistentVolumeSourceApplyConfiguration struct {
- SecretName *string `json:"secretName,omitempty"`
- ShareName *string `json:"shareName,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretName is the name of secret that contains Azure Storage Account Name and Key
+ SecretName *string `json:"secretName,omitempty"`
+ // shareName is the azure Share Name
+ ShareName *string `json:"shareName,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key
+ // default is the same as the Pod
SecretNamespace *string `json:"secretNamespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
index ff0c8679..af5c6236 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/azurefilevolumesource.go
@@ -20,10 +20,16 @@ package v1
// AzureFileVolumeSourceApplyConfiguration represents a declarative configuration of the AzureFileVolumeSource type for use
// with apply.
+//
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSourceApplyConfiguration struct {
+ // secretName is the name of secret that contains Azure Storage Account Name and Key
SecretName *string `json:"secretName,omitempty"`
- ShareName *string `json:"shareName,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // shareName is the azure share Name
+ ShareName *string `json:"shareName,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// AzureFileVolumeSourceApplyConfiguration constructs a declarative configuration of the AzureFileVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
index e5c52b3c..f9f6b0e0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/capabilities.go
@@ -24,8 +24,12 @@ import (
// CapabilitiesApplyConfiguration represents a declarative configuration of the Capabilities type for use
// with apply.
+//
+// Adds and removes POSIX capabilities from running containers.
type CapabilitiesApplyConfiguration struct {
- Add []corev1.Capability `json:"add,omitempty"`
+ // Added capabilities
+ Add []corev1.Capability `json:"add,omitempty"`
+ // Removed capabilities
Drop []corev1.Capability `json:"drop,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
index f3ee2d03..c2ce40a8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfspersistentvolumesource.go
@@ -20,13 +20,28 @@ package v1
// CephFSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSPersistentVolumeSource type for use
// with apply.
+//
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSPersistentVolumeSourceApplyConfiguration struct {
- Monitors []string `json:"monitors,omitempty"`
- Path *string `json:"path,omitempty"`
- User *string `json:"user,omitempty"`
- SecretFile *string `json:"secretFile,omitempty"`
- SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // monitors is Required: Monitors is a collection of Ceph monitors
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ Monitors []string `json:"monitors,omitempty"`
+ // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ Path *string `json:"path,omitempty"`
+ // user is Optional: User is the rados user name, default is admin
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ User *string `json:"user,omitempty"`
+ // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ SecretFile *string `json:"secretFile,omitempty"`
+ // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// CephFSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
index 77d53d6e..0ce5126e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cephfsvolumesource.go
@@ -20,13 +20,28 @@ package v1
// CephFSVolumeSourceApplyConfiguration represents a declarative configuration of the CephFSVolumeSource type for use
// with apply.
+//
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSourceApplyConfiguration struct {
- Monitors []string `json:"monitors,omitempty"`
- Path *string `json:"path,omitempty"`
- User *string `json:"user,omitempty"`
- SecretFile *string `json:"secretFile,omitempty"`
- SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // monitors is Required: Monitors is a collection of Ceph monitors
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ Monitors []string `json:"monitors,omitempty"`
+ // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ Path *string `json:"path,omitempty"`
+ // user is optional: User is the rados user name, default is admin
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ User *string `json:"user,omitempty"`
+ // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ SecretFile *string `json:"secretFile,omitempty"`
+ // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// CephFSVolumeSourceApplyConfiguration constructs a declarative configuration of the CephFSVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
index b2657348..6771d874 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cinderpersistentvolumesource.go
@@ -20,10 +20,26 @@ package v1
// CinderPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CinderPersistentVolumeSource type for use
// with apply.
+//
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
type CinderPersistentVolumeSourceApplyConfiguration struct {
- VolumeID *string `json:"volumeID,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // volumeID used to identify the volume in cinder.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ VolumeID *string `json:"volumeID,omitempty"`
+ // fsType Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretRef is Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
index 131cbf21..b19fcee3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/cindervolumesource.go
@@ -20,10 +20,26 @@ package v1
// CinderVolumeSourceApplyConfiguration represents a declarative configuration of the CinderVolumeSource type for use
// with apply.
+//
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSourceApplyConfiguration struct {
- VolumeID *string `json:"volumeID,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // volumeID used to identify the volume in cinder.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ VolumeID *string `json:"volumeID,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretRef is optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
index 02c4e55e..3fed4e35 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clientipconfig.go
@@ -20,7 +20,12 @@ package v1
// ClientIPConfigApplyConfiguration represents a declarative configuration of the ClientIPConfig type for use
// with apply.
+//
+// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfigApplyConfiguration struct {
+ // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+ // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
+ // Default value is 10800(for 3 hours).
TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
index ab1c578c..00eec6b3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/clustertrustbundleprojection.go
@@ -24,12 +24,31 @@ import (
// ClusterTrustBundleProjectionApplyConfiguration represents a declarative configuration of the ClusterTrustBundleProjection type for use
// with apply.
+//
+// ClusterTrustBundleProjection describes how to select a set of
+// ClusterTrustBundle objects and project their contents into the pod
+// filesystem.
type ClusterTrustBundleProjectionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
+ // Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ // with signerName and labelSelector.
+ Name *string `json:"name,omitempty"`
+ // Select all ClusterTrustBundles that match this signer name.
+ // Mutually-exclusive with name. The contents of all selected
+ // ClusterTrustBundles will be unified and deduplicated.
+ SignerName *string `json:"signerName,omitempty"`
+ // Select all ClusterTrustBundles that match this label selector. Only has
+ // effect if signerName is set. Mutually-exclusive with name. If unset,
+ // interpreted as "match nothing". If set but empty, interpreted as "match
+ // everything".
LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
- Optional *bool `json:"optional,omitempty"`
- Path *string `json:"path,omitempty"`
+ // If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ // aren't available. If using name, then the named ClusterTrustBundle is
+ // allowed not to exist. If using signerName, then the combination of
+ // signerName and labelSelector is allowed to match zero
+ // ClusterTrustBundles.
+ Optional *bool `json:"optional,omitempty"`
+ // Relative path from the volume root to write the bundle.
+ Path *string `json:"path,omitempty"`
}
// ClusterTrustBundleProjectionApplyConfiguration constructs a declarative configuration of the ClusterTrustBundleProjection type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
index 60be6fe8..954a7e4c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentcondition.go
@@ -24,11 +24,21 @@ import (
// ComponentConditionApplyConfiguration represents a declarative configuration of the ComponentCondition type for use
// with apply.
+//
+// Information about the condition of a component.
type ComponentConditionApplyConfiguration struct {
- Type *corev1.ComponentConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- Message *string `json:"message,omitempty"`
- Error *string `json:"error,omitempty"`
+ // Type of condition for a component.
+ // Valid value: "Healthy"
+ Type *corev1.ComponentConditionType `json:"type,omitempty"`
+ // Status of the condition for a component.
+ // Valid values for "Healthy": "True", "False", or "Unknown".
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Message about the condition for a component.
+ // For example, information about a health check.
+ Message *string `json:"message,omitempty"`
+ // Condition error code for a component.
+ // For example, a health check error code.
+ Error *string `json:"error,omitempty"`
}
// ComponentConditionApplyConfiguration constructs a declarative configuration of the ComponentCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
index 567446df..5b9b7f32 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
@@ -29,10 +29,16 @@ import (
// ComponentStatusApplyConfiguration represents a declarative configuration of the ComponentStatus type for use
// with apply.
+//
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+// Deprecated: This API is deprecated in v1.19+
type ComponentStatusApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"`
+ // List of component conditions observed
+ Conditions []ComponentConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ComponentStatus constructs a declarative configuration of the ComponentStatus type for use with
@@ -45,29 +51,14 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration {
return b
}
-// ExtractComponentStatus extracts the applied configuration owned by fieldManager from
-// componentStatus. If no managedFields are found in componentStatus for fieldManager, a
-// ComponentStatusApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractComponentStatusFrom extracts the applied configuration owned by fieldManager from
+// componentStatus for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// componentStatus must be a unmodified ComponentStatus API object that was retrieved from the Kubernetes API.
-// ExtractComponentStatus provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractComponentStatusFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
- return extractComponentStatus(componentStatus, fieldManager, "")
-}
-
-// ExtractComponentStatusStatus is the same as ExtractComponentStatus except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractComponentStatusStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
- return extractComponentStatus(componentStatus, fieldManager, "status")
-}
-
-func extractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) {
+func ExtractComponentStatusFrom(componentStatus *corev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) {
b := &ComponentStatusApplyConfiguration{}
err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +70,21 @@ func extractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManage
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractComponentStatus extracts the applied configuration owned by fieldManager from
+// componentStatus. If no managedFields are found in componentStatus for fieldManager, a
+// ComponentStatusApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// componentStatus must be an unmodified ComponentStatus API object that was retrieved from the Kubernetes API.
+// ExtractComponentStatus provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractComponentStatus(componentStatus *corev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
+ return ExtractComponentStatusFrom(componentStatus, fieldManager, "")
+}
+
func (b ComponentStatusApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
index 496f7cad..d6384820 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
@@ -29,12 +29,32 @@ import (
// ConfigMapApplyConfiguration represents a declarative configuration of the ConfigMap type for use
// with apply.
+//
+// ConfigMap holds configuration data for pods to consume.
type ConfigMapApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Immutable *bool `json:"immutable,omitempty"`
- Data map[string]string `json:"data,omitempty"`
- BinaryData map[string][]byte `json:"binaryData,omitempty"`
+ // Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+ // be updated (only object metadata can be modified).
+ // If not set to true, the field can be modified at any time.
+ // Defaulted to nil.
+ Immutable *bool `json:"immutable,omitempty"`
+ // Data contains the configuration data.
+ // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+ // Values with non-UTF-8 byte sequences must use the BinaryData field.
+ // The keys stored in Data must not overlap with the keys in
+ // the BinaryData field, this is enforced during validation process.
+ Data map[string]string `json:"data,omitempty"`
+ // BinaryData contains the binary data.
+ // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+ // BinaryData can contain byte sequences that are not in the UTF-8 range.
+ // The keys stored in BinaryData must not overlap with the ones in
+ // the Data field, this is enforced during validation process.
+ // Using this field will require 1.10+ apiserver and
+ // kubelet.
+ BinaryData map[string][]byte `json:"binaryData,omitempty"`
}
// ConfigMap constructs a declarative configuration of the ConfigMap type for use with
@@ -48,29 +68,14 @@ func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration {
return b
}
-// ExtractConfigMap extracts the applied configuration owned by fieldManager from
-// configMap. If no managedFields are found in configMap for fieldManager, a
-// ConfigMapApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractConfigMapFrom extracts the applied configuration owned by fieldManager from
+// configMap for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// configMap must be a unmodified ConfigMap API object that was retrieved from the Kubernetes API.
-// ExtractConfigMap provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractConfigMapFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractConfigMap(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) {
- return extractConfigMap(configMap, fieldManager, "")
-}
-
-// ExtractConfigMapStatus is the same as ExtractConfigMap except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractConfigMapStatus(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) {
- return extractConfigMap(configMap, fieldManager, "status")
-}
-
-func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresource string) (*ConfigMapApplyConfiguration, error) {
+func ExtractConfigMapFrom(configMap *corev1.ConfigMap, fieldManager string, subresource string) (*ConfigMapApplyConfiguration, error) {
b := &ConfigMapApplyConfiguration{}
err := managedfields.ExtractInto(configMap, internal.Parser().Type("io.k8s.api.core.v1.ConfigMap"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +88,21 @@ func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresou
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractConfigMap extracts the applied configuration owned by fieldManager from
+// configMap. If no managedFields are found in configMap for fieldManager, a
+// ConfigMapApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// configMap must be an unmodified ConfigMap API object that was retrieved from the Kubernetes API.
+// ExtractConfigMap provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractConfigMap(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) {
+ return ExtractConfigMapFrom(configMap, fieldManager, "")
+}
+
func (b ConfigMapApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
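A minimal sketch of the apply-then-extract round trip that `ExtractConfigMap` (and the new `ExtractConfigMapFrom`) support. The clientset, namespace, ConfigMap name, and `skyhook-operator` field manager are assumptions for illustration, not code from this repository:

```go
// Package configmapexample sketches server-side apply plus field extraction.
package configmapexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

func applyAndExtract(ctx context.Context, cs kubernetes.Interface, ns string) error {
	// Declare only the fields we care about; the server merges per field manager.
	cm := corev1ac.ConfigMap("skyhook-config", ns).
		WithData(map[string]string{"log-level": "info"})

	live, err := cs.CoreV1().ConfigMaps(ns).Apply(ctx, cm, metav1.ApplyOptions{
		FieldManager: "skyhook-operator",
		Force:        true,
	})
	if err != nil {
		return err
	}

	// ExtractConfigMap is shorthand for ExtractConfigMapFrom(live, mgr, "") on the
	// main resource; a subresource name would go in the third argument.
	owned, err := corev1ac.ExtractConfigMap(live, "skyhook-operator")
	if err != nil {
		return err
	}
	owned.WithData(map[string]string{"log-level": "debug"})
	_, err = cs.CoreV1().ConfigMaps(ns).Apply(ctx, owned, metav1.ApplyOptions{
		FieldManager: "skyhook-operator",
	})
	return err
}
```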
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
index 4c0d2cbd..c2c067b2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapenvsource.go
@@ -20,9 +20,17 @@ package v1
// ConfigMapEnvSourceApplyConfiguration represents a declarative configuration of the ConfigMapEnvSource type for use
// with apply.
+//
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
type ConfigMapEnvSourceApplyConfiguration struct {
+ // The ConfigMap to select from.
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Optional *bool `json:"optional,omitempty"`
+ // Specify whether the ConfigMap must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// ConfigMapEnvSourceApplyConfiguration constructs a declarative configuration of the ConfigMapEnvSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
index 97c0e721..415edded 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapkeyselector.go
@@ -20,10 +20,15 @@ package v1
// ConfigMapKeySelectorApplyConfiguration represents a declarative configuration of the ConfigMapKeySelector type for use
// with apply.
+//
+// Selects a key from a ConfigMap.
type ConfigMapKeySelectorApplyConfiguration struct {
+ // The ConfigMap to select from.
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Key *string `json:"key,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // The key to select.
+ Key *string `json:"key,omitempty"`
+ // Specify whether the ConfigMap or its key must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// ConfigMapKeySelectorApplyConfiguration constructs a declarative configuration of the ConfigMapKeySelector type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
index 135bb7d4..4e1e227e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapnodeconfigsource.go
@@ -24,12 +24,25 @@ import (
// ConfigMapNodeConfigSourceApplyConfiguration represents a declarative configuration of the ConfigMapNodeConfigSource type for use
// with apply.
+//
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
type ConfigMapNodeConfigSourceApplyConfiguration struct {
- Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
- ResourceVersion *string `json:"resourceVersion,omitempty"`
- KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"`
+ // Namespace is the metadata.namespace of the referenced ConfigMap.
+ // This field is required in all cases.
+ Namespace *string `json:"namespace,omitempty"`
+ // Name is the metadata.name of the referenced ConfigMap.
+ // This field is required in all cases.
+ Name *string `json:"name,omitempty"`
+ // UID is the metadata.UID of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ UID *types.UID `json:"uid,omitempty"`
+ // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ ResourceVersion *string `json:"resourceVersion,omitempty"`
+ // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+ // This field is required in all cases.
+ KubeletConfigKey *string `json:"kubeletConfigKey,omitempty"`
}
// ConfigMapNodeConfigSourceApplyConfiguration constructs a declarative configuration of the ConfigMapNodeConfigSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
index d8c5e21d..0357ca99 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapprojection.go
@@ -20,10 +20,26 @@ package v1
// ConfigMapProjectionApplyConfiguration represents a declarative configuration of the ConfigMapProjection type for use
// with apply.
+//
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
type ConfigMapProjectionApplyConfiguration struct {
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // items if unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
+ // optional specify whether the ConfigMap or its keys must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// ConfigMapProjectionApplyConfiguration constructs a declarative configuration of the ConfigMapProjection type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
index b5f41039..b8a6a333 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmapvolumesource.go
@@ -20,11 +20,33 @@ package v1
// ConfigMapVolumeSourceApplyConfiguration represents a declarative configuration of the ConfigMapVolumeSource type for use
// with apply.
+//
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSourceApplyConfiguration struct {
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
- DefaultMode *int32 `json:"defaultMode,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // items if unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
+ // defaultMode is optional: mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // Defaults to 0644.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // optional specify whether the ConfigMap or its keys must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// ConfigMapVolumeSourceApplyConfiguration constructs a declarative configuration of the ConfigMapVolumeSource type for use with
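As an illustration of the `items`, `defaultMode`, and `optional` fields documented above, a small helper that builds a ConfigMap-backed volume; the ConfigMap and file names are placeholders:

```go
// Package volumeexample shows composing a ConfigMapVolumeSource apply configuration.
package volumeexample

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func configVolume() *corev1ac.VolumeApplyConfiguration {
	return corev1ac.Volume().
		WithName("app-config").
		WithConfigMap(corev1ac.ConfigMapVolumeSource().
			WithName("skyhook-config").
			WithItems(corev1ac.KeyToPath().
				WithKey("config.yaml").
				WithPath("config.yaml")).
			WithDefaultMode(0o444). // octal 0444 (decimal 292): read-only files
			WithOptional(true))     // do not block pod startup if the ConfigMap is absent
}
```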
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
index 4694b12f..262ee48c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/container.go
@@ -24,32 +24,161 @@ import (
// ContainerApplyConfiguration represents a declarative configuration of the Container type for use
// with apply.
+//
+// A single application container that you want to run within a pod.
type ContainerApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Image *string `json:"image,omitempty"`
- Command []string `json:"command,omitempty"`
- Args []string `json:"args,omitempty"`
- WorkingDir *string `json:"workingDir,omitempty"`
- Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"`
- EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"`
- Env []EnvVarApplyConfiguration `json:"env,omitempty"`
- Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
- ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
- RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
- RestartPolicyRules []ContainerRestartRuleApplyConfiguration `json:"restartPolicyRules,omitempty"`
- VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
- VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
- LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
- ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"`
- StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"`
- Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"`
- TerminationMessagePath *string `json:"terminationMessagePath,omitempty"`
- TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
- ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
- SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"`
- Stdin *bool `json:"stdin,omitempty"`
- StdinOnce *bool `json:"stdinOnce,omitempty"`
- TTY *bool `json:"tty,omitempty"`
+ // Name of the container specified as a DNS_LABEL.
+ // Each container in a pod must have a unique name (DNS_LABEL).
+ // Cannot be updated.
+ Name *string `json:"name,omitempty"`
+ // Container image name.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // This field is optional to allow higher level config management to default or override
+ // container images in workload controllers like Deployments and StatefulSets.
+ Image *string `json:"image,omitempty"`
+ // Entrypoint array. Not executed within a shell.
+ // The container image's ENTRYPOINT is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ Command []string `json:"command,omitempty"`
+ // Arguments to the entrypoint.
+ // The container image's CMD is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ Args []string `json:"args,omitempty"`
+ // Container's working directory.
+ // If not specified, the container runtime's default will be used, which
+ // might be configured in the container image.
+ // Cannot be updated.
+ WorkingDir *string `json:"workingDir,omitempty"`
+ // List of ports to expose from the container. Not specifying a port here
+ // DOES NOT prevent that port from being exposed. Any port which is
+ // listening on the default "0.0.0.0" address inside a container will be
+ // accessible from the network.
+ // Modifying this array with strategic merge patch may corrupt the data.
+ // For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ // Cannot be updated.
+ Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"`
+ // List of sources to populate environment variables in the container.
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Cannot be updated.
+ EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"`
+ // List of environment variables to set in the container.
+ // Cannot be updated.
+ Env []EnvVarApplyConfiguration `json:"env,omitempty"`
+ // Compute Resources required by this container.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
+ // Resources resize policy for the container.
+ // This field cannot be set on ephemeral containers.
+ ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
+ // RestartPolicy defines the restart behavior of individual containers in a pod.
+ // This overrides the pod-level restart policy. When this field is not specified,
+ // the restart behavior is defined by the Pod's restart policy and the container type.
+ // Additionally, setting the RestartPolicy as "Always" for the init container will
+ // have the following effect:
+ // this init container will be continually restarted on
+ // exit until all regular containers have terminated. Once all regular
+ // containers have completed, all init containers with restartPolicy "Always"
+ // will be shut down. This lifecycle differs from normal init containers and
+ // is often referred to as a "sidecar" container. Although this init
+ // container still starts in the init container sequence, it does not wait
+ // for the container to complete before proceeding to the next init
+ // container. Instead, the next init container starts immediately after this
+ // init container is started, or after any startupProbe has successfully
+ // completed.
+ RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
+ // Represents a list of rules to be checked to determine if the
+ // container should be restarted on exit. The rules are evaluated in
+ // order. Once a rule matches a container exit condition, the remaining
+ // rules are ignored. If no rule matches the container exit condition,
+ // the Container-level restart policy determines whether the container
+ // is restarted or not. Constraints on the rules:
+ // - At most 20 rules are allowed.
+ // - Rules can have the same action.
+ // - Identical rules are not forbidden in validations.
+ // When rules are specified, the container MUST set RestartPolicy explicitly,
+ // even if it matches the Pod's RestartPolicy.
+ RestartPolicyRules []ContainerRestartRuleApplyConfiguration `json:"restartPolicyRules,omitempty"`
+ // Pod volumes to mount into the container's filesystem.
+ // Cannot be updated.
+ VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
+ // volumeDevices is the list of block devices to be used by the container.
+ VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
+ // Periodic probe of container liveness.
+ // Container will be restarted if the probe fails.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
+ // Periodic probe of container service readiness.
+ // Container will be removed from service endpoints if the probe fails.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"`
+ // StartupProbe indicates that the Pod has successfully initialized.
+ // If specified, no other probes are executed until this completes successfully.
+ // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ // when it might take a long time to load data or warm a cache, than during steady-state operation.
+ // This cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"`
+ // Actions that the management system should take in response to container lifecycle events.
+ // Cannot be updated.
+ Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"`
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // Message written is intended to be brief final status, such as an assertion failure message.
+ // Will be truncated by the node if greater than 4096 bytes. The total message length across
+ // all containers will be limited to 12kb.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ TerminationMessagePath *string `json:"terminationMessagePath,omitempty"`
+ // Indicate how the termination message should be populated. File will use the contents of
+ // terminationMessagePath to populate the container status message on both success and failure.
+ // FallbackToLogsOnError will use the last chunk of container log output if the termination
+ // message file is empty and the container exited with an error.
+ // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ // Defaults to File.
+ // Cannot be updated.
+ TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+ // SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"`
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ Stdin *bool `json:"stdin,omitempty"`
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+ // flag is false, a container process that reads from stdin will never receive an EOF.
+ // Default is false.
+ StdinOnce *bool `json:"stdinOnce,omitempty"`
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ TTY *bool `json:"tty,omitempty"`
}
// ContainerApplyConfiguration constructs a declarative configuration of the Container type for use with
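Editor's sketch (not part of the vendored diff): the field comments above cover $(VAR_NAME) expansion and the "sidecar" semantics of restartPolicy on init containers. A minimal example assembling a container with the generated builders; the image, names, and port are placeholders, and the constants used are documented corev1 values.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// A sidecar-style init container: restartPolicy Always keeps it running
	// alongside the regular containers, per the comment above.
	c := corev1ac.Container().
		WithName("log-shipper").
		WithImage("registry.example/log-shipper:1.0").
		WithCommand("/bin/shipper").
		// "$$(HOME)" stays literal; "$(POD_NAME)" would be expanded from Env.
		WithArgs("--literal=$$(HOME)", "--pod=$(POD_NAME)").
		WithPorts(corev1ac.ContainerPort().
			WithName("metrics").
			WithContainerPort(9090).
			WithProtocol(corev1.ProtocolTCP)).
		WithRestartPolicy(corev1.ContainerRestartPolicyAlways).
		WithImagePullPolicy(corev1.PullIfNotPresent)

	fmt.Println(*c.Name, *c.RestartPolicy)
}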
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go
index 0b83b382..9131bba6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerextendedresourcerequest.go
@@ -20,10 +20,16 @@ package v1
// ContainerExtendedResourceRequestApplyConfiguration represents a declarative configuration of the ContainerExtendedResourceRequest type for use
// with apply.
+//
+// ContainerExtendedResourceRequest has the mapping of container name,
+// extended resource name to the device request name.
type ContainerExtendedResourceRequestApplyConfiguration struct {
+ // The name of the container requesting resources.
ContainerName *string `json:"containerName,omitempty"`
- ResourceName *string `json:"resourceName,omitempty"`
- RequestName *string `json:"requestName,omitempty"`
+ // The name of the extended resource in that container which gets backed by DRA.
+ ResourceName *string `json:"resourceName,omitempty"`
+ // The name of the request in the special ResourceClaim which corresponds to the extended resource.
+ RequestName *string `json:"requestName,omitempty"`
}
// ContainerExtendedResourceRequestApplyConfiguration constructs a declarative configuration of the ContainerExtendedResourceRequest type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
index bc9428fd..1c42e73c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerimage.go
@@ -20,9 +20,14 @@ package v1
// ContainerImageApplyConfiguration represents a declarative configuration of the ContainerImage type for use
// with apply.
+//
+// Describe a container image
type ContainerImageApplyConfiguration struct {
- Names []string `json:"names,omitempty"`
- SizeBytes *int64 `json:"sizeBytes,omitempty"`
+ // Names by which this image is known.
+ // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"]
+ Names []string `json:"names,omitempty"`
+ // The size of the image in bytes.
+ SizeBytes *int64 `json:"sizeBytes,omitempty"`
}
// ContainerImageApplyConfiguration constructs a declarative configuration of the ContainerImage type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
index 2ad47b3a..2fdabaab 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerport.go
@@ -24,12 +24,26 @@ import (
// ContainerPortApplyConfiguration represents a declarative configuration of the ContainerPort type for use
// with apply.
+//
+// ContainerPort represents a network port in a single container.
type ContainerPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- HostPort *int32 `json:"hostPort,omitempty"`
- ContainerPort *int32 `json:"containerPort,omitempty"`
- Protocol *corev1.Protocol `json:"protocol,omitempty"`
- HostIP *string `json:"hostIP,omitempty"`
+ // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ // named port in a pod must have a unique name. Name for the port that can be
+ // referred to by services.
+ Name *string `json:"name,omitempty"`
+ // Number of port to expose on the host.
+ // If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // Most containers do not need this.
+ HostPort *int32 `json:"hostPort,omitempty"`
+ // Number of port to expose on the pod's IP address.
+ // This must be a valid port number, 0 < x < 65536.
+ ContainerPort *int32 `json:"containerPort,omitempty"`
+ // Protocol for port. Must be UDP, TCP, or SCTP.
+ // Defaults to "TCP".
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ // What host IP to bind the external port to.
+ HostIP *string `json:"hostIP,omitempty"`
}
// ContainerPortApplyConfiguration constructs a declarative configuration of the ContainerPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
index d45dbcea..4066727f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerresizepolicy.go
@@ -24,8 +24,14 @@ import (
// ContainerResizePolicyApplyConfiguration represents a declarative configuration of the ContainerResizePolicy type for use
// with apply.
+//
+// ContainerResizePolicy represents resource resize policy for the container.
type ContainerResizePolicyApplyConfiguration struct {
- ResourceName *corev1.ResourceName `json:"resourceName,omitempty"`
+ // Name of the resource to which this resource resize policy applies.
+ // Supported values: cpu, memory.
+ ResourceName *corev1.ResourceName `json:"resourceName,omitempty"`
+ // Restart policy to apply when specified resource is resized.
+ // If not specified, it defaults to NotRequired.
RestartPolicy *corev1.ResourceResizeRestartPolicy `json:"restartPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go
index 6ec09000..f44278e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartrule.go
@@ -24,8 +24,14 @@ import (
// ContainerRestartRuleApplyConfiguration represents a declarative configuration of the ContainerRestartRule type for use
// with apply.
+//
+// ContainerRestartRule describes how a container exit is handled.
type ContainerRestartRuleApplyConfiguration struct {
- Action *corev1.ContainerRestartRuleAction `json:"action,omitempty"`
+ // Specifies the action taken on a container exit if the requirements
+ // are satisfied. The only possible value is "Restart" to restart the
+ // container.
+ Action *corev1.ContainerRestartRuleAction `json:"action,omitempty"`
+ // Represents the exit codes to check on container exits.
ExitCodes *ContainerRestartRuleOnExitCodesApplyConfiguration `json:"exitCodes,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go
index 6bfd9619..f0cfbaf4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerrestartruleonexitcodes.go
@@ -24,9 +24,20 @@ import (
// ContainerRestartRuleOnExitCodesApplyConfiguration represents a declarative configuration of the ContainerRestartRuleOnExitCodes type for use
// with apply.
+//
+// ContainerRestartRuleOnExitCodes describes the condition
+// for handling an exited container based on its exit codes.
type ContainerRestartRuleOnExitCodesApplyConfiguration struct {
+ // Represents the relationship between the container exit code(s) and the
+ // specified values. Possible values are:
+ // - In: the requirement is satisfied if the container exit code is in the
+ // set of specified values.
+ // - NotIn: the requirement is satisfied if the container exit code is
+ // not in the set of specified values.
Operator *corev1.ContainerRestartRuleOnExitCodesOperator `json:"operator,omitempty"`
- Values []int32 `json:"values,omitempty"`
+ // Specifies the set of values to check for container exit codes.
+ // At most 255 elements are allowed.
+ Values []int32 `json:"values,omitempty"`
}
// ContainerRestartRuleOnExitCodesApplyConfiguration constructs a declarative configuration of the ContainerRestartRuleOnExitCodes type for use with
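Editor's sketch (not part of the vendored diff): ContainerRestartRule and ContainerRestartRuleOnExitCodes together describe per-container restart rules keyed on exit codes. A hedged example of composing them; the values "Restart" and "In" come from the comments above and are written as type conversions rather than assuming exact constant names.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Restart the container only when it exits with code 42 or 137.
	rule := corev1ac.ContainerRestartRule().
		WithAction(corev1.ContainerRestartRuleAction("Restart")).
		WithExitCodes(corev1ac.ContainerRestartRuleOnExitCodes().
			WithOperator(corev1.ContainerRestartRuleOnExitCodesOperator("In")).
			WithValues(42, 137))

	// Per the container.go comment, RestartPolicy must be set explicitly
	// on the container whenever rules are supplied.
	c := corev1ac.Container().
		WithName("worker").
		WithRestartPolicy(corev1.ContainerRestartPolicy("Never")).
		WithRestartPolicyRules(rule)

	fmt.Println(*c.Name, len(c.RestartPolicyRules))
}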
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
index b958e017..5b6cd96c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstate.go
@@ -20,9 +20,16 @@ package v1
// ContainerStateApplyConfiguration represents a declarative configuration of the ContainerState type for use
// with apply.
+//
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerStateApplyConfiguration struct {
- Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"`
- Running *ContainerStateRunningApplyConfiguration `json:"running,omitempty"`
+ // Details about a waiting container
+ Waiting *ContainerStateWaitingApplyConfiguration `json:"waiting,omitempty"`
+ // Details about a running container
+ Running *ContainerStateRunningApplyConfiguration `json:"running,omitempty"`
+ // Details about a terminated container
Terminated *ContainerStateTerminatedApplyConfiguration `json:"terminated,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
index 0ed59c17..1a9d6ac0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstaterunning.go
@@ -24,7 +24,10 @@ import (
// ContainerStateRunningApplyConfiguration represents a declarative configuration of the ContainerStateRunning type for use
// with apply.
+//
+// ContainerStateRunning is a running state of a container.
type ContainerStateRunningApplyConfiguration struct {
+ // Time at which the container was last (re-)started
StartedAt *metav1.Time `json:"startedAt,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
index cfadd93c..63217e05 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstateterminated.go
@@ -24,14 +24,23 @@ import (
// ContainerStateTerminatedApplyConfiguration represents a declarative configuration of the ContainerStateTerminated type for use
// with apply.
+//
+// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminatedApplyConfiguration struct {
- ExitCode *int32 `json:"exitCode,omitempty"`
- Signal *int32 `json:"signal,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- StartedAt *metav1.Time `json:"startedAt,omitempty"`
- FinishedAt *metav1.Time `json:"finishedAt,omitempty"`
- ContainerID *string `json:"containerID,omitempty"`
+ // Exit status from the last termination of the container
+ ExitCode *int32 `json:"exitCode,omitempty"`
+ // Signal from the last termination of the container
+ Signal *int32 `json:"signal,omitempty"`
+ // (brief) reason from the last termination of the container
+ Reason *string `json:"reason,omitempty"`
+ // Message regarding the last termination of the container
+ Message *string `json:"message,omitempty"`
+ // Time at which previous execution of the container started
+ StartedAt *metav1.Time `json:"startedAt,omitempty"`
+ // Time at which the container last terminated
+ FinishedAt *metav1.Time `json:"finishedAt,omitempty"`
+ // Container's ID in the format '<type>://<container_id>'
+ ContainerID *string `json:"containerID,omitempty"`
}
// ContainerStateTerminatedApplyConfiguration constructs a declarative configuration of the ContainerStateTerminated type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
index 7756c7da..8c99c00c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatewaiting.go
@@ -20,8 +20,12 @@ package v1
// ContainerStateWaitingApplyConfiguration represents a declarative configuration of the ContainerStateWaiting type for use
// with apply.
+//
+// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaitingApplyConfiguration struct {
- Reason *string `json:"reason,omitempty"`
+ // (brief) reason the container is not yet running.
+ Reason *string `json:"reason,omitempty"`
+ // Message regarding why the container is not yet running.
Message *string `json:"message,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
index 8f64501b..a2e4a317 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containerstatus.go
@@ -24,22 +24,70 @@ import (
// ContainerStatusApplyConfiguration represents a declarative configuration of the ContainerStatus type for use
// with apply.
+//
+// ContainerStatus contains details for the current status of this container.
type ContainerStatusApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- State *ContainerStateApplyConfiguration `json:"state,omitempty"`
- LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"`
- Ready *bool `json:"ready,omitempty"`
- RestartCount *int32 `json:"restartCount,omitempty"`
- Image *string `json:"image,omitempty"`
- ImageID *string `json:"imageID,omitempty"`
- ContainerID *string `json:"containerID,omitempty"`
- Started *bool `json:"started,omitempty"`
- AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
- Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
- VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"`
- User *ContainerUserApplyConfiguration `json:"user,omitempty"`
- AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"`
- StopSignal *corev1.Signal `json:"stopSignal,omitempty"`
+ // Name is a DNS_LABEL representing the unique name of the container.
+ // Each container in a pod must have a unique name across all container types.
+ // Cannot be updated.
+ Name *string `json:"name,omitempty"`
+ // State holds details about the container's current condition.
+ State *ContainerStateApplyConfiguration `json:"state,omitempty"`
+ // LastTerminationState holds the last termination state of the container to
+ // help debug container crashes and restarts. This field is not
+ // populated if the container is still running and RestartCount is 0.
+ LastTerminationState *ContainerStateApplyConfiguration `json:"lastState,omitempty"`
+ // Ready specifies whether the container is currently passing its readiness check.
+ // The value will change as readiness probes keep executing. If no readiness
+ // probes are specified, this field defaults to true once the container is
+ // fully started (see Started field).
+ //
+ // The value is typically used to determine whether a container is ready to
+ // accept traffic.
+ Ready *bool `json:"ready,omitempty"`
+ // RestartCount holds the number of times the container has been restarted.
+ // Kubelet makes an effort to always increment the value, but there
+ // are cases when the state may be lost due to node restarts and then the value
+ // may be reset to 0. The value is never negative.
+ RestartCount *int32 `json:"restartCount,omitempty"`
+ // Image is the name of container image that the container is running.
+ // The container image may not match the image used in the PodSpec,
+ // as it may have been resolved by the runtime.
+ // More info: https://kubernetes.io/docs/concepts/containers/images.
+ Image *string `json:"image,omitempty"`
+ // ImageID is the image ID of the container's image. The image ID may not
+ // match the image ID of the image used in the PodSpec, as it may have been
+ // resolved by the runtime.
+ ImageID *string `json:"imageID,omitempty"`
+ // ContainerID is the ID of the container in the format '<type>://<container_id>'.
+ // Where type is a container runtime identifier, returned from Version call of CRI API
+ // (for example "containerd").
+ ContainerID *string `json:"containerID,omitempty"`
+ // Started indicates whether the container has finished its postStart lifecycle hook
+ // and passed its startup probe.
+ // Initialized as false, becomes true after startupProbe is considered
+ // successful. Resets to false when the container is restarted, or if kubelet
+ // loses state temporarily. In both cases, startup probes will run again.
+ // Is always true when no startupProbe is defined and container is running and
+ // has passed the postStart lifecycle hook. The null value must be treated the
+ // same as false.
+ Started *bool `json:"started,omitempty"`
+ // AllocatedResources represents the compute resources allocated for this container by the
+ // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
+ // and after successfully admitting desired pod resize.
+ AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
+ // Resources represents the compute resource requests and limits that have been successfully
+ // enacted on the running container after it has been started or has been successfully resized.
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
+ // Status of volume mounts.
+ VolumeMounts []VolumeMountStatusApplyConfiguration `json:"volumeMounts,omitempty"`
+ // User represents user identity information initially attached to the first process of the container
+ User *ContainerUserApplyConfiguration `json:"user,omitempty"`
+ // AllocatedResourcesStatus represents the status of various resources
+ // allocated for this Pod.
+ AllocatedResourcesStatus []ResourceStatusApplyConfiguration `json:"allocatedResourcesStatus,omitempty"`
+ // StopSignal reports the effective stop signal for this container
+ StopSignal *corev1.Signal `json:"stopSignal,omitempty"`
}
// ContainerStatusApplyConfiguration constructs a declarative configuration of the ContainerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
index 34ec8e41..b8dcdb11 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go
@@ -20,7 +20,11 @@ package v1
// ContainerUserApplyConfiguration represents a declarative configuration of the ContainerUser type for use
// with apply.
+//
+// ContainerUser represents user identity information
type ContainerUserApplyConfiguration struct {
+ // Linux holds user identity information initially attached to the first process of the containers in Linux.
+ // Note that the actual running identity can be changed if the process has enough privilege to do so.
Linux *LinuxContainerUserApplyConfiguration `json:"linux,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
index a614d108..c71d5b3d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csipersistentvolumesource.go
@@ -20,17 +20,54 @@ package v1
// CSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the CSIPersistentVolumeSource type for use
// with apply.
+//
+// Represents storage that is managed by an external CSI volume driver
type CSIPersistentVolumeSourceApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- VolumeHandle *string `json:"volumeHandle,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- VolumeAttributes map[string]string `json:"volumeAttributes,omitempty"`
+ // driver is the name of the driver to use for this volume.
+ // Required.
+ Driver *string `json:"driver,omitempty"`
+ // volumeHandle is the unique volume name returned by the CSI volume
+ // plugin’s CreateVolume to refer to the volume on all subsequent calls.
+ // Required.
+ VolumeHandle *string `json:"volumeHandle,omitempty"`
+ // readOnly value to pass to ControllerPublishVolumeRequest.
+ // Defaults to false (read/write).
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // fsType to mount. Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ FSType *string `json:"fsType,omitempty"`
+ // volumeAttributes of the volume to publish.
+ VolumeAttributes map[string]string `json:"volumeAttributes,omitempty"`
+ // controllerPublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerPublishVolume and ControllerUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
ControllerPublishSecretRef *SecretReferenceApplyConfiguration `json:"controllerPublishSecretRef,omitempty"`
- NodeStageSecretRef *SecretReferenceApplyConfiguration `json:"nodeStageSecretRef,omitempty"`
- NodePublishSecretRef *SecretReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"`
- ControllerExpandSecretRef *SecretReferenceApplyConfiguration `json:"controllerExpandSecretRef,omitempty"`
- NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"`
+ // nodeStageSecretRef is a reference to the secret object containing sensitive
+ // information to pass to the CSI driver to complete the CSI NodeStageVolume
+ // and NodeUnstageVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ NodeStageSecretRef *SecretReferenceApplyConfiguration `json:"nodeStageSecretRef,omitempty"`
+ // nodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ NodePublishSecretRef *SecretReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"`
+ // controllerExpandSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerExpandVolume call.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ ControllerExpandSecretRef *SecretReferenceApplyConfiguration `json:"controllerExpandSecretRef,omitempty"`
+ // nodeExpandSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodeExpandVolume call.
+ // This field is optional, may be omitted if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ NodeExpandSecretRef *SecretReferenceApplyConfiguration `json:"nodeExpandSecretRef,omitempty"`
}
// CSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the CSIPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
index b58d9bbb..f50ac94e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/csivolumesource.go
@@ -20,11 +20,27 @@ package v1
// CSIVolumeSourceApplyConfiguration represents a declarative configuration of the CSIVolumeSource type for use
// with apply.
+//
+// Represents a source location of a volume to mount, managed by an external CSI driver
type CSIVolumeSourceApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- VolumeAttributes map[string]string `json:"volumeAttributes,omitempty"`
+ // driver is the name of the CSI driver that handles this volume.
+ // Consult with your admin for the correct name as registered in the cluster.
+ Driver *string `json:"driver,omitempty"`
+ // readOnly specifies a read-only configuration for the volume.
+ // Defaults to false (read/write).
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ // If not provided, the empty value is passed to the associated CSI driver
+ // which will determine the default filesystem to apply.
+ FSType *string `json:"fsType,omitempty"`
+ // volumeAttributes stores driver-specific properties that are passed to the CSI
+ // driver. Consult your driver's documentation for supported values.
+ VolumeAttributes map[string]string `json:"volumeAttributes,omitempty"`
+ // nodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secret references are passed.
NodePublishSecretRef *LocalObjectReferenceApplyConfiguration `json:"nodePublishSecretRef,omitempty"`
}
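Editor's sketch (not part of the vendored diff): a small, hedged example of an inline CSI volume source built with these helpers; the driver name, attributes, and secret name are placeholders.

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	csi := corev1ac.CSIVolumeSource().
		WithDriver("csi.example.com"). // placeholder driver name
		WithFSType("ext4").
		WithReadOnly(true).
		WithVolumeAttributes(map[string]string{ // driver-specific settings
			"mountOptions": "noatime",
		}).
		WithNodePublishSecretRef(corev1ac.LocalObjectReference().
			WithName("csi-publish-secret"))

	fmt.Println(*csi.Driver, *csi.ReadOnly)
}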
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
index 5be27ec0..4eba2032 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/daemonendpoint.go
@@ -20,7 +20,10 @@ package v1
// DaemonEndpointApplyConfiguration represents a declarative configuration of the DaemonEndpoint type for use
// with apply.
+//
+// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpointApplyConfiguration struct {
+ // Port number of the given endpoint.
Port *int32 `json:"Port,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
index ed6b8b1b..c5aed20a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapiprojection.go
@@ -20,7 +20,12 @@ package v1
// DownwardAPIProjectionApplyConfiguration represents a declarative configuration of the DownwardAPIProjection type for use
// with apply.
+//
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
type DownwardAPIProjectionApplyConfiguration struct {
+ // Items is a list of DownwardAPIVolume file
Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
index ec9d013d..9028f313 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumefile.go
@@ -20,11 +20,23 @@ package v1
// DownwardAPIVolumeFileApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeFile type for use
// with apply.
+//
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFileApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"`
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path *string `json:"path,omitempty"`
+ // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+ FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
ResourceFieldRef *ResourceFieldSelectorApplyConfiguration `json:"resourceFieldRef,omitempty"`
- Mode *int32 `json:"mode,omitempty"`
+ // Optional: mode bits used to set permissions on this file, must be an octal value
+ // between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ Mode *int32 `json:"mode,omitempty"`
}
// DownwardAPIVolumeFileApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeFile type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
index eef9d7ef..42e72668 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/downwardapivolumesource.go
@@ -20,9 +20,21 @@ package v1
// DownwardAPIVolumeSourceApplyConfiguration represents a declarative configuration of the DownwardAPIVolumeSource type for use
// with apply.
+//
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSourceApplyConfiguration struct {
- Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"`
- DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // Items is a list of downward API volume file
+ Items []DownwardAPIVolumeFileApplyConfiguration `json:"items,omitempty"`
+ // Optional: mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // Defaults to 0644.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
}
// DownwardAPIVolumeSourceApplyConfiguration constructs a declarative configuration of the DownwardAPIVolumeSource type for use with
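Editor's sketch (not part of the vendored diff): the mode-bit rules above apply both to the per-file Mode and to the volume-wide DefaultMode. A minimal example of a downward API volume exposing pod labels; the path and the 0644/0400 modes are illustrative.

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Expose pod labels as a file; 0o400 on the item overrides the
	// volume-wide 0o644 default.
	vol := corev1ac.DownwardAPIVolumeSource().
		WithDefaultMode(0o644).
		WithItems(corev1ac.DownwardAPIVolumeFile().
			WithPath("podinfo/labels").
			WithMode(0o400).
			WithFieldRef(corev1ac.ObjectFieldSelector().
				WithFieldPath("metadata.labels")))

	fmt.Println(*vol.DefaultMode, *vol.Items[0].Path)
}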
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
index 63e9f56a..97f71e8d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/emptydirvolumesource.go
@@ -25,9 +25,22 @@ import (
// EmptyDirVolumeSourceApplyConfiguration represents a declarative configuration of the EmptyDirVolumeSource type for use
// with apply.
+//
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSourceApplyConfiguration struct {
- Medium *corev1.StorageMedium `json:"medium,omitempty"`
- SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
+ // medium represents what type of storage medium should back this directory.
+ // The default is "" which means to use the node's default medium.
+ // Must be an empty string (default) or Memory.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ Medium *corev1.StorageMedium `json:"medium,omitempty"`
+ // sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ // The size limit is also applicable for memory medium.
+ // The maximum usage on memory medium EmptyDir would be the minimum value between
+ // the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ // The default is nil which means that the limit is undefined.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ SizeLimit *resource.Quantity `json:"sizeLimit,omitempty"`
}
// EmptyDirVolumeSourceApplyConfiguration constructs a declarative configuration of the EmptyDirVolumeSource type for use with
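Editor's sketch (not part of the vendored diff): a short example of a memory-backed emptyDir with a size limit using this builder; the 256Mi figure is arbitrary, and per the comment above actual usage is also capped by the pod's memory limits.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	scratch := corev1ac.EmptyDirVolumeSource().
		WithMedium(corev1.StorageMediumMemory). // tmpfs-backed scratch space
		WithSizeLimit(resource.MustParse("256Mi"))

	fmt.Println(*scratch.Medium, scratch.SizeLimit.String())
}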
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
index 536e697a..b45448c9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointaddress.go
@@ -20,10 +20,19 @@ package v1
// EndpointAddressApplyConfiguration represents a declarative configuration of the EndpointAddress type for use
// with apply.
+//
+// EndpointAddress is a tuple that describes single IP address.
+// Deprecated: This API is deprecated in v1.33+.
type EndpointAddressApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
+ // The IP of this endpoint.
+ // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10),
+ // or link-local multicast (224.0.0.0/24 or ff02::/16).
+ IP *string `json:"ip,omitempty"`
+ // The Hostname of this endpoint
+ Hostname *string `json:"hostname,omitempty"`
+ // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+ NodeName *string `json:"nodeName,omitempty"`
+ // Reference to object providing the endpoint.
TargetRef *ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
index 05ee64dd..8ebfdc7c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointport.go
@@ -24,11 +24,37 @@ import (
// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
// with apply.
+//
+// EndpointPort is a tuple that describes a single port.
+// Deprecated: This API is deprecated in v1.33+.
type EndpointPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Port *int32 `json:"port,omitempty"`
- Protocol *corev1.Protocol `json:"protocol,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
+ // The name of this port. This must match the 'name' field in the
+ // corresponding ServicePort.
+ // Must be a DNS_LABEL.
+ // Optional only if one port is defined.
+ Name *string `json:"name,omitempty"`
+ // The port number of the endpoint.
+ Port *int32 `json:"port,omitempty"`
+ // The IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ // The application protocol for this port.
+ // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ // This field follows standard Kubernetes label syntax.
+ // Valid values are either:
+ //
+ // * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ // RFC-6335 and https://www.iana.org/assignments/service-names).
+ //
+ // * Kubernetes-defined prefixed names:
+ // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+ //
+ // * Other protocols should use implementation-defined prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ AppProtocol *string `json:"appProtocol,omitempty"`
}
// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
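Editor's sketch (not part of the vendored diff): since this legacy Endpoints API is marked deprecated in favor of EndpointSlice, the following only illustrates the appProtocol convention described above; "kubernetes.io/wss" is one of the documented prefixed names.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	p := corev1ac.EndpointPort().
		WithName("wss"). // must match the corresponding ServicePort name
		WithPort(443).
		WithProtocol(corev1.ProtocolTCP).
		WithAppProtocol("kubernetes.io/wss") // Kubernetes-defined prefixed name

	fmt.Println(*p.Name, *p.Port, *p.AppProtocol)
}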
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
index 1cb1d40a..a13cd30e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
@@ -29,10 +29,38 @@ import (
// EndpointsApplyConfiguration represents a declarative configuration of the Endpoints type for use
// with apply.
+//
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//
+// Name: "mysvc",
+// Subsets: [
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// },
+// {
+// Addresses: [{"ip": "10.10.3.3"}],
+// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// },
+// ]
+//
+// Endpoints is a legacy API and does not contain information about all Service features.
+// Use discoveryv1.EndpointSlice for complete information about Service endpoints.
+//
+// Deprecated: This API is deprecated in v1.33+. Use discoveryv1.EndpointSlice.
type EndpointsApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"`
+ // The set of all endpoints is the union of all subsets. Addresses are placed into
+ // subsets according to the IPs they share. A single address with multiple ports,
+ // some of which are ready and some of which are not (because they come from
+ // different containers) will result in the address being displayed in different
+ // subsets for the different ports. No address will appear in both Addresses and
+ // NotReadyAddresses in the same subset.
+ // Sets of addresses and ports that comprise a service.
+ Subsets []EndpointSubsetApplyConfiguration `json:"subsets,omitempty"`
}
// Endpoints constructs a declarative configuration of the Endpoints type for use with
@@ -46,29 +74,14 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration {
return b
}
-// ExtractEndpoints extracts the applied configuration owned by fieldManager from
-// endpoints. If no managedFields are found in endpoints for fieldManager, a
-// EndpointsApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEndpointsFrom extracts the applied configuration owned by fieldManager from
+// endpoints for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// endpoints must be a unmodified Endpoints API object that was retrieved from the Kubernetes API.
-// ExtractEndpoints provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEndpointsFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEndpoints(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
- return extractEndpoints(endpoints, fieldManager, "")
-}
-
-// ExtractEndpointsStatus is the same as ExtractEndpoints except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEndpointsStatus(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
- return extractEndpoints(endpoints, fieldManager, "status")
-}
-
-func extractEndpoints(endpoints *corev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) {
+func ExtractEndpointsFrom(endpoints *corev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) {
b := &EndpointsApplyConfiguration{}
err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +94,21 @@ func extractEndpoints(endpoints *corev1.Endpoints, fieldManager string, subresou
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractEndpoints extracts the applied configuration owned by fieldManager from
+// endpoints. If no managedFields are found in endpoints for fieldManager, an
+// EndpointsApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// endpoints must be an unmodified Endpoints API object that was retrieved from the Kubernetes API.
+// ExtractEndpoints provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEndpoints(endpoints *corev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
+ return ExtractEndpointsFrom(endpoints, fieldManager, "")
+}
+
func (b EndpointsApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
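Editor's sketch (not part of the vendored diff): the ExtractEndpointsFrom/ExtractEndpoints pair supports the extract/modify-in-place/apply workflow mentioned in their comments. A hedged example against a typed clientset; the namespace, object name, field manager, and label are placeholders, and the Apply call is the standard typed-client method rather than anything added by this diff.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	// 1. Read the live object, 2. extract the fields owned by "my-manager",
	// 3. tweak them, 4. apply the result back under the same field manager.
	live, err := cs.CoreV1().Endpoints("default").Get(ctx, "mysvc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	ac, err := corev1ac.ExtractEndpointsFrom(live, "my-manager", "") // "" = main resource
	if err != nil {
		panic(err)
	}
	ac.WithLabels(map[string]string{"example.com/touched": "true"}) // hypothetical label
	out, err := cs.CoreV1().Endpoints("default").Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "my-manager"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Name)
}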
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
index 33cd8496..de1cbafa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpointsubset.go
@@ -20,10 +20,32 @@ package v1
// EndpointSubsetApplyConfiguration represents a declarative configuration of the EndpointSubset type for use
// with apply.
+//
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// }
+//
+// The resulting set of endpoints can be viewed as:
+//
+// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+//
+// Deprecated: This API is deprecated in v1.33+.
type EndpointSubsetApplyConfiguration struct {
- Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"`
+ // IP addresses which offer the related ports that are marked as ready. These endpoints
+ // should be considered safe for load balancers and clients to utilize.
+ Addresses []EndpointAddressApplyConfiguration `json:"addresses,omitempty"`
+ // IP addresses which offer the related ports but are not currently marked as ready
+ // because they have not yet finished starting, have recently failed a readiness check,
+ // or have recently failed a liveness check.
NotReadyAddresses []EndpointAddressApplyConfiguration `json:"notReadyAddresses,omitempty"`
- Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
+ // Port numbers available on the related IP addresses.
+ Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
}
// EndpointSubsetApplyConfiguration constructs a declarative configuration of the EndpointSubset type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
index 7aa181cf..05e18bed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envfromsource.go
@@ -20,10 +20,16 @@ package v1
// EnvFromSourceApplyConfiguration represents a declarative configuration of the EnvFromSource type for use
// with apply.
+//
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets
type EnvFromSourceApplyConfiguration struct {
- Prefix *string `json:"prefix,omitempty"`
+ // Optional text to prepend to the name of each environment variable.
+ // May consist of any printable ASCII characters except '='.
+ Prefix *string `json:"prefix,omitempty"`
+ // The ConfigMap to select from
ConfigMapRef *ConfigMapEnvSourceApplyConfiguration `json:"configMapRef,omitempty"`
- SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"`
+ // The Secret to select from
+ SecretRef *SecretEnvSourceApplyConfiguration `json:"secretRef,omitempty"`
}
// EnvFromSourceApplyConfiguration constructs a declarative configuration of the EnvFromSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
index 5894166c..dfde1cb6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvar.go
@@ -20,9 +20,23 @@ package v1
// EnvVarApplyConfiguration represents a declarative configuration of the EnvVar type for use
// with apply.
+//
+// EnvVar represents an environment variable present in a Container.
type EnvVarApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Value *string `json:"value,omitempty"`
+ // Name of the environment variable.
+ // May consist of any printable ASCII characters except '='.
+ Name *string `json:"name,omitempty"`
+ // Variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ // Escaped references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // Defaults to "".
+ Value *string `json:"value,omitempty"`
+ // Source for the environment variable's value. Cannot be used if value is not empty.
ValueFrom *EnvVarSourceApplyConfiguration `json:"valueFrom,omitempty"`
}
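A short sketch of the $(VAR_NAME) expansion and $$ escaping rules documented for Value above, using the EnvVar builder; the variable names are illustrative only:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func exampleEnv() []*applycorev1.EnvVarApplyConfiguration {
	return []*applycorev1.EnvVarApplyConfiguration{
		// Plain value.
		applycorev1.EnvVar().WithName("GREETING").WithValue("hello"),
		// Expanded at container start to "hello world", because GREETING is
		// defined earlier in the list.
		applycorev1.EnvVar().WithName("MESSAGE").WithValue("$(GREETING) world"),
		// "$$(GREETING)" escapes the reference: the literal string
		// "$(GREETING)" ends up in the environment and is never expanded.
		applycorev1.EnvVar().WithName("RAW").WithValue("$$(GREETING)"),
	}
}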
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
index 8705a2b6..84e2a661 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/envvarsource.go
@@ -20,12 +20,22 @@ package v1
// EnvVarSourceApplyConfiguration represents a declarative configuration of the EnvVarSource type for use
// with apply.
+//
+// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSourceApplyConfiguration struct {
- FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"`
+	// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ FieldRef *ObjectFieldSelectorApplyConfiguration `json:"fieldRef,omitempty"`
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
ResourceFieldRef *ResourceFieldSelectorApplyConfiguration `json:"resourceFieldRef,omitempty"`
- ConfigMapKeyRef *ConfigMapKeySelectorApplyConfiguration `json:"configMapKeyRef,omitempty"`
- SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"`
- FileKeyRef *FileKeySelectorApplyConfiguration `json:"fileKeyRef,omitempty"`
+ // Selects a key of a ConfigMap.
+ ConfigMapKeyRef *ConfigMapKeySelectorApplyConfiguration `json:"configMapKeyRef,omitempty"`
+ // Selects a key of a secret in the pod's namespace
+ SecretKeyRef *SecretKeySelectorApplyConfiguration `json:"secretKeyRef,omitempty"`
+ // FileKeyRef selects a key of the env file.
+ // Requires the EnvFiles feature gate to be enabled.
+ FileKeyRef *FileKeySelectorApplyConfiguration `json:"fileKeyRef,omitempty"`
}
// EnvVarSourceApplyConfiguration constructs a declarative configuration of the EnvVarSource type for use with
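For illustration, two common valueFrom sources built with these helpers: a downward-API fieldRef and a secret key reference. The secret name and key are placeholders:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func exampleValueFrom() []*applycorev1.EnvVarApplyConfiguration {
	return []*applycorev1.EnvVarApplyConfiguration{
		// Downward API: inject the pod's own name.
		applycorev1.EnvVar().WithName("POD_NAME").WithValueFrom(
			applycorev1.EnvVarSource().WithFieldRef(
				applycorev1.ObjectFieldSelector().WithFieldPath("metadata.name"),
			),
		),
		// Secret key reference; because Optional is true, the variable is
		// simply not set if the key is missing.
		applycorev1.EnvVar().WithName("API_TOKEN").WithValueFrom(
			applycorev1.EnvVarSource().WithSecretKeyRef(
				applycorev1.SecretKeySelector().WithName("my-secret").WithKey("token").WithOptional(true),
			),
		),
	}
}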
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
index d41c9853..43d97a6e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainer.go
@@ -24,9 +24,28 @@ import (
// EphemeralContainerApplyConfiguration represents a declarative configuration of the EphemeralContainer type for use
// with apply.
+//
+// An EphemeralContainer is a temporary container that you may add to an existing Pod for
+// user-initiated activities such as debugging. Ephemeral containers have no resource or
+// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+// Pod to exceed its resource allocation.
+//
+// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+// Pod. Ephemeral containers may not be removed or restarted.
type EphemeralContainerApplyConfiguration struct {
+ // Ephemeral containers have all of the fields of Container, plus additional fields
+ // specific to ephemeral containers. Fields in common with Container are in the
+	// following inlined struct so that an EphemeralContainer may easily be converted
+ // to a Container.
EphemeralContainerCommonApplyConfiguration `json:",inline"`
- TargetContainerName *string `json:"targetContainerName,omitempty"`
+ // If set, the name of the container from PodSpec that this ephemeral container targets.
+ // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ // If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+ //
+ // The container runtime must implement support for this feature. If the runtime does not
+ // support namespace targeting then the result of setting this field is undefined.
+ TargetContainerName *string `json:"targetContainerName,omitempty"`
}
// EphemeralContainerApplyConfiguration constructs a declarative configuration of the EphemeralContainer type for use with
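A hedged sketch of what the comment describes, adding a debug ephemeral container through the pod's ephemeralcontainers subresource with client-go; the pod name, namespace, target container, and image are placeholders:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// addDebugContainer appends an ephemeral container that runs in the namespaces
// of the "app" container and pushes it via the ephemeralcontainers subresource.
func addDebugContainer(ctx context.Context, cs *kubernetes.Clientset, ns, podName string) error {
	pod, err := cs.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debugger",
			Image:   "busybox:1.36",
			Command: []string{"sleep", "3600"},
			Stdin:   true,
			TTY:     true,
		},
		// Share IPC/PID namespaces with the targeted container, if the runtime supports it.
		TargetContainerName: "app",
	})

	_, err = cs.CoreV1().Pods(ns).UpdateEphemeralContainers(ctx, podName, pod, metav1.UpdateOptions{})
	return err
}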
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
index cd9bf08f..23ac3f08 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralcontainercommon.go
@@ -24,32 +24,119 @@ import (
// EphemeralContainerCommonApplyConfiguration represents a declarative configuration of the EphemeralContainerCommon type for use
// with apply.
+//
+// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
+// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
+// to Container and allows separate documentation for the fields of EphemeralContainer.
+// When a new field is added to Container it must be added here as well.
type EphemeralContainerCommonApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Image *string `json:"image,omitempty"`
- Command []string `json:"command,omitempty"`
- Args []string `json:"args,omitempty"`
- WorkingDir *string `json:"workingDir,omitempty"`
- Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"`
- EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"`
- Env []EnvVarApplyConfiguration `json:"env,omitempty"`
- Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
- ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
- RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
- RestartPolicyRules []ContainerRestartRuleApplyConfiguration `json:"restartPolicyRules,omitempty"`
- VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
- VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
- LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
- ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"`
- StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"`
- Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"`
- TerminationMessagePath *string `json:"terminationMessagePath,omitempty"`
- TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
- ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
- SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"`
- Stdin *bool `json:"stdin,omitempty"`
- StdinOnce *bool `json:"stdinOnce,omitempty"`
- TTY *bool `json:"tty,omitempty"`
+ // Name of the ephemeral container specified as a DNS_LABEL.
+ // This name must be unique among all containers, init containers and ephemeral containers.
+ Name *string `json:"name,omitempty"`
+ // Container image name.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ Image *string `json:"image,omitempty"`
+ // Entrypoint array. Not executed within a shell.
+ // The image's ENTRYPOINT is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ Command []string `json:"command,omitempty"`
+ // Arguments to the entrypoint.
+ // The image's CMD is used if this is not provided.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
+ // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ Args []string `json:"args,omitempty"`
+ // Container's working directory.
+ // If not specified, the container runtime's default will be used, which
+ // might be configured in the container image.
+ // Cannot be updated.
+ WorkingDir *string `json:"workingDir,omitempty"`
+ // Ports are not allowed for ephemeral containers.
+ Ports []ContainerPortApplyConfiguration `json:"ports,omitempty"`
+ // List of sources to populate environment variables in the container.
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Cannot be updated.
+ EnvFrom []EnvFromSourceApplyConfiguration `json:"envFrom,omitempty"`
+ // List of environment variables to set in the container.
+ // Cannot be updated.
+ Env []EnvVarApplyConfiguration `json:"env,omitempty"`
+ // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ // already allocated to the pod.
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
+ // Resources resize policy for the container.
+ ResizePolicy []ContainerResizePolicyApplyConfiguration `json:"resizePolicy,omitempty"`
+ // Restart policy for the container to manage the restart behavior of each
+ // container within a pod.
+ // You cannot set this field on ephemeral containers.
+ RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty"`
+ // Represents a list of rules to be checked to determine if the
+ // container should be restarted on exit. You cannot set this field on
+ // ephemeral containers.
+ RestartPolicyRules []ContainerRestartRuleApplyConfiguration `json:"restartPolicyRules,omitempty"`
+ // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ // Cannot be updated.
+ VolumeMounts []VolumeMountApplyConfiguration `json:"volumeMounts,omitempty"`
+ // volumeDevices is the list of block devices to be used by the container.
+ VolumeDevices []VolumeDeviceApplyConfiguration `json:"volumeDevices,omitempty"`
+ // Probes are not allowed for ephemeral containers.
+ LivenessProbe *ProbeApplyConfiguration `json:"livenessProbe,omitempty"`
+ // Probes are not allowed for ephemeral containers.
+ ReadinessProbe *ProbeApplyConfiguration `json:"readinessProbe,omitempty"`
+ // Probes are not allowed for ephemeral containers.
+ StartupProbe *ProbeApplyConfiguration `json:"startupProbe,omitempty"`
+ // Lifecycle is not allowed for ephemeral containers.
+ Lifecycle *LifecycleApplyConfiguration `json:"lifecycle,omitempty"`
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // Message written is intended to be brief final status, such as an assertion failure message.
+ // Will be truncated by the node if greater than 4096 bytes. The total message length across
+ // all containers will be limited to 12kb.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ TerminationMessagePath *string `json:"terminationMessagePath,omitempty"`
+ // Indicate how the termination message should be populated. File will use the contents of
+ // terminationMessagePath to populate the container status message on both success and failure.
+ // FallbackToLogsOnError will use the last chunk of container log output if the termination
+ // message file is empty and the container exited with an error.
+ // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ // Defaults to File.
+ // Cannot be updated.
+ TerminationMessagePolicy *corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty"`
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+ // Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ SecurityContext *SecurityContextApplyConfiguration `json:"securityContext,omitempty"`
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ Stdin *bool `json:"stdin,omitempty"`
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false.
+ StdinOnce *bool `json:"stdinOnce,omitempty"`
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ TTY *bool `json:"tty,omitempty"`
}
// EphemeralContainerCommonApplyConfiguration constructs a declarative configuration of the EphemeralContainerCommon type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
index d2c8c672..030107b4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/ephemeralvolumesource.go
@@ -20,7 +20,30 @@ package v1
// EphemeralVolumeSourceApplyConfiguration represents a declarative configuration of the EphemeralVolumeSource type for use
// with apply.
+//
+// Represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSourceApplyConfiguration struct {
+ // Will be used to create a stand-alone PVC to provision the volume.
+ // The pod in which this EphemeralVolumeSource is embedded will be the
+ // owner of the PVC, i.e. the PVC will be deleted together with the
+	// pod. The name of the PVC will be `<pod name>-<volume name>` where
+	// `<volume name>` is the name from the `PodSpec.Volumes` array
+ // entry. Pod validation will reject the pod if the concatenated name
+ // is not valid for a PVC (for example, too long).
+ //
+ // An existing PVC with that name that is not owned by the pod
+ // will *not* be used for the pod to avoid using an unrelated
+ // volume by mistake. Starting the pod is then blocked until
+ // the unrelated PVC is removed. If such a pre-created PVC is
+ // meant to be used by the pod, the PVC has to updated with an
+ // owner reference to the pod once the pod exists. Normally
+ // this should not be necessary, but it may be useful when
+ // manually reconstructing a broken cluster.
+ //
+ // This field is read-only and no changes will be made by Kubernetes
+ // to the PVC after it has been created.
+ //
+ // Required, must not be nil.
VolumeClaimTemplate *PersistentVolumeClaimTemplateApplyConfiguration `json:"volumeClaimTemplate,omitempty"`
}
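As an illustration of the naming rule in the comment, a generic ephemeral volume declared as below on a pod named web-0 yields a PVC named web-0-scratch that is owned by, and deleted with, the pod. The storage class and size are placeholders:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// scratchVolume declares a generic ephemeral volume; on a pod named "web-0"
// the provisioned PVC is named "web-0-scratch" and is owned by that pod.
func scratchVolume() *applycorev1.VolumeApplyConfiguration {
	return applycorev1.Volume().
		WithName("scratch").
		WithEphemeral(applycorev1.EphemeralVolumeSource().
			WithVolumeClaimTemplate(applycorev1.PersistentVolumeClaimTemplate().
				WithSpec(applycorev1.PersistentVolumeClaimSpec().
					WithAccessModes(corev1.ReadWriteOnce).
					WithStorageClassName("standard").
					WithResources(applycorev1.VolumeResourceRequirements().
						WithRequests(corev1.ResourceList{
							corev1.ResourceStorage: resource.MustParse("1Gi"),
						})))))
}

An existing PVC with the same name that is not owned by the pod blocks pod startup, as the comment notes, so the volume name should be chosen to avoid collisions.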
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
index a4f19050..676a719b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
@@ -29,23 +29,49 @@ import (
// EventApplyConfiguration represents a declarative configuration of the Event type for use
// with apply.
+//
+// Event is a report of an event somewhere in the cluster. Events
+// have a limited retention time and triggers and messages may evolve
+// with time. Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason. Events should be
+// treated as informative, best-effort, supplemental data.
type EventApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
- Source *EventSourceApplyConfiguration `json:"source,omitempty"`
- FirstTimestamp *apismetav1.Time `json:"firstTimestamp,omitempty"`
- LastTimestamp *apismetav1.Time `json:"lastTimestamp,omitempty"`
- Count *int32 `json:"count,omitempty"`
- Type *string `json:"type,omitempty"`
- EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
- Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
- Action *string `json:"action,omitempty"`
- Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"`
- ReportingController *string `json:"reportingComponent,omitempty"`
- ReportingInstance *string `json:"reportingInstance,omitempty"`
+ // The object that this event is about.
+ InvolvedObject *ObjectReferenceApplyConfiguration `json:"involvedObject,omitempty"`
+ // This should be a short, machine understandable string that gives the reason
+ // for the transition into the object's current status.
+ // TODO: provide exact specification for format.
+ Reason *string `json:"reason,omitempty"`
+ // A human-readable description of the status of this operation.
+ // TODO: decide on maximum length.
+ Message *string `json:"message,omitempty"`
+ // The component reporting this event. Should be a short machine understandable string.
+ Source *EventSourceApplyConfiguration `json:"source,omitempty"`
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ FirstTimestamp *apismetav1.Time `json:"firstTimestamp,omitempty"`
+ // The time at which the most recent occurrence of this event was recorded.
+ LastTimestamp *apismetav1.Time `json:"lastTimestamp,omitempty"`
+ // The number of times this event has occurred.
+ Count *int32 `json:"count,omitempty"`
+ // Type of this event (Normal, Warning), new types could be added in the future
+ Type *string `json:"type,omitempty"`
+ // Time when this Event was first observed.
+ EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
+ // Data about the Event series this event represents or nil if it's a singleton Event.
+ Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
+	// What action was taken/failed regarding the Regarding object.
+ Action *string `json:"action,omitempty"`
+ // Optional secondary object for more complex actions.
+ Related *ObjectReferenceApplyConfiguration `json:"related,omitempty"`
+ // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+ ReportingController *string `json:"reportingComponent,omitempty"`
+ // ID of the controller instance, e.g. `kubelet-xyzf`.
+ ReportingInstance *string `json:"reportingInstance,omitempty"`
}
// Event constructs a declarative configuration of the Event type for use with
@@ -59,29 +85,14 @@ func Event(name, namespace string) *EventApplyConfiguration {
return b
}
-// ExtractEvent extracts the applied configuration owned by fieldManager from
-// event. If no managedFields are found in event for fieldManager, a
-// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEventFrom extracts the applied configuration owned by fieldManager from
+// event for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
-// ExtractEvent provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEventFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEvent(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "")
-}
-
-// ExtractEventStatus is the same as ExtractEvent except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEventStatus(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "status")
-}
-
-func extractEvent(event *corev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
+func ExtractEventFrom(event *corev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource)
if err != nil {
@@ -94,6 +105,21 @@ func extractEvent(event *corev1.Event, fieldManager string, subresource string)
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractEvent extracts the applied configuration owned by fieldManager from
+// event. If no managedFields are found in event for fieldManager, a
+// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// event must be an unmodified Event API object that was retrieved from the Kubernetes API.
+// ExtractEvent provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEvent(event *corev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return ExtractEventFrom(event, fieldManager, "")
+}
+
func (b EventApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
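Because ExtractEventStatus is removed in this revision, callers that previously extracted the status subresource switch to ExtractEventFrom with an explicit subresource argument; a minimal sketch (the field-manager name is a placeholder):

package example

import (
	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// extractEventConfigs extracts both the main resource and the status
// subresource configurations owned by a single field manager.
func extractEventConfigs(ev *corev1.Event) (*applycorev1.EventApplyConfiguration, *applycorev1.EventApplyConfiguration, error) {
	// Main resource: same behavior as ExtractEvent(ev, "my-manager").
	primary, err := applycorev1.ExtractEventFrom(ev, "my-manager", "")
	if err != nil {
		return nil, nil, err
	}
	// Status subresource: replaces the removed ExtractEventStatus helper.
	status, err := applycorev1.ExtractEventFrom(ev, "my-manager", "status")
	if err != nil {
		return nil, nil, err
	}
	return primary, status, nil
}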
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
index c90954bc..fdaa06b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventseries.go
@@ -24,8 +24,13 @@ import (
// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
// with apply.
+//
+// EventSeries contain information on series of events, i.e. thing that was/is happening
+// continuously for some time.
type EventSeriesApplyConfiguration struct {
- Count *int32 `json:"count,omitempty"`
+ // Number of occurrences in this series up to the last heartbeat time
+ Count *int32 `json:"count,omitempty"`
+ // Time of the last occurrence observed
LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
index 97edb049..10ca1db8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/eventsource.go
@@ -20,9 +20,13 @@ package v1
// EventSourceApplyConfiguration represents a declarative configuration of the EventSource type for use
// with apply.
+//
+// EventSource contains information for an event.
type EventSourceApplyConfiguration struct {
+ // Component from which the event is generated.
Component *string `json:"component,omitempty"`
- Host *string `json:"host,omitempty"`
+ // Node name on which the event is generated.
+ Host *string `json:"host,omitempty"`
}
// EventSourceApplyConfiguration constructs a declarative configuration of the EventSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
index b7208a91..de3e8590 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/execaction.go
@@ -20,7 +20,14 @@ package v1
// ExecActionApplyConfiguration represents a declarative configuration of the ExecAction type for use
// with apply.
+//
+// ExecAction describes a "run in container" action.
type ExecActionApplyConfiguration struct {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
Command []string `json:"command,omitempty"`
}
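Since the command is exec'd directly rather than run in a shell, pipes and other shell syntax only work when a shell is invoked explicitly. A small sketch of a liveness probe built with these helpers; the probed process name is a placeholder:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// shellProbe wraps a shell pipeline in an exec probe. The kubelet execs
// ["/bin/sh", "-c", ...] directly; the pipe is interpreted by /bin/sh.
func shellProbe() *applycorev1.ProbeApplyConfiguration {
	return applycorev1.Probe().
		WithExec(applycorev1.ExecAction().
			WithCommand("/bin/sh", "-c", "pgrep my-daemon | grep -q .")).
		WithPeriodSeconds(10).
		WithFailureThreshold(3)
}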
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
index 000ff2cc..f77f82f9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/fcvolumesource.go
@@ -20,12 +20,26 @@ package v1
// FCVolumeSourceApplyConfiguration represents a declarative configuration of the FCVolumeSource type for use
// with apply.
+//
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSourceApplyConfiguration struct {
+ // targetWWNs is Optional: FC target worldwide names (WWNs)
TargetWWNs []string `json:"targetWWNs,omitempty"`
- Lun *int32 `json:"lun,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- WWIDs []string `json:"wwids,omitempty"`
+ // lun is Optional: FC target lun number
+ Lun *int32 `json:"lun,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // wwids Optional: FC volume world wide identifiers (wwids)
+ // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ WWIDs []string `json:"wwids,omitempty"`
}
// FCVolumeSourceApplyConfiguration constructs a declarative configuration of the FCVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go
index d543e120..7c1f2128 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/filekeyselector.go
@@ -20,11 +20,26 @@ package v1
// FileKeySelectorApplyConfiguration represents a declarative configuration of the FileKeySelector type for use
// with apply.
+//
+// FileKeySelector selects a key of the env file.
type FileKeySelectorApplyConfiguration struct {
+ // The name of the volume mount containing the env file.
VolumeName *string `json:"volumeName,omitempty"`
- Path *string `json:"path,omitempty"`
- Key *string `json:"key,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // The path within the volume from which to select the file.
+ // Must be relative and may not contain the '..' path or start with '..'.
+ Path *string `json:"path,omitempty"`
+ // The key within the env file. An invalid key will prevent the pod from starting.
+ // The keys defined within a source may consist of any printable ASCII characters except '='.
+ // During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+ Key *string `json:"key,omitempty"`
+ // Specify whether the file or its key must be defined. If the file or key
+ // does not exist, then the env var is not published.
+ // If optional is set to true and the specified key does not exist,
+ // the environment variable will not be set in the Pod's containers.
+ //
+ // If optional is set to false and the specified key does not exist,
+ // an error will be returned during Pod creation.
+ Optional *bool `json:"optional,omitempty"`
}
// FileKeySelectorApplyConfiguration constructs a declarative configuration of the FileKeySelector type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
index 355c2c82..a1a57bb7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexpersistentvolumesource.go
@@ -20,12 +20,27 @@ package v1
// FlexPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the FlexPersistentVolumeSource type for use
// with apply.
+//
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
type FlexPersistentVolumeSourceApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- FSType *string `json:"fsType,omitempty"`
+ // driver is the name of the driver to use for this volume.
+ Driver *string `json:"driver,omitempty"`
+ // fsType is the Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ FSType *string `json:"fsType,omitempty"`
+ // secretRef is Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Options map[string]string `json:"options,omitempty"`
+ // readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // options is Optional: this field holds extra command options if any.
+ Options map[string]string `json:"options,omitempty"`
}
// FlexPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
index 08ae9e1b..e9fc2950 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flexvolumesource.go
@@ -20,12 +20,27 @@ package v1
// FlexVolumeSourceApplyConfiguration represents a declarative configuration of the FlexVolumeSource type for use
// with apply.
+//
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
type FlexVolumeSourceApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- FSType *string `json:"fsType,omitempty"`
+ // driver is the name of the driver to use for this volume.
+ Driver *string `json:"driver,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ FSType *string `json:"fsType,omitempty"`
+ // secretRef is Optional: secretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Options map[string]string `json:"options,omitempty"`
+ // readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // options is Optional: this field holds extra command options if any.
+ Options map[string]string `json:"options,omitempty"`
}
// FlexVolumeSourceApplyConfiguration constructs a declarative configuration of the FlexVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
index e4ecbba0..452105d9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/flockervolumesource.go
@@ -20,8 +20,15 @@ package v1
// FlockerVolumeSourceApplyConfiguration represents a declarative configuration of the FlockerVolumeSource type for use
// with apply.
+//
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSourceApplyConfiguration struct {
+	// datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker;
+	// it should be considered deprecated.
DatasetName *string `json:"datasetName,omitempty"`
+ // datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
DatasetUUID *string `json:"datasetUUID,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
index 56c4d03f..5283c30a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gcepersistentdiskvolumesource.go
@@ -20,11 +20,33 @@ package v1
// GCEPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the GCEPersistentDiskVolumeSource type for use
// with apply.
+//
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSourceApplyConfiguration struct {
- PDName *string `json:"pdName,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- Partition *int32 `json:"partition,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ PDName *string `json:"pdName,omitempty"`
+ // fsType is filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // partition is the partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ Partition *int32 `json:"partition,omitempty"`
+ // readOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// GCEPersistentDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the GCEPersistentDiskVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
index 4ed92317..37977d0f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/gitrepovolumesource.go
@@ -20,10 +20,24 @@ package v1
// GitRepoVolumeSourceApplyConfiguration represents a declarative configuration of the GitRepoVolumeSource type for use
// with apply.
+//
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
type GitRepoVolumeSourceApplyConfiguration struct {
+ // repository is the URL
Repository *string `json:"repository,omitempty"`
- Revision *string `json:"revision,omitempty"`
- Directory *string `json:"directory,omitempty"`
+ // revision is the commit hash for the specified revision.
+ Revision *string `json:"revision,omitempty"`
+ // directory is the target directory name.
+ // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ // git repository. Otherwise, if specified, the volume will contain the git repository in
+ // the subdirectory with the given name.
+ Directory *string `json:"directory,omitempty"`
}
// GitRepoVolumeSourceApplyConfiguration constructs a declarative configuration of the GitRepoVolumeSource type for use with
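The deprecation note above describes the recommended replacement: clone into an emptyDir from an init container and mount the same volume into the main container. A hedged sketch using the apply-configuration builders; the repository URL and container images are placeholders:

package example

import (
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

// gitClonePod replaces a gitRepo volume with an emptyDir that an init
// container populates with git before the main container starts.
func gitClonePod() *applycorev1.PodApplyConfiguration {
	repoMount := applycorev1.VolumeMount().WithName("repo").WithMountPath("/repo")

	return applycorev1.Pod("git-clone-example", "default").
		WithSpec(applycorev1.PodSpec().
			// Shared scratch volume standing in for the deprecated gitRepo source.
			WithVolumes(applycorev1.Volume().
				WithName("repo").
				WithEmptyDir(applycorev1.EmptyDirVolumeSource())).
			// Init container clones the repository into the shared volume.
			WithInitContainers(applycorev1.Container().
				WithName("clone").
				WithImage("alpine/git:latest").
				WithArgs("clone", "--single-branch", "https://example.com/my/repo.git", "/repo").
				WithVolumeMounts(repoMount)).
			// Main container sees the checked-out tree under /repo.
			WithContainers(applycorev1.Container().
				WithName("app").
				WithImage("busybox:1.36").
				WithCommand("sh", "-c", "ls /repo && sleep 3600").
				WithVolumeMounts(repoMount)))
}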
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
index c9a23ca5..bd53ab40 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfspersistentvolumesource.go
@@ -20,10 +20,23 @@ package v1
// GlusterfsPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsPersistentVolumeSource type for use
// with apply.
+//
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsPersistentVolumeSourceApplyConfiguration struct {
- EndpointsName *string `json:"endpoints,omitempty"`
- Path *string `json:"path,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // endpoints is the endpoint name that details Glusterfs topology.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ EndpointsName *string `json:"endpoints,omitempty"`
+ // path is the Glusterfs volume path.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ Path *string `json:"path,omitempty"`
+ // readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // endpointsNamespace is the namespace that contains Glusterfs endpoint.
+ // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
EndpointsNamespace *string `json:"endpointsNamespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
index 8c27f8c7..f558c152 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/glusterfsvolumesource.go
@@ -20,10 +20,19 @@ package v1
// GlusterfsVolumeSourceApplyConfiguration represents a declarative configuration of the GlusterfsVolumeSource type for use
// with apply.
+//
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSourceApplyConfiguration struct {
+ // endpoints is the endpoint name that details Glusterfs topology.
EndpointsName *string `json:"endpoints,omitempty"`
- Path *string `json:"path,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // path is the Glusterfs volume path.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ Path *string `json:"path,omitempty"`
+ // readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// GlusterfsVolumeSourceApplyConfiguration constructs a declarative configuration of the GlusterfsVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
index 0f3a8867..40f5f9ec 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/grpcaction.go
@@ -20,8 +20,15 @@ package v1
// GRPCActionApplyConfiguration represents a declarative configuration of the GRPCAction type for use
// with apply.
+//
+// GRPCAction specifies an action involving a GRPC service.
type GRPCActionApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
+ // Port number of the gRPC service. Number must be in the range 1 to 65535.
+ Port *int32 `json:"port,omitempty"`
+ // Service is the name of the service to place in the gRPC HealthCheckRequest
+ // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+ //
+ // If this is not specified, the default behavior is defined by gRPC.
Service *string `json:"service,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
index ec9ea174..687c1eb9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostalias.go
@@ -20,8 +20,13 @@ package v1
// HostAliasApplyConfiguration represents a declarative configuration of the HostAlias type for use
// with apply.
+//
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
type HostAliasApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
+ // IP address of the host file entry.
+ IP *string `json:"ip,omitempty"`
+ // Hostnames for the above IP address.
Hostnames []string `json:"hostnames,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
index 439b5ce2..eeb30f9e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostip.go
@@ -20,7 +20,10 @@ package v1
// HostIPApplyConfiguration represents a declarative configuration of the HostIP type for use
// with apply.
+//
+// HostIP represents a single IP address allocated to the host.
type HostIPApplyConfiguration struct {
+ // IP is the IP address assigned to the host
IP *string `json:"ip,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
index 6a41d67c..a69e71e1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/hostpathvolumesource.go
@@ -24,8 +24,17 @@ import (
// HostPathVolumeSourceApplyConfiguration represents a declarative configuration of the HostPathVolumeSource type for use
// with apply.
+//
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSourceApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
+ // path of the directory on the host.
+ // If the path is a symlink, it will follow the link to the real path.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ Path *string `json:"path,omitempty"`
+ // type for HostPath Volume
+ // Defaults to ""
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
Type *corev1.HostPathType `json:"type,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
index ca61c5ae..3f892c75 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpgetaction.go
@@ -25,11 +25,22 @@ import (
// HTTPGetActionApplyConfiguration represents a declarative configuration of the HTTPGetAction type for use
// with apply.
+//
+// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetActionApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- Port *intstr.IntOrString `json:"port,omitempty"`
- Host *string `json:"host,omitempty"`
- Scheme *corev1.URIScheme `json:"scheme,omitempty"`
+ // Path to access on the HTTP server.
+ Path *string `json:"path,omitempty"`
+ // Name or number of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
+ Port *intstr.IntOrString `json:"port,omitempty"`
+ // Host name to connect to, defaults to the pod IP. You probably want to set
+ // "Host" in httpHeaders instead.
+ Host *string `json:"host,omitempty"`
+ // Scheme to use for connecting to the host.
+ // Defaults to HTTP.
+ Scheme *corev1.URIScheme `json:"scheme,omitempty"`
+ // Custom headers to set in the request. HTTP allows repeated headers.
HTTPHeaders []HTTPHeaderApplyConfiguration `json:"httpHeaders,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
index 25263716..c62c85ba 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/httpheader.go
@@ -20,8 +20,13 @@ package v1
// HTTPHeaderApplyConfiguration represents a declarative configuration of the HTTPHeader type for use
// with apply.
+//
+// HTTPHeader describes a custom header to be used in HTTP probes
type HTTPHeaderApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // The header field name.
+ // This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ Name *string `json:"name,omitempty"`
+ // The header field value
Value *string `json:"value,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
index 9a146e68..90109d49 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go
@@ -24,8 +24,21 @@ import (
// ImageVolumeSourceApplyConfiguration represents a declarative configuration of the ImageVolumeSource type for use
// with apply.
+//
+// ImageVolumeSource represents an image volume resource.
type ImageVolumeSourceApplyConfiguration struct {
- Reference *string `json:"reference,omitempty"`
+ // Required: Image or artifact reference to be used.
+ // Behaves in the same way as pod.spec.containers[*].image.
+ // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // This field is optional to allow higher level config management to default or override
+ // container images in workload controllers like Deployments and StatefulSets.
+ Reference *string `json:"reference,omitempty"`
+ // Policy for pulling OCI objects. Possible values are:
+	// Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
PullPolicy *corev1.PullPolicy `json:"pullPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
index 42f420c5..02900a4d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsipersistentvolumesource.go
@@ -20,18 +20,43 @@ package v1
// ISCSIPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIPersistentVolumeSource type for use
// with apply.
+//
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIPersistentVolumeSourceApplyConfiguration struct {
- TargetPortal *string `json:"targetPortal,omitempty"`
- IQN *string `json:"iqn,omitempty"`
- Lun *int32 `json:"lun,omitempty"`
- ISCSIInterface *string `json:"iscsiInterface,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Portals []string `json:"portals,omitempty"`
- DiscoveryCHAPAuth *bool `json:"chapAuthDiscovery,omitempty"`
- SessionCHAPAuth *bool `json:"chapAuthSession,omitempty"`
- SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
- InitiatorName *string `json:"initiatorName,omitempty"`
+ // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ TargetPortal *string `json:"targetPortal,omitempty"`
+ // iqn is Target iSCSI Qualified Name.
+ IQN *string `json:"iqn,omitempty"`
+ // lun is iSCSI Target Lun number.
+ Lun *int32 `json:"lun,omitempty"`
+ // iscsiInterface is the interface Name that uses an iSCSI transport.
+ // Defaults to 'default' (tcp).
+ ISCSIInterface *string `json:"iscsiInterface,omitempty"`
+ // fsType is the filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ Portals []string `json:"portals,omitempty"`
+ // chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
+ DiscoveryCHAPAuth *bool `json:"chapAuthDiscovery,omitempty"`
+ // chapAuthSession defines whether support iSCSI Session CHAP authentication
+ SessionCHAPAuth *bool `json:"chapAuthSession,omitempty"`
+ // secretRef is the CHAP Secret for iSCSI target and initiator authentication
+ SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // initiatorName is the custom iSCSI Initiator Name.
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ InitiatorName *string `json:"initiatorName,omitempty"`
}
// ISCSIPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
index 61055434..12fe844f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/iscsivolumesource.go
@@ -20,18 +20,43 @@ package v1
// ISCSIVolumeSourceApplyConfiguration represents a declarative configuration of the ISCSIVolumeSource type for use
// with apply.
+//
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIVolumeSourceApplyConfiguration struct {
- TargetPortal *string `json:"targetPortal,omitempty"`
- IQN *string `json:"iqn,omitempty"`
- Lun *int32 `json:"lun,omitempty"`
- ISCSIInterface *string `json:"iscsiInterface,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- Portals []string `json:"portals,omitempty"`
- DiscoveryCHAPAuth *bool `json:"chapAuthDiscovery,omitempty"`
- SessionCHAPAuth *bool `json:"chapAuthSession,omitempty"`
- SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
- InitiatorName *string `json:"initiatorName,omitempty"`
+ // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ TargetPortal *string `json:"targetPortal,omitempty"`
+ // iqn is the target iSCSI Qualified Name.
+ IQN *string `json:"iqn,omitempty"`
+ // lun represents iSCSI Target Lun number.
+ Lun *int32 `json:"lun,omitempty"`
+ // iscsiInterface is the interface Name that uses an iSCSI transport.
+ // Defaults to 'default' (tcp).
+ ISCSIInterface *string `json:"iscsiInterface,omitempty"`
+ // fsType is the filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ Portals []string `json:"portals,omitempty"`
+ // chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported
+ DiscoveryCHAPAuth *bool `json:"chapAuthDiscovery,omitempty"`
+ // chapAuthSession defines whether iSCSI Session CHAP authentication is supported
+ SessionCHAPAuth *bool `json:"chapAuthSession,omitempty"`
+ // secretRef is the CHAP Secret for iSCSI target and initiator authentication
+ SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // initiatorName is the custom iSCSI Initiator Name.
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ InitiatorName *string `json:"initiatorName,omitempty"`
}
// ISCSIVolumeSourceApplyConfiguration constructs a declarative configuration of the ISCSIVolumeSource type for use with
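The newly documented iSCSI fields map one-to-one onto the apply-configuration builders. A minimal sketch of wiring an iSCSI volume into a pod spec follows; the portal address, IQN, and volume name are made-up values.

```go
package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Made-up portal, IQN, and volume name, purely for illustration.
	vol := corev1ac.Volume().
		WithName("iscsi-data").
		WithISCSI(corev1ac.ISCSIVolumeSource().
			WithTargetPortal("10.0.0.10:3260").              // ip_addr:port form
			WithIQN("iqn.2001-04.com.example:storage.lun1"). // target iSCSI Qualified Name
			WithLun(0).
			WithFSType("ext4").
			WithReadOnly(false))

	podSpec := corev1ac.PodSpec().WithVolumes(vol)
	fmt.Println(*podSpec.Volumes[0].Name)
}
```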
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
index c961b079..75ce1130 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/keytopath.go
@@ -20,10 +20,23 @@ package v1
// KeyToPathApplyConfiguration represents a declarative configuration of the KeyToPath type for use
// with apply.
+//
+// Maps a string key to a path within a volume.
type KeyToPathApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // key is the key to project.
+ Key *string `json:"key,omitempty"`
+ // path is the relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
Path *string `json:"path,omitempty"`
- Mode *int32 `json:"mode,omitempty"`
+ // mode is Optional: mode bits used to set permissions on this file.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ Mode *int32 `json:"mode,omitempty"`
}
// KeyToPathApplyConfiguration constructs a declarative configuration of the KeyToPath type for use with
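Since the mode-bits comment above distinguishes octal from decimal values, here is a small sketch of projecting a ConfigMap key with an explicit file mode; the ConfigMap name, key, and path are hypothetical.

```go
package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Hypothetical key and path; 0o444 is Go octal notation, matching the
	// documented 0000-0777 range for mode bits.
	item := corev1ac.KeyToPath().
		WithKey("app.yaml").
		WithPath("config/app.yaml"). // relative path, no leading '/' and no '..'
		WithMode(0o444)

	src := corev1ac.ConfigMapVolumeSource().
		WithName("app-config").
		WithItems(item).
		WithDefaultMode(0o644) // applied to keys without an explicit mode

	fmt.Println(*src.Items[0].Path, *src.Items[0].Mode)
}
```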
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
index f8c18a75..8880934a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecycle.go
@@ -24,10 +24,30 @@ import (
// LifecycleApplyConfiguration represents a declarative configuration of the Lifecycle type for use
// with apply.
+//
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
type LifecycleApplyConfiguration struct {
- PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"`
- PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"`
- StopSignal *corev1.Signal `json:"stopSignal,omitempty"`
+ // PostStart is called immediately after a container is created. If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ PostStart *LifecycleHandlerApplyConfiguration `json:"postStart,omitempty"`
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness/startup probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The Pod's termination grace period countdown begins before the
+ // PreStop hook is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ PreStop *LifecycleHandlerApplyConfiguration `json:"preStop,omitempty"`
+ // StopSignal defines which signal will be sent to a container when it is being stopped.
+ // If not specified, the default is defined by the container runtime in use.
+ // StopSignal can only be set for Pods with a non-empty .spec.os.name
+ StopSignal *corev1.Signal `json:"stopSignal,omitempty"`
}
// LifecycleApplyConfiguration constructs a declarative configuration of the Lifecycle type for use with
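The PostStart/PreStop semantics described above are typically exercised through the Lifecycle builders. A minimal sketch of attaching a PreStop exec hook to a container follows; the container name, image, and command are assumptions.

```go
package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Hypothetical container; the PreStop command simply delays shutdown so
	// in-flight traffic can drain before the termination grace period elapses.
	c := corev1ac.Container().
		WithName("web").
		WithImage("nginx:1.27").
		WithLifecycle(corev1ac.Lifecycle().
			WithPreStop(corev1ac.LifecycleHandler().
				WithExec(corev1ac.ExecAction().
					WithCommand("sh", "-c", "sleep 5"))))

	fmt.Println(c.Lifecycle.PreStop.Exec.Command)
}
```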
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
index b7c706d5..d01d3964 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/lifecyclehandler.go
@@ -20,11 +20,20 @@ package v1
// LifecycleHandlerApplyConfiguration represents a declarative configuration of the LifecycleHandler type for use
// with apply.
+//
+// LifecycleHandler defines a specific action that should be taken in a lifecycle
+// hook. One and only one of the fields, except TCPSocket, must be specified.
type LifecycleHandlerApplyConfiguration struct {
- Exec *ExecActionApplyConfiguration `json:"exec,omitempty"`
- HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"`
+ // Exec specifies a command to execute in the container.
+ Exec *ExecActionApplyConfiguration `json:"exec,omitempty"`
+ // HTTPGet specifies an HTTP GET request to perform.
+ HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"`
+ // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ // for backward compatibility. There is no validation of this field and
+ // lifecycle hooks will fail at runtime when it is specified.
TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"`
- Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"`
+ // Sleep represents a duration that the container should sleep.
+ Sleep *SleepActionApplyConfiguration `json:"sleep,omitempty"`
}
// LifecycleHandlerApplyConfiguration constructs a declarative configuration of the LifecycleHandler type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
index 349a212d..84a9fca3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
@@ -29,10 +29,16 @@ import (
// LimitRangeApplyConfiguration represents a declarative configuration of the LimitRange type for use
// with apply.
+//
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRangeApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec defines the limits enforced.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *LimitRangeSpecApplyConfiguration `json:"spec,omitempty"`
}
// LimitRange constructs a declarative configuration of the LimitRange type for use with
@@ -46,29 +52,14 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration {
return b
}
-// ExtractLimitRange extracts the applied configuration owned by fieldManager from
-// limitRange. If no managedFields are found in limitRange for fieldManager, a
-// LimitRangeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractLimitRangeFrom extracts the applied configuration owned by fieldManager from
+// limitRange for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// limitRange must be a unmodified LimitRange API object that was retrieved from the Kubernetes API.
-// ExtractLimitRange provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractLimitRangeFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractLimitRange(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
- return extractLimitRange(limitRange, fieldManager, "")
-}
-
-// ExtractLimitRangeStatus is the same as ExtractLimitRange except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractLimitRangeStatus(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
- return extractLimitRange(limitRange, fieldManager, "status")
-}
-
-func extractLimitRange(limitRange *corev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) {
+func ExtractLimitRangeFrom(limitRange *corev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) {
b := &LimitRangeApplyConfiguration{}
err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractLimitRange(limitRange *corev1.LimitRange, fieldManager string, subre
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractLimitRange extracts the applied configuration owned by fieldManager from
+// limitRange. If no managedFields are found in limitRange for fieldManager, a
+// LimitRangeApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// limitRange must be an unmodified LimitRange API object that was retrieved from the Kubernetes API.
+// ExtractLimitRange provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractLimitRange(limitRange *corev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
+ return ExtractLimitRangeFrom(limitRange, fieldManager, "")
+}
+
func (b LimitRangeApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
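This refactor replaces the unexported extractLimitRange helper with an exported ExtractLimitRangeFrom that takes the subresource as an argument, with ExtractLimitRange now delegating to it; the same pattern repeats for Namespace and Node below. A minimal sketch of the extract/modify-in-place/apply loop against the new signature follows, assuming a client-go build that already contains this change; the namespace, object name, and field-manager string are made up.

```go
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()

	// Fetch the live object, then extract only the fields owned by our
	// field manager ("" selects the main resource rather than a subresource).
	lr, err := cs.CoreV1().LimitRanges("default").Get(ctx, "mem-limits", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	ac, err := corev1ac.ExtractLimitRangeFrom(lr, "example-manager", "")
	if err != nil {
		panic(err)
	}

	// Modify in place: raise the default container memory limit.
	ac.WithSpec(corev1ac.LimitRangeSpec().WithLimits(
		corev1ac.LimitRangeItem().
			WithType(corev1.LimitTypeContainer).
			WithDefault(corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("256Mi")})))

	// Apply the result back under the same field manager.
	_, err = cs.CoreV1().LimitRanges("default").Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager"})
	if err != nil {
		panic(err)
	}
}
```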
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
index 5ad8ac0e..af3d912d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangeitem.go
@@ -24,12 +24,20 @@ import (
// LimitRangeItemApplyConfiguration represents a declarative configuration of the LimitRangeItem type for use
// with apply.
+//
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItemApplyConfiguration struct {
- Type *corev1.LimitType `json:"type,omitempty"`
- Max *corev1.ResourceList `json:"max,omitempty"`
- Min *corev1.ResourceList `json:"min,omitempty"`
- Default *corev1.ResourceList `json:"default,omitempty"`
- DefaultRequest *corev1.ResourceList `json:"defaultRequest,omitempty"`
+ // Type of resource that this limit applies to.
+ Type *corev1.LimitType `json:"type,omitempty"`
+ // Max usage constraints on this kind by resource name.
+ Max *corev1.ResourceList `json:"max,omitempty"`
+ // Min usage constraints on this kind by resource name.
+ Min *corev1.ResourceList `json:"min,omitempty"`
+ // Default resource requirement limit value by resource name if resource limit is omitted.
+ Default *corev1.ResourceList `json:"default,omitempty"`
+ // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+ DefaultRequest *corev1.ResourceList `json:"defaultRequest,omitempty"`
+ // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
MaxLimitRequestRatio *corev1.ResourceList `json:"maxLimitRequestRatio,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
index 8d69c1c0..198e1178 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrangespec.go
@@ -20,7 +20,10 @@ package v1
// LimitRangeSpecApplyConfiguration represents a declarative configuration of the LimitRangeSpec type for use
// with apply.
+//
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpecApplyConfiguration struct {
+ // Limits is the list of LimitRangeItem objects that are enforced.
Limits []LimitRangeItemApplyConfiguration `json:"limits,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
index fbab4815..1dc8fc70 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go
@@ -20,9 +20,14 @@ package v1
// LinuxContainerUserApplyConfiguration represents a declarative configuration of the LinuxContainerUser type for use
// with apply.
+//
+// LinuxContainerUser represents user identity information in Linux containers
type LinuxContainerUserApplyConfiguration struct {
- UID *int64 `json:"uid,omitempty"`
- GID *int64 `json:"gid,omitempty"`
+ // UID is the primary uid initially attached to the first process in the container
+ UID *int64 `json:"uid,omitempty"`
+ // GID is the primary gid initially attached to the first process in the container
+ GID *int64 `json:"gid,omitempty"`
+ // SupplementalGroups are the supplemental groups initially attached to the first process in the container
SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
index ae5c410a..91c3225e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalanceringress.go
@@ -24,11 +24,26 @@ import (
// LoadBalancerIngressApplyConfiguration represents a declarative configuration of the LoadBalancerIngress type for use
// with apply.
+//
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngressApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- IPMode *corev1.LoadBalancerIPMode `json:"ipMode,omitempty"`
- Ports []PortStatusApplyConfiguration `json:"ports,omitempty"`
+ // IP is set for load-balancer ingress points that are IP based
+ // (typically GCE or OpenStack load-balancers)
+ IP *string `json:"ip,omitempty"`
+ // Hostname is set for load-balancer ingress points that are DNS based
+ // (typically AWS load-balancers)
+ Hostname *string `json:"hostname,omitempty"`
+ // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
+ // Setting this to "VIP" indicates that traffic is delivered to the node with
+ // the destination set to the load-balancer's IP and port.
+ // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
+ // the destination set to the node's IP and node port or the pod's IP and port.
+ // Service implementations may use this information to adjust traffic routing.
+ IPMode *corev1.LoadBalancerIPMode `json:"ipMode,omitempty"`
+ // Ports is a list of records of service ports
+ // If used, every port defined in the service should have an entry in it
+ Ports []PortStatusApplyConfiguration `json:"ports,omitempty"`
}
// LoadBalancerIngressApplyConfiguration constructs a declarative configuration of the LoadBalancerIngress type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
index bb3d616c..dd225514 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/loadbalancerstatus.go
@@ -20,7 +20,11 @@ package v1
// LoadBalancerStatusApplyConfiguration represents a declarative configuration of the LoadBalancerStatus type for use
// with apply.
+//
+// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatusApplyConfiguration struct {
+ // Ingress is a list containing ingress points for the load-balancer.
+ // Traffic intended for the service should be sent to these ingress points.
Ingress []LoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
index c55d6803..2841adb6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localobjectreference.go
@@ -20,7 +20,27 @@ package v1
// LocalObjectReferenceApplyConfiguration represents a declarative configuration of the LocalObjectReference type for use
// with apply.
+//
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
type LocalObjectReferenceApplyConfiguration struct {
+ // Name of the referent.
+ // This field is effectively required, but due to backwards compatibility is
+ // allowed to be empty. Instances of this type with an empty value here are
+ // almost certainly wrong.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
index db711d99..27827ed1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/localvolumesource.go
@@ -20,8 +20,16 @@ package v1
// LocalVolumeSourceApplyConfiguration represents a declarative configuration of the LocalVolumeSource type for use
// with apply.
+//
+// Local represents directly-attached storage with node affinity
type LocalVolumeSourceApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
+ // path is the full path to the volume on the node.
+ // It can be either a directory or block device (disk, partition, ...).
+ Path *string `json:"path,omitempty"`
+ // fsType is the filesystem type to mount.
+ // It applies only when the Path is a block device.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
FSType *string `json:"fsType,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
index 9a1a6af2..4fa86ce0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/modifyvolumestatus.go
@@ -24,9 +24,22 @@ import (
// ModifyVolumeStatusApplyConfiguration represents a declarative configuration of the ModifyVolumeStatus type for use
// with apply.
+//
+// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
type ModifyVolumeStatusApplyConfiguration struct {
- TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"`
- Status *corev1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"`
+ // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled
+ TargetVolumeAttributesClassName *string `json:"targetVolumeAttributesClassName,omitempty"`
+ // status is the status of the ControllerModifyVolume operation. It can be in any of the following states:
+ // - Pending
+ // Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
+ // the specified VolumeAttributesClass not existing.
+ // - InProgress
+ // InProgress indicates that the volume is being modified.
+ // - Infeasible
+ // Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
+ // resolve the error, a valid VolumeAttributesClass needs to be specified.
+ // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
+ Status *corev1.PersistentVolumeClaimModifyVolumeStatus `json:"status,omitempty"`
}
// ModifyVolumeStatusApplyConfiguration constructs a declarative configuration of the ModifyVolumeStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
index 671a3cbc..a5bf0f94 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
@@ -29,11 +29,20 @@ import (
// NamespaceApplyConfiguration represents a declarative configuration of the Namespace type for use
// with apply.
+//
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
type NamespaceApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"`
- Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the behavior of the Namespace.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *NamespaceSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status describes the current status of a Namespace.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *NamespaceStatusApplyConfiguration `json:"status,omitempty"`
}
// Namespace constructs a declarative configuration of the Namespace type for use with
@@ -46,6 +55,26 @@ func Namespace(name string) *NamespaceApplyConfiguration {
return b
}
+// ExtractNamespaceFrom extracts the applied configuration owned by fieldManager from
+// namespace for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// namespace must be an unmodified Namespace API object that was retrieved from the Kubernetes API.
+// ExtractNamespaceFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractNamespaceFrom(namespace *corev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) {
+ b := &NamespaceApplyConfiguration{}
+ err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(namespace.Name)
+
+ b.WithKind("Namespace")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractNamespace extracts the applied configuration owned by fieldManager from
// namespace. If no managedFields are found in namespace for fieldManager, a
// NamespaceApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func Namespace(name string) *NamespaceApplyConfiguration {
// ExtractNamespace provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractNamespace(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
- return extractNamespace(namespace, fieldManager, "")
+ return ExtractNamespaceFrom(namespace, fieldManager, "")
}
-// ExtractNamespaceStatus is the same as ExtractNamespace except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractNamespaceStatus extracts the applied configuration owned by fieldManager from
+// namespace for the status subresource.
func ExtractNamespaceStatus(namespace *corev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
- return extractNamespace(namespace, fieldManager, "status")
+ return ExtractNamespaceFrom(namespace, fieldManager, "status")
}
-func extractNamespace(namespace *corev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) {
- b := &NamespaceApplyConfiguration{}
- err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(namespace.Name)
-
- b.WithKind("Namespace")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b NamespaceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
index 82b4cc1c..df2bfd2f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacecondition.go
@@ -25,12 +25,19 @@ import (
// NamespaceConditionApplyConfiguration represents a declarative configuration of the NamespaceCondition type for use
// with apply.
+//
+// NamespaceCondition contains details about state of namespace.
type NamespaceConditionApplyConfiguration struct {
- Type *corev1.NamespaceConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of namespace controller condition.
+ Type *corev1.NamespaceConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // Human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// NamespaceConditionApplyConfiguration constructs a declarative configuration of the NamespaceCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
index 1f8fcaf9..48813e2b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacespec.go
@@ -24,7 +24,11 @@ import (
// NamespaceSpecApplyConfiguration represents a declarative configuration of the NamespaceSpec type for use
// with apply.
+//
+// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpecApplyConfiguration struct {
+ // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+ // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
Finalizers []corev1.FinalizerName `json:"finalizers,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
index 1484be68..cde2cd1a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespacestatus.go
@@ -24,8 +24,13 @@ import (
// NamespaceStatusApplyConfiguration represents a declarative configuration of the NamespaceStatus type for use
// with apply.
+//
+// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatusApplyConfiguration struct {
- Phase *corev1.NamespacePhase `json:"phase,omitempty"`
+ // Phase is the current lifecycle phase of the namespace.
+ // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+ Phase *corev1.NamespacePhase `json:"phase,omitempty"`
+ // Represents the latest available observations of a namespace's current state.
Conditions []NamespaceConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
index ed49a87a..a539c407 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nfsvolumesource.go
@@ -20,10 +20,20 @@ package v1
// NFSVolumeSourceApplyConfiguration represents a declarative configuration of the NFSVolumeSource type for use
// with apply.
+//
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSourceApplyConfiguration struct {
- Server *string `json:"server,omitempty"`
- Path *string `json:"path,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // server is the hostname or IP address of the NFS server.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ Server *string `json:"server,omitempty"`
+ // path that is exported by the NFS server.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ Path *string `json:"path,omitempty"`
+ // readOnly here will force the NFS export to be mounted with read-only permissions.
+ // Defaults to false.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// NFSVolumeSourceApplyConfiguration constructs a declarative configuration of the NFSVolumeSource type for use with
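A small sketch of the NFS fields documented above, expressed through the volume builders; the server hostname and export path are placeholders.

```go
package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Made-up server and export path, just to show the field mapping.
	vol := corev1ac.Volume().
		WithName("shared-data").
		WithNFS(corev1ac.NFSVolumeSource().
			WithServer("nfs.example.com").
			WithPath("/exports/shared").
			WithReadOnly(true)) // mount the export read-only

	fmt.Println(*vol.NFS.Server, *vol.NFS.Path)
}
```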
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
index 3682e62e..ff45b899 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
@@ -29,11 +29,22 @@ import (
// NodeApplyConfiguration represents a declarative configuration of the Node type for use
// with apply.
+//
+// Node is a worker node in Kubernetes.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
type NodeApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"`
- Status *NodeStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the behavior of a node.
+ // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *NodeSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the node.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *NodeStatusApplyConfiguration `json:"status,omitempty"`
}
// Node constructs a declarative configuration of the Node type for use with
@@ -46,6 +57,26 @@ func Node(name string) *NodeApplyConfiguration {
return b
}
+// ExtractNodeFrom extracts the applied configuration owned by fieldManager from
+// node for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// node must be an unmodified Node API object that was retrieved from the Kubernetes API.
+// ExtractNodeFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractNodeFrom(node *corev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
+ b := &NodeApplyConfiguration{}
+ err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(node.Name)
+
+ b.WithKind("Node")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractNode extracts the applied configuration owned by fieldManager from
// node. If no managedFields are found in node for fieldManager, a
// NodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +87,16 @@ func Node(name string) *NodeApplyConfiguration {
// ExtractNode provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractNode(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
- return extractNode(node, fieldManager, "")
+ return ExtractNodeFrom(node, fieldManager, "")
}
-// ExtractNodeStatus is the same as ExtractNode except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractNodeStatus extracts the applied configuration owned by fieldManager from
+// node for the status subresource.
func ExtractNodeStatus(node *corev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
- return extractNode(node, fieldManager, "status")
+ return ExtractNodeFrom(node, fieldManager, "status")
}
-func extractNode(node *corev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
- b := &NodeApplyConfiguration{}
- err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(node.Name)
-
- b.WithKind("Node")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b NodeApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
index 779fe0e2..0eb14a59 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaddress.go
@@ -24,9 +24,13 @@ import (
// NodeAddressApplyConfiguration represents a declarative configuration of the NodeAddress type for use
// with apply.
+//
+// NodeAddress contains information for the node's address.
type NodeAddressApplyConfiguration struct {
- Type *corev1.NodeAddressType `json:"type,omitempty"`
- Address *string `json:"address,omitempty"`
+ // Node address type, one of Hostname, ExternalIP or InternalIP.
+ Type *corev1.NodeAddressType `json:"type,omitempty"`
+ // The node address.
+ Address *string `json:"address,omitempty"`
}
// NodeAddressApplyConfiguration constructs a declarative configuration of the NodeAddress type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
index 5d11d746..099cd0aa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeaffinity.go
@@ -20,8 +20,24 @@ package v1
// NodeAffinityApplyConfiguration represents a declarative configuration of the NodeAffinity type for use
// with apply.
+//
+// Node affinity is a group of node affinity scheduling rules.
type NodeAffinityApplyConfiguration struct {
- RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ RequiredDuringSchedulingIgnoredDuringExecution *NodeSelectorApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
}
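The required-versus-preferred semantics documented above are easiest to see with the affinity builders. A minimal sketch follows; the label keys, values, and weight are illustrative assumptions.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	aff := corev1ac.NodeAffinity().
		// Hard requirement: only amd64 or arm64 nodes are eligible.
		WithRequiredDuringSchedulingIgnoredDuringExecution(corev1ac.NodeSelector().
			WithNodeSelectorTerms(corev1ac.NodeSelectorTerm().
				WithMatchExpressions(corev1ac.NodeSelectorRequirement().
					WithKey("kubernetes.io/arch").
					WithOperator(corev1.NodeSelectorOpIn).
					WithValues("amd64", "arm64")))).
		// Soft preference: among eligible nodes, prefer ones labeled disktype=ssd.
		WithPreferredDuringSchedulingIgnoredDuringExecution(corev1ac.PreferredSchedulingTerm().
			WithWeight(50).
			WithPreference(corev1ac.NodeSelectorTerm().
				WithMatchExpressions(corev1ac.NodeSelectorRequirement().
					WithKey("disktype").
					WithOperator(corev1.NodeSelectorOpIn).
					WithValues("ssd"))))

	fmt.Println(len(aff.PreferredDuringSchedulingIgnoredDuringExecution))
}
```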
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
index e3a2d3bb..0362ec20 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodecondition.go
@@ -25,13 +25,21 @@ import (
// NodeConditionApplyConfiguration represents a declarative configuration of the NodeCondition type for use
// with apply.
+//
+// NodeCondition contains condition information for a node.
type NodeConditionApplyConfiguration struct {
- Type *corev1.NodeConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of node condition.
+ Type *corev1.NodeConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time we got an update on a given condition.
+ LastHeartbeatTime *metav1.Time `json:"lastHeartbeatTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // (brief) reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // Human readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// NodeConditionApplyConfiguration constructs a declarative configuration of the NodeCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
index 00a671fc..ae63ae36 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigsource.go
@@ -20,7 +20,11 @@ package v1
// NodeConfigSourceApplyConfiguration represents a declarative configuration of the NodeConfigSource type for use
// with apply.
+//
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+// This API is deprecated since 1.22
type NodeConfigSourceApplyConfiguration struct {
+ // ConfigMap is a reference to a Node's ConfigMap
ConfigMap *ConfigMapNodeConfigSourceApplyConfiguration `json:"configMap,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
index d5ccc45c..88c0b3c3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeconfigstatus.go
@@ -20,11 +20,48 @@ package v1
// NodeConfigStatusApplyConfiguration represents a declarative configuration of the NodeConfigStatus type for use
// with apply.
+//
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatusApplyConfiguration struct {
- Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"`
- Active *NodeConfigSourceApplyConfiguration `json:"active,omitempty"`
+ // Assigned reports the checkpointed config the node will try to use.
+ // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+ // config payload to local disk, along with a record indicating intended
+ // config. The node refers to this record to choose its config checkpoint, and
+ // reports this record in Assigned. Assigned only updates in the status after
+ // the record has been checkpointed to disk. When the Kubelet is restarted,
+ // it tries to make the Assigned config the Active config by loading and
+ // validating the checkpointed payload identified by Assigned.
+ Assigned *NodeConfigSourceApplyConfiguration `json:"assigned,omitempty"`
+ // Active reports the checkpointed config the node is actively using.
+ // Active will represent either the current version of the Assigned config,
+ // or the current LastKnownGood config, depending on whether attempting to use the
+ // Assigned config results in an error.
+ Active *NodeConfigSourceApplyConfiguration `json:"active,omitempty"`
+ // LastKnownGood reports the checkpointed config the node will fall back to
+ // when it encounters an error attempting to use the Assigned config.
+ // The Assigned config becomes the LastKnownGood config when the node determines
+ // that the Assigned config is stable and correct.
+ // This is currently implemented as a 10-minute soak period starting when the local
+ // record of Assigned config is updated. If the Assigned config is Active at the end
+ // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+ // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+ // because the local default config is always assumed good.
+ // You should not make assumptions about the node's method of determining config stability
+ // and correctness, as this may change or become configurable in the future.
LastKnownGood *NodeConfigSourceApplyConfiguration `json:"lastKnownGood,omitempty"`
- Error *string `json:"error,omitempty"`
+ // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+ // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+ // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+ // to load or validate the Assigned config, etc.
+ // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+ // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+ // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+ // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+ // by fixing the config assigned in Spec.ConfigSource.
+ // You can find additional information for debugging by searching the error message in the Kubelet log.
+ // Error is a human-readable description of the error state; machines can check whether or not Error
+ // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+ Error *string `json:"error,omitempty"`
}
// NodeConfigStatusApplyConfiguration constructs a declarative configuration of the NodeConfigStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
index 11228b36..b36d2ec3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodedaemonendpoints.go
@@ -20,7 +20,10 @@ package v1
// NodeDaemonEndpointsApplyConfiguration represents a declarative configuration of the NodeDaemonEndpoints type for use
// with apply.
+//
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpointsApplyConfiguration struct {
+ // Endpoint on which Kubelet is listening.
KubeletEndpoint *DaemonEndpointApplyConfiguration `json:"kubeletEndpoint,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go
index 678b0e36..c3b46719 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go
@@ -20,7 +20,12 @@ package v1
// NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use
// with apply.
+//
+// NodeFeatures describes the set of features implemented by the CRI implementation.
+// The features contained in the NodeFeatures should depend only on the cri implementation
+// independent of runtime handlers.
type NodeFeaturesApplyConfiguration struct {
+ // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
index c7c66497..afebc7fc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go
@@ -20,8 +20,13 @@ package v1
// NodeRuntimeHandlerApplyConfiguration represents a declarative configuration of the NodeRuntimeHandler type for use
// with apply.
+//
+// NodeRuntimeHandler is a set of runtime handler information.
type NodeRuntimeHandlerApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Runtime handler name.
+ // Empty for the default runtime handler.
+ Name *string `json:"name,omitempty"`
+ // Supported features.
Features *NodeRuntimeHandlerFeaturesApplyConfiguration `json:"features,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
index a295b609..47866f8c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go
@@ -20,9 +20,13 @@ package v1
// NodeRuntimeHandlerFeaturesApplyConfiguration represents a declarative configuration of the NodeRuntimeHandlerFeatures type for use
// with apply.
+//
+// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
type NodeRuntimeHandlerFeaturesApplyConfiguration struct {
+ // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty"`
- UserNamespaces *bool `json:"userNamespaces,omitempty"`
+ // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
+ UserNamespaces *bool `json:"userNamespaces,omitempty"`
}
// NodeRuntimeHandlerFeaturesApplyConfiguration constructs a declarative configuration of the NodeRuntimeHandlerFeatures type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
index 6eab1097..3809873c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselector.go
@@ -20,7 +20,12 @@ package v1
// NodeSelectorApplyConfiguration represents a declarative configuration of the NodeSelector type for use
// with apply.
+//
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
type NodeSelectorApplyConfiguration struct {
+ // Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTermApplyConfiguration `json:"nodeSelectorTerms,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
index 4dcbc9a2..4428d85c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorrequirement.go
@@ -24,10 +24,21 @@ import (
// NodeSelectorRequirementApplyConfiguration represents a declarative configuration of the NodeSelectorRequirement type for use
// with apply.
+//
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
type NodeSelectorRequirementApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // The label key that the selector applies to.
+ Key *string `json:"key,omitempty"`
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
Operator *corev1.NodeSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ Values []string `json:"values,omitempty"`
}
// NodeSelectorRequirementApplyConfiguration constructs a declarative configuration of the NodeSelectorRequirement type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
index 9d0d780f..98c1739b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeselectorterm.go
@@ -20,9 +20,15 @@ package v1
// NodeSelectorTermApplyConfiguration represents a declarative configuration of the NodeSelectorTerm type for use
// with apply.
+//
+// A null or empty node selector term matches no objects. Its
+// requirements are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
type NodeSelectorTermApplyConfiguration struct {
+ // A list of node selector requirements by node's labels.
MatchExpressions []NodeSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
- MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"`
+ // A list of node selector requirements by node's fields.
+ MatchFields []NodeSelectorRequirementApplyConfiguration `json:"matchFields,omitempty"`
}
// NodeSelectorTermApplyConfiguration constructs a declarative configuration of the NodeSelectorTerm type for use with
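A minimal sketch of building the selector described above with these apply configurations, showing that terms are ORed while the requirements inside a term are ANDed (the label keys and values are placeholders):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// One term with two requirements: match amd64 or arm64 nodes that are
	// not control-plane nodes. Adding more terms would OR them together.
	selector := corev1ac.NodeSelector().WithNodeSelectorTerms(
		corev1ac.NodeSelectorTerm().WithMatchExpressions(
			corev1ac.NodeSelectorRequirement().
				WithKey("kubernetes.io/arch").
				WithOperator(corev1.NodeSelectorOpIn).
				WithValues("amd64", "arm64"),
			corev1ac.NodeSelectorRequirement().
				WithKey("node-role.kubernetes.io/control-plane").
				WithOperator(corev1.NodeSelectorOpDoesNotExist),
		),
	)
	fmt.Println(len(selector.NodeSelectorTerms)) // 1
}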
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
index 8ac34971..b53ed21d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodespec.go
@@ -20,14 +20,27 @@ package v1
// NodeSpecApplyConfiguration represents a declarative configuration of the NodeSpec type for use
// with apply.
+//
+// NodeSpec describes the attributes that a node is created with.
type NodeSpecApplyConfiguration struct {
- PodCIDR *string `json:"podCIDR,omitempty"`
- PodCIDRs []string `json:"podCIDRs,omitempty"`
- ProviderID *string `json:"providerID,omitempty"`
- Unschedulable *bool `json:"unschedulable,omitempty"`
- Taints []TaintApplyConfiguration `json:"taints,omitempty"`
- ConfigSource *NodeConfigSourceApplyConfiguration `json:"configSource,omitempty"`
- DoNotUseExternalID *string `json:"externalID,omitempty"`
+ // PodCIDR represents the pod IP range assigned to the node.
+ PodCIDR *string `json:"podCIDR,omitempty"`
+ // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
+ // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
+ // each of IPv4 and IPv6.
+ PodCIDRs []string `json:"podCIDRs,omitempty"`
+ // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+ ProviderID *string `json:"providerID,omitempty"`
+ // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+ Unschedulable *bool `json:"unschedulable,omitempty"`
+ // If specified, the node's taints.
+ Taints []TaintApplyConfiguration `json:"taints,omitempty"`
+ // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.
+ ConfigSource *NodeConfigSourceApplyConfiguration `json:"configSource,omitempty"`
+ // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+ // see: https://issues.k8s.io/61966
+ DoNotUseExternalID *string `json:"externalID,omitempty"`
}
// NodeSpecApplyConfiguration constructs a declarative configuration of the NodeSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
index 3859ccd5..1eec7057 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodestatus.go
@@ -24,20 +24,52 @@ import (
// NodeStatusApplyConfiguration represents a declarative configuration of the NodeStatus type for use
// with apply.
+//
+// NodeStatus is information about the current status of a node.
type NodeStatusApplyConfiguration struct {
- Capacity *corev1.ResourceList `json:"capacity,omitempty"`
- Allocatable *corev1.ResourceList `json:"allocatable,omitempty"`
- Phase *corev1.NodePhase `json:"phase,omitempty"`
- Conditions []NodeConditionApplyConfiguration `json:"conditions,omitempty"`
- Addresses []NodeAddressApplyConfiguration `json:"addresses,omitempty"`
+ // Capacity represents the total resources of a node.
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
+ // Allocatable represents the resources of a node that are available for scheduling.
+ // Defaults to Capacity.
+ Allocatable *corev1.ResourceList `json:"allocatable,omitempty"`
+ // NodePhase is the recently observed lifecycle phase of the node.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+ // The field is never populated, and is now deprecated.
+ Phase *corev1.NodePhase `json:"phase,omitempty"`
+ // Conditions is an array of current observed node conditions.
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#condition
+ Conditions []NodeConditionApplyConfiguration `json:"conditions,omitempty"`
+ // List of addresses reachable to the node.
+ // Queried from cloud provider, if available.
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
+ // Note: This field is declared as mergeable, but the merge key is not sufficiently
+ // unique, which can cause data corruption when it is merged. Callers should instead
+ // use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
+ // Consumers should assume that addresses can change during the
+ // lifetime of a Node. However, there are some exceptions where this may not
+ // be possible, such as Pods that inherit a Node's address in its own status or
+ // consumers of the downward API (status.hostIP).
+ Addresses []NodeAddressApplyConfiguration `json:"addresses,omitempty"`
+ // Endpoints of daemons running on the Node.
DaemonEndpoints *NodeDaemonEndpointsApplyConfiguration `json:"daemonEndpoints,omitempty"`
- NodeInfo *NodeSystemInfoApplyConfiguration `json:"nodeInfo,omitempty"`
- Images []ContainerImageApplyConfiguration `json:"images,omitempty"`
- VolumesInUse []corev1.UniqueVolumeName `json:"volumesInUse,omitempty"`
- VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"`
- Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"`
+ // Set of ids/uuids to uniquely identify the node.
+ // More info: https://kubernetes.io/docs/reference/node/node-status/#info
+ NodeInfo *NodeSystemInfoApplyConfiguration `json:"nodeInfo,omitempty"`
+ // List of container images on this node
+ Images []ContainerImageApplyConfiguration `json:"images,omitempty"`
+ // List of attachable volumes in use (mounted) by the node.
+ VolumesInUse []corev1.UniqueVolumeName `json:"volumesInUse,omitempty"`
+ // List of volumes that are attached to the node.
+ VolumesAttached []AttachedVolumeApplyConfiguration `json:"volumesAttached,omitempty"`
+ // Status of the config assigned to the node via the dynamic Kubelet config feature.
+ Config *NodeConfigStatusApplyConfiguration `json:"config,omitempty"`
+ // The available runtime handlers.
RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"`
- Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"`
+ // Features describes the set of features implemented by the CRI implementation.
+ Features *NodeFeaturesApplyConfiguration `json:"features,omitempty"`
+ // DeclaredFeatures represents the features related to feature gates that are declared by the node.
+ DeclaredFeatures []string `json:"declaredFeatures,omitempty"`
}
// NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with
@@ -176,3 +208,13 @@ func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConf
b.Features = value
return b
}
+
+// WithDeclaredFeatures adds the given value to the DeclaredFeatures field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the DeclaredFeatures field.
+func (b *NodeStatusApplyConfiguration) WithDeclaredFeatures(values ...string) *NodeStatusApplyConfiguration {
+ for i := range values {
+ b.DeclaredFeatures = append(b.DeclaredFeatures, values[i])
+ }
+ return b
+}
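A minimal sketch of the new WithDeclaredFeatures builder; the feature names are placeholders, and repeated calls append rather than replace:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Each call appends to the DeclaredFeatures slice.
	status := corev1ac.NodeStatus().
		WithDeclaredFeatures("ExampleFeatureA", "ExampleFeatureB").
		WithDeclaredFeatures("ExampleFeatureC")
	fmt.Println(status.DeclaredFeatures) // [ExampleFeatureA ExampleFeatureB ExampleFeatureC]
}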
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go
index 2a7a2e68..d9548e6b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodeswapstatus.go
@@ -20,7 +20,10 @@ package v1
// NodeSwapStatusApplyConfiguration represents a declarative configuration of the NodeSwapStatus type for use
// with apply.
+//
+// NodeSwapStatus represents swap memory information.
type NodeSwapStatusApplyConfiguration struct {
+ // Total amount of swap memory in bytes.
Capacity *int64 `json:"capacity,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
index 55effd71..70b8a781 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/nodesysteminfo.go
@@ -20,18 +20,35 @@ package v1
// NodeSystemInfoApplyConfiguration represents a declarative configuration of the NodeSystemInfo type for use
// with apply.
+//
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfoApplyConfiguration struct {
- MachineID *string `json:"machineID,omitempty"`
- SystemUUID *string `json:"systemUUID,omitempty"`
- BootID *string `json:"bootID,omitempty"`
- KernelVersion *string `json:"kernelVersion,omitempty"`
- OSImage *string `json:"osImage,omitempty"`
- ContainerRuntimeVersion *string `json:"containerRuntimeVersion,omitempty"`
- KubeletVersion *string `json:"kubeletVersion,omitempty"`
- KubeProxyVersion *string `json:"kubeProxyVersion,omitempty"`
- OperatingSystem *string `json:"operatingSystem,omitempty"`
- Architecture *string `json:"architecture,omitempty"`
- Swap *NodeSwapStatusApplyConfiguration `json:"swap,omitempty"`
+ // MachineID reported by the node. For unique machine identification
+ // in the cluster this field is preferred. Learn more from man(5)
+ // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+ MachineID *string `json:"machineID,omitempty"`
+ // SystemUUID reported by the node. For unique machine identification
+ // MachineID is preferred. This field is specific to Red Hat hosts
+ // https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
+ SystemUUID *string `json:"systemUUID,omitempty"`
+ // Boot ID reported by the node.
+ BootID *string `json:"bootID,omitempty"`
+ // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+ KernelVersion *string `json:"kernelVersion,omitempty"`
+ // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+ OSImage *string `json:"osImage,omitempty"`
+ // ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).
+ ContainerRuntimeVersion *string `json:"containerRuntimeVersion,omitempty"`
+ // Kubelet Version reported by the node.
+ KubeletVersion *string `json:"kubeletVersion,omitempty"`
+ // Deprecated: KubeProxy Version reported by the node.
+ KubeProxyVersion *string `json:"kubeProxyVersion,omitempty"`
+ // The Operating System reported by the node
+ OperatingSystem *string `json:"operatingSystem,omitempty"`
+ // The Architecture reported by the node
+ Architecture *string `json:"architecture,omitempty"`
+ // Swap Info reported by the node.
+ Swap *NodeSwapStatusApplyConfiguration `json:"swap,omitempty"`
}
// NodeSystemInfoApplyConfiguration constructs a declarative configuration of the NodeSystemInfo type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
index c129c998..e941ac99 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectfieldselector.go
@@ -20,9 +20,13 @@ package v1
// ObjectFieldSelectorApplyConfiguration represents a declarative configuration of the ObjectFieldSelector type for use
// with apply.
+//
+// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelectorApplyConfiguration struct {
+ // Version of the schema the FieldPath is written in terms of, defaults to "v1".
APIVersion *string `json:"apiVersion,omitempty"`
- FieldPath *string `json:"fieldPath,omitempty"`
+ // Path of the field to select in the specified API version.
+ FieldPath *string `json:"fieldPath,omitempty"`
}
// ObjectFieldSelectorApplyConfiguration constructs a declarative configuration of the ObjectFieldSelector type for use with
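A minimal sketch of the usual use of an object field selector, wiring a downward-API environment variable through the corresponding apply configurations:

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Expose the pod's own name to the container via the downward API.
	env := corev1ac.EnvVar().
		WithName("POD_NAME").
		WithValueFrom(corev1ac.EnvVarSource().
			WithFieldRef(corev1ac.ObjectFieldSelector().
				WithFieldPath("metadata.name"))) // apiVersion defaults to "v1"
	fmt.Println(*env.Name)
}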
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
index 4cd3f226..1de02cd1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/objectreference.go
@@ -24,14 +24,50 @@ import (
// ObjectReferenceApplyConfiguration represents a declarative configuration of the ObjectReference type for use
// with apply.
+//
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
+// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
+// and the version of the actual struct is irrelevant.
+// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
type ObjectReferenceApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
- APIVersion *string `json:"apiVersion,omitempty"`
- ResourceVersion *string `json:"resourceVersion,omitempty"`
- FieldPath *string `json:"fieldPath,omitempty"`
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // Namespace of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+ Namespace *string `json:"namespace,omitempty"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // UID of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+ UID *types.UID `json:"uid,omitempty"`
+ // API version of the referent.
+ APIVersion *string `json:"apiVersion,omitempty"`
+ // Specific resourceVersion to which this reference is made, if any.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ ResourceVersion *string `json:"resourceVersion,omitempty"`
+ // If referring to a piece of an object instead of an entire object, this string
+ // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+ // For example, if the object reference is to a container within a pod, this would take on a value like:
+ // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+ // the event) or if no container name is specified "spec.containers[2]" (container with
+ // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+ // referencing a part of an object.
+ // TODO: this design is not final and this field is subject to change in the future.
+ FieldPath *string `json:"fieldPath,omitempty"`
}
// ObjectReferenceApplyConfiguration constructs a declarative configuration of the ObjectReference type for use with
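A minimal sketch of the guidance above: a new API would define its own narrow, purpose-built reference type rather than embedding ObjectReference. The type below is purely illustrative:

package main

import "fmt"

// ConfigMapKeyRef identifies a single key in a ConfigMap in a known namespace.
// Unlike ObjectReference, every field is honored and validated by the owning API.
type ConfigMapKeyRef struct {
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
	Key       string `json:"key"`
}

func main() {
	ref := ConfigMapKeyRef{Namespace: "default", Name: "app-config", Key: "log-level"}
	fmt.Printf("%s/%s[%s]\n", ref.Namespace, ref.Name, ref.Key)
}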
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
index 25a0c69d..4ef8ed4b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
@@ -29,11 +29,24 @@ import (
// PersistentVolumeApplyConfiguration represents a declarative configuration of the PersistentVolume type for use
// with apply.
+//
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolumeApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"`
+ // spec defines a specification of a persistent volume owned by the cluster.
+ // Provisioned by an administrator.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+ Spec *PersistentVolumeSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents the current information/status for the persistent volume.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+ Status *PersistentVolumeStatusApplyConfiguration `json:"status,omitempty"`
}
// PersistentVolume constructs a declarative configuration of the PersistentVolume type for use with
@@ -46,6 +59,26 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
return b
}
+// ExtractPersistentVolumeFrom extracts the applied configuration owned by fieldManager from
+// persistentVolume for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// persistentVolume must be an unmodified PersistentVolume API object that was retrieved from the Kubernetes API.
+// ExtractPersistentVolumeFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPersistentVolumeFrom(persistentVolume *corev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) {
+ b := &PersistentVolumeApplyConfiguration{}
+ err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(persistentVolume.Name)
+
+ b.WithKind("PersistentVolume")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractPersistentVolume extracts the applied configuration owned by fieldManager from
// persistentVolume. If no managedFields are found in persistentVolume for fieldManager, a
// PersistentVolumeApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +89,16 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
// ExtractPersistentVolume provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
- return extractPersistentVolume(persistentVolume, fieldManager, "")
+ return ExtractPersistentVolumeFrom(persistentVolume, fieldManager, "")
}
-// ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPersistentVolumeStatus extracts the applied configuration owned by fieldManager from
+// persistentVolume for the status subresource.
func ExtractPersistentVolumeStatus(persistentVolume *corev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
- return extractPersistentVolume(persistentVolume, fieldManager, "status")
+ return ExtractPersistentVolumeFrom(persistentVolume, fieldManager, "status")
}
-func extractPersistentVolume(persistentVolume *corev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) {
- b := &PersistentVolumeApplyConfiguration{}
- err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(persistentVolume.Name)
-
- b.WithKind("PersistentVolume")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b PersistentVolumeApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
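A minimal sketch of the extract/modify-in-place/apply workflow these helpers enable, assuming in-cluster credentials and a field manager named "example-manager" (the PV name and label are placeholders):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// relabelPV re-applies only the fields this manager owns, adding a label.
func relabelPV(ctx context.Context, cs kubernetes.Interface, pvName string) error {
	pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract the fields previously applied by "example-manager"...
	ac, err := corev1ac.ExtractPersistentVolume(pv, "example-manager")
	if err != nil {
		return err
	}
	// ...modify the configuration in place...
	ac.WithLabels(map[string]string{"example.com/tier": "fast"})
	// ...and apply it back under the same field manager.
	_, err = cs.CoreV1().PersistentVolumes().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}

func main() {
	cfg, err := rest.InClusterConfig() // out-of-cluster callers would use clientcmd instead
	if err != nil {
		panic(err)
	}
	if err := relabelPV(context.Background(), kubernetes.NewForConfigOrDie(cfg), "example-pv"); err != nil {
		panic(err)
	}
}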
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
index e42d443b..d77417f3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
@@ -29,11 +29,20 @@ import (
// PersistentVolumeClaimApplyConfiguration represents a declarative configuration of the PersistentVolumeClaim type for use
// with apply.
+//
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
type PersistentVolumeClaimApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"`
+ // spec defines the desired characteristics of a volume requested by a pod author.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents the current information/status of a persistent volume claim.
+ // Read-only.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ Status *PersistentVolumeClaimStatusApplyConfiguration `json:"status,omitempty"`
}
// PersistentVolumeClaim constructs a declarative configuration of the PersistentVolumeClaim type for use with
@@ -47,6 +56,27 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo
return b
}
+// ExtractPersistentVolumeClaimFrom extracts the applied configuration owned by fieldManager from
+// persistentVolumeClaim for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// persistentVolumeClaim must be an unmodified PersistentVolumeClaim API object that was retrieved from the Kubernetes API.
+// ExtractPersistentVolumeClaimFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPersistentVolumeClaimFrom(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) {
+ b := &PersistentVolumeClaimApplyConfiguration{}
+ err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(persistentVolumeClaim.Name)
+ b.WithNamespace(persistentVolumeClaim.Namespace)
+
+ b.WithKind("PersistentVolumeClaim")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractPersistentVolumeClaim extracts the applied configuration owned by fieldManager from
// persistentVolumeClaim. If no managedFields are found in persistentVolumeClaim for fieldManager, a
// PersistentVolumeClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +87,16 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo
// ExtractPersistentVolumeClaim provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
- return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "")
+ return ExtractPersistentVolumeClaimFrom(persistentVolumeClaim, fieldManager, "")
}
-// ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPersistentVolumeClaimStatus extracts the applied configuration owned by fieldManager from
+// persistentVolumeClaim for the status subresource.
func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
- return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status")
+ return ExtractPersistentVolumeClaimFrom(persistentVolumeClaim, fieldManager, "status")
}
-func extractPersistentVolumeClaim(persistentVolumeClaim *corev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) {
- b := &PersistentVolumeClaimApplyConfiguration{}
- err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(persistentVolumeClaim.Name)
- b.WithNamespace(persistentVolumeClaim.Namespace)
-
- b.WithKind("PersistentVolumeClaim")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b PersistentVolumeClaimApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
index 40025d53..03b32abc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimcondition.go
@@ -25,13 +25,26 @@ import (
// PersistentVolumeClaimConditionApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimCondition type for use
// with apply.
+//
+// PersistentVolumeClaimCondition contains details about the state of a PVC.
type PersistentVolumeClaimConditionApplyConfiguration struct {
- Type *corev1.PersistentVolumeClaimConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type is the type of the condition.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
+ Type *corev1.PersistentVolumeClaimConditionType `json:"type,omitempty"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // lastProbeTime is the time we probed the condition.
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ // lastTransitionTime is the time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // reason is a unique, short, machine-understandable string that gives the reason
+ // for the condition's last transition. If it reports "Resizing", the underlying
+ // persistent volume is being resized.
+ Reason *string `json:"reason,omitempty"`
+ // message is the human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PersistentVolumeClaimConditionApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
index 2c2be16b..8710769b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
@@ -25,16 +25,73 @@ import (
// PersistentVolumeClaimSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimSpec type for use
// with apply.
+//
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
type PersistentVolumeClaimSpecApplyConfiguration struct {
- AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
- VolumeName *string `json:"volumeName,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
- DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"`
- DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"`
- VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
+ // accessModes contains the desired access modes the volume should have.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // selector is a label query over volumes to consider for binding.
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // resources represents the minimum resources the volume should have.
+ // Users are allowed to specify resource requirements
+ // that are lower than previous value but must still be higher than capacity recorded in the
+ // status field of the claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ Resources *VolumeResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
+ // volumeName is the binding reference to the PersistentVolume backing this claim.
+ VolumeName *string `json:"volumeName,omitempty"`
+ // storageClassName is the name of the StorageClass required by the claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ // volumeMode defines what type of volume is required by the claim.
+ // Value of Filesystem is implied when not included in claim spec.
+ VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+ // dataSource field can be used to specify either:
+ // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ // * An existing PVC (PersistentVolumeClaim)
+ // If the provisioner or an external controller can support the specified data source,
+ // it will create a new volume based on the contents of the specified data source.
+ // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ // If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"`
+ // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ // volume is desired. This may be any object from a non-empty API group (non
+ // core object) or a PersistentVolumeClaim object.
+ // When this field is specified, volume binding will only succeed if the type of
+ // the specified object matches some installed volume populator or dynamic
+ // provisioner.
+ // This field will replace the functionality of the dataSource field and as such
+ // if both fields are non-empty, they must have the same value. For backwards
+ // compatibility, when namespace isn't specified in dataSourceRef,
+ // both fields (dataSource and dataSourceRef) will be set to the same
+ // value automatically if one of them is empty and the other is non-empty.
+ // When namespace is specified in dataSourceRef,
+ // dataSource isn't set to the same value and must be empty.
+ // There are three important differences between dataSource and dataSourceRef:
+ // * While dataSource only allows two specific types of objects, dataSourceRef
+ // allows any non-core object, as well as PersistentVolumeClaim objects.
+ // * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ // preserves all values, and generates an error if a disallowed value is
+ // specified.
+ // * While dataSource only allows local objects, dataSourceRef allows objects
+ // in any namespaces.
+ // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ DataSourceRef *TypedObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"`
+ // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ // If specified, the CSI driver will create or update the volume with the attributes defined
+ // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ // it can be changed after the claim is created. An empty string or nil value indicates that no
+ // VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state,
+ // this field can be reset to its previous value (including nil) to cancel the modification.
+ // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ // set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ // exists.
+ // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+ VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
}
// PersistentVolumeClaimSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimSpec type for use with
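A minimal sketch of building and server-side applying a claim with the spec fields described above; the namespace, storage class, size, and field manager are placeholders:

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// A 10Gi ReadWriteOnce claim against the "standard" storage class.
	pvc := corev1ac.PersistentVolumeClaim("example-data", "default").
		WithSpec(corev1ac.PersistentVolumeClaimSpec().
			WithAccessModes(corev1.ReadWriteOnce).
			WithStorageClassName("standard").
			WithResources(corev1ac.VolumeResourceRequirements().
				WithRequests(corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("10Gi"),
				})))

	_, err = cs.CoreV1().PersistentVolumeClaims("default").Apply(
		context.Background(), pvc, metav1.ApplyOptions{FieldManager: "example-manager"})
	if err != nil {
		panic(err)
	}
}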
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
index 6cea23a2..ee2d5adb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimstatus.go
@@ -24,15 +24,80 @@ import (
// PersistentVolumeClaimStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimStatus type for use
// with apply.
+//
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatusApplyConfiguration struct {
- Phase *corev1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
- AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
- Capacity *corev1.ResourceList `json:"capacity,omitempty"`
- Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"`
- AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
- AllocatedResourceStatuses map[corev1.ResourceName]corev1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"`
- CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"`
- ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"`
+ // phase represents the current phase of PersistentVolumeClaim.
+ Phase *corev1.PersistentVolumeClaimPhase `json:"phase,omitempty"`
+ // accessModes contains the actual access modes the volume backing the PVC has.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // capacity represents the actual resources of the underlying volume.
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
+ // conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
+ // resized then the Condition will be set to 'Resizing'.
+ Conditions []PersistentVolumeClaimConditionApplyConfiguration `json:"conditions,omitempty"`
+ // allocatedResources tracks the resources allocated to a PVC including its capacity.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // Capacity reported here may be larger than the actual capacity when a volume expansion operation
+ // is requested.
+ // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
+ // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
+ // If a volume expansion capacity request is lowered, allocatedResources is only
+ // lowered if there are no expansion operations in progress and if the actual volume capacity
+ // is equal or lower than the requested capacity.
+ //
+ // A controller that receives PVC update with previously unknown resourceName
+ // should ignore the update for the purpose it was designed. For example - a controller that
+ // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+ // resources associated with PVC.
+ AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
+ // allocatedResourceStatuses stores status of resource being resized for the given PVC.
+ // Key names follow standard Kubernetes label syntax. Valid values are either:
+ // * Un-prefixed keys:
+ // - storage - the capacity of the volume.
+ // * Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+ // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+ // reserved and hence may not be used.
+ //
+ // ClaimResourceStatus can be in any of following states:
+ // - ControllerResizeInProgress:
+ // State set when resize controller starts resizing the volume in control-plane.
+ // - ControllerResizeFailed:
+ // State set when resize has failed in resize controller with a terminal error.
+ // - NodeResizePending:
+ // State set when resize controller has finished resizing the volume but further resizing of
+ // volume is needed on the node.
+ // - NodeResizeInProgress:
+ // State set when kubelet starts resizing the volume.
+ // - NodeResizeFailed:
+ // State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
+ // NodeResizeFailed.
+ // For example: if expanding a PVC for more capacity - this field can be one of the following states:
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
+ // - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
+ // When this field is not set, it means that no resize operation is in progress for the given PVC.
+ //
+ // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus
+ // should ignore the update for the purpose it was designed. For example - a controller that
+ // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+ // resources associated with PVC.
+ AllocatedResourceStatuses map[corev1.ResourceName]corev1.ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty"`
+ // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
+ // When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.
+ CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty"`
+ // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
+ // When this is unset, there is no ModifyVolume operation being attempted.
+ ModifyVolumeStatus *ModifyVolumeStatusApplyConfiguration `json:"modifyVolumeStatus,omitempty"`
}
// PersistentVolumeClaimStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimStatus type for use with
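A minimal sketch of reading the resize status described above from the core/v1 API type; when the map entry is absent, no resize operation is in progress:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// resizeState reports the in-progress resize status for the storage resource,
// if any, using the states described above.
func resizeState(pvc *corev1.PersistentVolumeClaim) (corev1.ClaimResourceStatus, bool) {
	st, ok := pvc.Status.AllocatedResourceStatuses[corev1.ResourceStorage]
	return st, ok
}

func main() {
	pvc := &corev1.PersistentVolumeClaim{} // normally fetched from the API server
	if st, ok := resizeState(pvc); ok {
		fmt.Println("resize in progress:", st)
	} else {
		fmt.Println("no resize operation in progress")
	}
}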
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
index 8d031c9e..db5ce71a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimtemplate.go
@@ -26,9 +26,19 @@ import (
// PersistentVolumeClaimTemplateApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimTemplate type for use
// with apply.
+//
+// PersistentVolumeClaimTemplate is used to produce
+// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplateApplyConfiguration struct {
+ // May contain labels and annotations that will be copied into the PVC
+ // when creating it. No other fields are allowed and will be rejected during
+ // validation.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // The specification for the PersistentVolumeClaim. The entire content is
+ // copied unchanged into the PVC that gets created from this
+ // template. The same fields as in a PersistentVolumeClaim
+ // are also valid here.
+ Spec *PersistentVolumeClaimSpecApplyConfiguration `json:"spec,omitempty"`
}
// PersistentVolumeClaimTemplateApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimTemplate type for use with
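A minimal sketch of using the template as part of a generic ephemeral volume, which stamps out a PVC that follows the pod's lifecycle (the names and size are placeholders):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// The claim template's spec is copied unchanged into the generated PVC.
	vol := corev1ac.Volume().
		WithName("scratch").
		WithEphemeral(corev1ac.EphemeralVolumeSource().
			WithVolumeClaimTemplate(corev1ac.PersistentVolumeClaimTemplate().
				WithSpec(corev1ac.PersistentVolumeClaimSpec().
					WithAccessModes(corev1.ReadWriteOnce).
					WithResources(corev1ac.VolumeResourceRequirements().
						WithRequests(corev1.ResourceList{
							corev1.ResourceStorage: resource.MustParse("1Gi"),
						})))))
	fmt.Println(*vol.Name)
}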
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
index ccccdfb4..a171ebc1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimvolumesource.go
@@ -20,9 +20,18 @@ package v1
// PersistentVolumeClaimVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeClaimVolumeSource type for use
// with apply.
+//
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSourceApplyConfiguration struct {
+ // claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
ClaimName *string `json:"claimName,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // readOnly will force the ReadOnly setting in VolumeMounts.
+ // Default false.
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// PersistentVolumeClaimVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeClaimVolumeSource type for use with
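A minimal sketch of referencing an existing claim from a pod volume, as described above (the volume and claim names are placeholders):

package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Mount the PVC named "example-data" (same namespace as the pod) read-only.
	vol := corev1ac.Volume().
		WithName("data").
		WithPersistentVolumeClaim(corev1ac.PersistentVolumeClaimVolumeSource().
			WithClaimName("example-data").
			WithReadOnly(true))
	fmt.Println(*vol.Name)
}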
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
index aba01246..8627148b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumesource.go
@@ -20,29 +20,94 @@ package v1
// PersistentVolumeSourceApplyConfiguration represents a declarative configuration of the PersistentVolumeSource type for use
// with apply.
+//
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSourceApplyConfiguration struct {
- GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"`
+ // gcePersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"`
+ // awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSourceApplyConfiguration `json:"awsElasticBlockStore,omitempty"`
- HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"`
- Glusterfs *GlusterfsPersistentVolumeSourceApplyConfiguration `json:"glusterfs,omitempty"`
- NFS *NFSVolumeSourceApplyConfiguration `json:"nfs,omitempty"`
- RBD *RBDPersistentVolumeSourceApplyConfiguration `json:"rbd,omitempty"`
- ISCSI *ISCSIPersistentVolumeSourceApplyConfiguration `json:"iscsi,omitempty"`
- Cinder *CinderPersistentVolumeSourceApplyConfiguration `json:"cinder,omitempty"`
- CephFS *CephFSPersistentVolumeSourceApplyConfiguration `json:"cephfs,omitempty"`
- FC *FCVolumeSourceApplyConfiguration `json:"fc,omitempty"`
- Flocker *FlockerVolumeSourceApplyConfiguration `json:"flocker,omitempty"`
- FlexVolume *FlexPersistentVolumeSourceApplyConfiguration `json:"flexVolume,omitempty"`
- AzureFile *AzureFilePersistentVolumeSourceApplyConfiguration `json:"azureFile,omitempty"`
- VsphereVolume *VsphereVirtualDiskVolumeSourceApplyConfiguration `json:"vsphereVolume,omitempty"`
- Quobyte *QuobyteVolumeSourceApplyConfiguration `json:"quobyte,omitempty"`
- AzureDisk *AzureDiskVolumeSourceApplyConfiguration `json:"azureDisk,omitempty"`
+ // hostPath represents a directory on the host.
+ // Provisioned by a developer or tester.
+ // This is useful for single-node development and testing only!
+ // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"`
+ // glusterfs represents a Glusterfs volume that is attached to a host and
+ // exposed to the pod. Provisioned by an admin.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ Glusterfs *GlusterfsPersistentVolumeSourceApplyConfiguration `json:"glusterfs,omitempty"`
+ // nfs represents an NFS mount on the host. Provisioned by an admin.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ NFS *NFSVolumeSourceApplyConfiguration `json:"nfs,omitempty"`
+ // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md
+ RBD *RBDPersistentVolumeSourceApplyConfiguration `json:"rbd,omitempty"`
+ // iscsi represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+ ISCSI *ISCSIPersistentVolumeSourceApplyConfiguration `json:"iscsi,omitempty"`
+ // cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ Cinder *CinderPersistentVolumeSourceApplyConfiguration `json:"cinder,omitempty"`
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+ CephFS *CephFSPersistentVolumeSourceApplyConfiguration `json:"cephfs,omitempty"`
+ // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSourceApplyConfiguration `json:"fc,omitempty"`
+ // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ Flocker *FlockerVolumeSourceApplyConfiguration `json:"flocker,omitempty"`
+ // flexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ FlexVolume *FlexPersistentVolumeSourceApplyConfiguration `json:"flexVolume,omitempty"`
+ // azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
+ AzureFile *AzureFilePersistentVolumeSourceApplyConfiguration `json:"azureFile,omitempty"`
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
+ VsphereVolume *VsphereVirtualDiskVolumeSourceApplyConfiguration `json:"vsphereVolume,omitempty"`
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ Quobyte *QuobyteVolumeSourceApplyConfiguration `json:"quobyte,omitempty"`
+ // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
+ AzureDisk *AzureDiskVolumeSourceApplyConfiguration `json:"azureDisk,omitempty"`
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
PhotonPersistentDisk *PhotonPersistentDiskVolumeSourceApplyConfiguration `json:"photonPersistentDisk,omitempty"`
- PortworxVolume *PortworxVolumeSourceApplyConfiguration `json:"portworxVolume,omitempty"`
- ScaleIO *ScaleIOPersistentVolumeSourceApplyConfiguration `json:"scaleIO,omitempty"`
- Local *LocalVolumeSourceApplyConfiguration `json:"local,omitempty"`
- StorageOS *StorageOSPersistentVolumeSourceApplyConfiguration `json:"storageos,omitempty"`
- CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"`
+ // portworxVolume represents a portworx volume attached and mounted on the kubelet's host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
+ PortworxVolume *PortworxVolumeSourceApplyConfiguration `json:"portworxVolume,omitempty"`
+ // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+ ScaleIO *ScaleIOPersistentVolumeSourceApplyConfiguration `json:"scaleIO,omitempty"`
+ // local represents directly-attached storage with node affinity.
+ Local *LocalVolumeSourceApplyConfiguration `json:"local,omitempty"`
+ // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+ // More info: https://examples.k8s.io/volumes/storageos/README.md
+ StorageOS *StorageOSPersistentVolumeSourceApplyConfiguration `json:"storageos,omitempty"`
+ // csi represents storage that is handled by an external CSI driver.
+ CSI *CSIPersistentVolumeSourceApplyConfiguration `json:"csi,omitempty"`
}
// PersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the PersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
index 792e3b94..8c166102 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumespec.go
@@ -24,17 +24,49 @@ import (
// PersistentVolumeSpecApplyConfiguration represents a declarative configuration of the PersistentVolumeSpec type for use
// with apply.
+//
+// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpecApplyConfiguration struct {
- Capacity *corev1.ResourceList `json:"capacity,omitempty"`
+ // capacity is the description of the persistent volume's resources and capacity.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+ Capacity *corev1.ResourceList `json:"capacity,omitempty"`
+ // persistentVolumeSource is the actual volume backing the persistent volume.
PersistentVolumeSourceApplyConfiguration `json:",inline"`
- AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
- ClaimRef *ObjectReferenceApplyConfiguration `json:"claimRef,omitempty"`
- PersistentVolumeReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- MountOptions []string `json:"mountOptions,omitempty"`
- VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
- NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"`
- VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
+ // accessModes contains all ways the volume can be mounted.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+ AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+ // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // Expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+ ClaimRef *ObjectReferenceApplyConfiguration `json:"claimRef,omitempty"`
+ // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
+ // Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+ // for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+ // Recycle must be supported by the volume plugin underlying this PersistentVolume.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+ PersistentVolumeReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty"`
+ // storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
+ // means that this volume does not belong to any StorageClass.
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ // mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+ // simply fail if one is invalid.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+ MountOptions []string `json:"mountOptions,omitempty"`
+ // volumeMode defines if a volume is intended to be used with a formatted filesystem
+ // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+ VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+ // nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+ // This field influences the scheduling of pods that use this volume.
+ // This field is mutable if the MutablePVNodeAffinity feature gate is enabled.
+ NodeAffinity *VolumeNodeAffinityApplyConfiguration `json:"nodeAffinity,omitempty"`
+ // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
+ // is not allowed. When this field is not set, it indicates that this volume does not belong to any
+ // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
+ // after a volume has been updated successfully to a new class.
+ // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
+ // PersistentVolumeClaims during the binding process.
+ VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty"`
}
// PersistentVolumeSpecApplyConfiguration constructs a declarative configuration of the PersistentVolumeSpec type for use with
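Editor's sketch (not part of the diff): one way the spec fields above are typically populated with the generated apply configurations and applied server-side, assuming the usual generated With* helpers; the storage class, CSI driver, volume handle, and field-manager names are hypothetical.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyExamplePV builds a PersistentVolume apply configuration from the
// fields documented above and submits it with server-side apply.
func applyExamplePV(ctx context.Context, cs kubernetes.Interface) error {
	pv := corev1ac.PersistentVolume("example-pv").
		WithSpec(corev1ac.PersistentVolumeSpec().
			WithCapacity(corev1.ResourceList{
				corev1.ResourceStorage: resource.MustParse("10Gi"),
			}).
			WithAccessModes(corev1.ReadWriteOnce).
			WithPersistentVolumeReclaimPolicy(corev1.PersistentVolumeReclaimRetain).
			WithStorageClassName("example-sc").
			WithCSI(corev1ac.CSIPersistentVolumeSource().
				WithDriver("csi.example.com"). // hypothetical CSI driver
				WithVolumeHandle("vol-123")))
	_, err := cs.CoreV1().PersistentVolumes().Apply(ctx, pv,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}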
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
index 0bb077ae..9d6e5340 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumestatus.go
@@ -25,11 +25,20 @@ import (
// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use
// with apply.
+//
+// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatusApplyConfiguration struct {
- Phase *corev1.PersistentVolumePhase `json:"phase,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *string `json:"reason,omitempty"`
- LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"`
+ // phase indicates if a volume is available, bound to a claim, or released by a claim.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
+ Phase *corev1.PersistentVolumePhase `json:"phase,omitempty"`
+ // message is a human-readable message indicating details about why the volume is in this state.
+ Message *string `json:"message,omitempty"`
+ // reason is a brief CamelCase string that describes any failure and is meant
+ // for machine parsing and tidy display in the CLI.
+ Reason *string `json:"reason,omitempty"`
+ // lastPhaseTransitionTime is the time the phase transitioned from one to another
+ // and automatically resets to the current time every time a volume phase transitions.
+ LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty"`
}
// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
index d8dc103e..585e50ea 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/photonpersistentdiskvolumesource.go
@@ -20,8 +20,14 @@ package v1
// PhotonPersistentDiskVolumeSourceApplyConfiguration represents a declarative configuration of the PhotonPersistentDiskVolumeSource type for use
// with apply.
+//
+// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSourceApplyConfiguration struct {
- PdID *string `json:"pdID,omitempty"`
+ // pdID is the ID that identifies the Photon Controller persistent disk.
+ PdID *string `json:"pdID,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType *string `json:"fsType,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
index df4e99b3..d10d38de 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
@@ -29,11 +29,23 @@ import (
// PodApplyConfiguration represents a declarative configuration of the Pod type for use
// with apply.
+//
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
type PodApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the pod.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the pod.
+ // This data may not be up to date.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *PodStatusApplyConfiguration `json:"status,omitempty"`
}
// Pod constructs a declarative configuration of the Pod type for use with
@@ -47,6 +59,27 @@ func Pod(name, namespace string) *PodApplyConfiguration {
return b
}
+// ExtractPodFrom extracts the applied configuration owned by fieldManager from
+// pod for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// pod must be an unmodified Pod API object that was retrieved from the Kubernetes API.
+// ExtractPodFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPodFrom(pod *corev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) {
+ b := &PodApplyConfiguration{}
+ err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(pod.Name)
+ b.WithNamespace(pod.Namespace)
+
+ b.WithKind("Pod")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractPod extracts the applied configuration owned by fieldManager from
// pod. If no managedFields are found in pod for fieldManager, a
// PodApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +90,28 @@ func Pod(name, namespace string) *PodApplyConfiguration {
// ExtractPod provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPod(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
- return extractPod(pod, fieldManager, "")
+ return ExtractPodFrom(pod, fieldManager, "")
}
-// ExtractPodStatus is the same as ExtractPod except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPodStatus(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
- return extractPod(pod, fieldManager, "status")
+// ExtractPodEphemeralcontainers extracts the applied configuration owned by fieldManager from
+// pod for the ephemeralcontainers subresource.
+func ExtractPodEphemeralcontainers(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+ return ExtractPodFrom(pod, fieldManager, "ephemeralcontainers")
}
-func extractPod(pod *corev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) {
- b := &PodApplyConfiguration{}
- err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(pod.Name)
- b.WithNamespace(pod.Namespace)
+// ExtractPodResize extracts the applied configuration owned by fieldManager from
+// pod for the resize subresource.
+func ExtractPodResize(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+ return ExtractPodFrom(pod, fieldManager, "resize")
+}
- b.WithKind("Pod")
- b.WithAPIVersion("v1")
- return b, nil
+// ExtractPodStatus extracts the applied configuration owned by fieldManager from
+// pod for the status subresource.
+func ExtractPodStatus(pod *corev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+ return ExtractPodFrom(pod, fieldManager, "status")
}
+
func (b PodApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
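Editor's sketch (not part of the diff): the extract/modify-in-place/apply workflow that the ExtractPod* helpers above enable; the field-manager name and label key are hypothetical.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelPod re-applies only the fields this manager owns, plus one new label.
func relabelPod(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	const manager = "example-manager"

	pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract the configuration previously applied by this field manager.
	ac, err := corev1ac.ExtractPod(pod, manager)
	if err != nil {
		return err
	}

	// Modify in place, then apply again under the same field manager.
	ac.WithLabels(map[string]string{"example.com/tier": "web"})
	_, err = cs.CoreV1().Pods(ns).Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: manager, Force: true})
	return err
}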
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
index 23fed954..dda9727a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinity.go
@@ -20,8 +20,26 @@ package v1
// PodAffinityApplyConfiguration represents a declarative configuration of the PodAffinity type for use
// with apply.
+//
+// Pod affinity is a group of inter-pod affinity scheduling rules.
type PodAffinityApplyConfiguration struct {
- RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
index 1cc1ca0d..8dbf0c66 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podaffinityterm.go
@@ -24,13 +24,52 @@ import (
// PodAffinityTermApplyConfiguration represents a declarative configuration of the PodAffinityTerm type for use
// with apply.
+//
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
type PodAffinityTermApplyConfiguration struct {
- LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
- TopologyKey *string `json:"topologyKey,omitempty"`
+ // A label query over a set of resources, in this case pods.
+ // If it's null, this PodAffinityTerm matches with no Pods.
+ LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
+ // namespaces specifies a static list of namespace names that the term applies to.
+ // The term is applied to the union of the namespaces listed in this field
+ // and the ones selected by namespaceSelector.
+ // null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ Namespaces []string `json:"namespaces,omitempty"`
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // Empty topologyKey is not allowed.
+ TopologyKey *string `json:"topologyKey,omitempty"`
+ // A label query over the set of namespaces that the term applies to.
+ // The term is applied to the union of the namespaces selected by this field
+ // and the ones listed in the namespaces field.
+ // null selector and null or empty namespaces list means "this pod's namespace".
+ // An empty selector ({}) matches all namespaces.
NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
- MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"`
+ // MatchLabelKeys is a set of pod label keys to select which pods will
+ // be taken into consideration. The keys are used to look up values from the
+ // incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+ // to select the group of existing pods to take into consideration
+ // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ // pod labels will be ignored. The default value is empty.
+ // The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+ // Also, matchLabelKeys cannot be set when labelSelector isn't set.
+ MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
+ // MismatchLabelKeys is a set of pod label keys to select which pods will
+ // be taken into consideration. The keys are used to look up values from the
+ // incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+ // to select the group of existing pods to take into consideration
+ // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ // pod labels will be ignored. The default value is empty.
+ // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+ // Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+ MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty"`
}
// PodAffinityTermApplyConfiguration constructs a declarative configuration of the PodAffinityTerm type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
index ae984896..5e1ece19 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podantiaffinity.go
@@ -20,8 +20,26 @@ package v1
// PodAntiAffinityApplyConfiguration represents a declarative configuration of the PodAntiAffinity type for use
// with apply.
+//
+// Pod anti-affinity is a group of inter-pod anti-affinity scheduling rules.
type PodAntiAffinityApplyConfiguration struct {
- RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTermApplyConfiguration `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and subtracting
+ // "weight" from the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTermApplyConfiguration `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"`
}
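Editor's sketch (not part of the diff): expressing a required anti-affinity term like the ones documented above via apply configurations, assuming the usual generated With* helpers; the label and topology-key values are hypothetical.

package main

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// spreadWebPods keeps pods labelled app=web off nodes that already run one.
func spreadWebPods() *corev1ac.PodSpecApplyConfiguration {
	return corev1ac.PodSpec().
		WithAffinity(corev1ac.Affinity().
			WithPodAntiAffinity(corev1ac.PodAntiAffinity().
				WithRequiredDuringSchedulingIgnoredDuringExecution(
					corev1ac.PodAffinityTerm().
						WithLabelSelector(metav1ac.LabelSelector().
							WithMatchLabels(map[string]string{"app": "web"})).
						WithTopologyKey("kubernetes.io/hostname"))))
}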
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go
index 1b6ffffb..0d951a89 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcertificateprojection.go
@@ -20,13 +20,75 @@ package v1
// PodCertificateProjectionApplyConfiguration represents a declarative configuration of the PodCertificateProjection type for use
// with apply.
+//
+// PodCertificateProjection provides a private key and X.509 certificate in the
+// pod filesystem.
type PodCertificateProjectionApplyConfiguration struct {
- SignerName *string `json:"signerName,omitempty"`
- KeyType *string `json:"keyType,omitempty"`
- MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty"`
+ // Kubelet's generated CSRs will be addressed to this signer.
+ SignerName *string `json:"signerName,omitempty"`
+ // The type of keypair Kubelet will generate for the pod.
+ //
+ // Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
+ // "ECDSAP521", and "ED25519".
+ KeyType *string `json:"keyType,omitempty"`
+ // maxExpirationSeconds is the maximum lifetime permitted for the
+ // certificate.
+ //
+ // Kubelet copies this value verbatim into the PodCertificateRequests it
+ // generates for this projection.
+ //
+ // If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
+ // will reject values shorter than 3600 (1 hour). The maximum allowable
+ // value is 7862400 (91 days).
+ //
+ // The signer implementation is then free to issue a certificate with any
+ // lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
+ // seconds (1 hour). This constraint is enforced by kube-apiserver.
+ // `kubernetes.io` signers will never issue certificates with a lifetime
+ // longer than 24 hours.
+ MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty"`
+ // Write the credential bundle at this path in the projected volume.
+ //
+ // The credential bundle is a single file that contains multiple PEM blocks.
+ // The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
+ // key.
+ //
+ // The remaining blocks are CERTIFICATE blocks, containing the issued
+ // certificate chain from the signer (leaf and any intermediates).
+ //
+ // Using credentialBundlePath lets your Pod's application code make a single
+ // atomic read that retrieves a consistent key and certificate chain. If you
+ // project them to separate files, your application code will need to
+ // additionally check that the leaf certificate was issued to the key.
CredentialBundlePath *string `json:"credentialBundlePath,omitempty"`
- KeyPath *string `json:"keyPath,omitempty"`
+ // Write the key at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
+ KeyPath *string `json:"keyPath,omitempty"`
+ // Write the certificate chain at this path in the projected volume.
+ //
+ // Most applications should use credentialBundlePath. When using keyPath
+ // and certificateChainPath, your application needs to check that the key
+ // and leaf certificate are consistent, because it is possible to read the
+ // files mid-rotation.
CertificateChainPath *string `json:"certificateChainPath,omitempty"`
+ // userAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // These values are copied verbatim into the `spec.unverifiedUserAnnotations` field of
+ // the PodCertificateRequest objects that Kubelet creates.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ UserAnnotations map[string]string `json:"userAnnotations,omitempty"`
}
// PodCertificateProjectionApplyConfiguration constructs a declarative configuration of the PodCertificateProjection type for use with
@@ -82,3 +144,17 @@ func (b *PodCertificateProjectionApplyConfiguration) WithCertificateChainPath(va
b.CertificateChainPath = &value
return b
}
+
+// WithUserAnnotations puts the entries into the UserAnnotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the UserAnnotations field,
+// overwriting existing map entries in the UserAnnotations field with the same key.
+func (b *PodCertificateProjectionApplyConfiguration) WithUserAnnotations(entries map[string]string) *PodCertificateProjectionApplyConfiguration {
+ if b.UserAnnotations == nil && len(entries) > 0 {
+ b.UserAnnotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.UserAnnotations[k] = v
+ }
+ return b
+}
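Editor's sketch (not part of the diff): populating the projection above, including the new WithUserAnnotations helper; the signer name, key type, path, and annotation key are hypothetical, and the other setters are assumed to be the usual generated With* helpers.

package main

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// podCertificateProjection requests a short-lived ECDSA credential bundle.
func podCertificateProjection() *corev1ac.PodCertificateProjectionApplyConfiguration {
	return corev1ac.PodCertificateProjection().
		WithSignerName("example.com/internal-ca").
		WithKeyType("ECDSAP256").
		WithMaxExpirationSeconds(86400).
		WithCredentialBundlePath("credentials.pem").
		WithUserAnnotations(map[string]string{"example.com/profile": "server"})
}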
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
index 90bb8711..55e55dd0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podcondition.go
@@ -25,14 +25,27 @@ import (
// PodConditionApplyConfiguration represents a declarative configuration of the PodCondition type for use
// with apply.
+//
+// PodCondition contains details for the current condition of this pod.
type PodConditionApplyConfiguration struct {
- Type *corev1.PodConditionType `json:"type,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type is the type of the condition.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+ Type *corev1.PodConditionType `json:"type,omitempty"`
+ // If set, this represents the .metadata.generation that the pod condition was set based upon.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Status is the status of the condition.
+ // Can be True, False, Unknown.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // Last time we probed the condition.
+ LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // Human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PodConditionApplyConfiguration constructs a declarative configuration of the PodCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
index 2e0ce9a9..a725e3a9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfig.go
@@ -20,10 +20,23 @@ package v1
// PodDNSConfigApplyConfiguration represents a declarative configuration of the PodDNSConfig type for use
// with apply.
+//
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
type PodDNSConfigApplyConfiguration struct {
- Nameservers []string `json:"nameservers,omitempty"`
- Searches []string `json:"searches,omitempty"`
- Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"`
+ // A list of DNS name server IP addresses.
+ // This will be appended to the base nameservers generated from DNSPolicy.
+ // Duplicated nameservers will be removed.
+ Nameservers []string `json:"nameservers,omitempty"`
+ // A list of DNS search domains for host-name lookup.
+ // This will be appended to the base search paths generated from DNSPolicy.
+ // Duplicated search paths will be removed.
+ Searches []string `json:"searches,omitempty"`
+ // A list of DNS resolver options.
+ // This will be merged with the base options generated from DNSPolicy.
+ // Duplicated entries will be removed. Resolution options given in Options
+ // will override those that appear in the base DNSPolicy.
+ Options []PodDNSConfigOptionApplyConfiguration `json:"options,omitempty"`
}
// PodDNSConfigApplyConfiguration constructs a declarative configuration of the PodDNSConfig type for use with
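Editor's sketch (not part of the diff): building the DNS config documented above with the generated helpers; the nameserver, search domain, and option values are hypothetical.

package main

import corev1ac "k8s.io/client-go/applyconfigurations/core/v1"

// customDNS appends one nameserver, one search domain, and an ndots option.
func customDNS() *corev1ac.PodDNSConfigApplyConfiguration {
	return corev1ac.PodDNSConfig().
		WithNameservers("10.0.0.10").
		WithSearches("example.svc.cluster.local").
		WithOptions(corev1ac.PodDNSConfigOption().
			WithName("ndots").
			WithValue("2"))
}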
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
index 458b333b..3b9a868e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/poddnsconfigoption.go
@@ -20,8 +20,13 @@ package v1
// PodDNSConfigOptionApplyConfiguration represents a declarative configuration of the PodDNSConfigOption type for use
// with apply.
+//
+// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOptionApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is this DNS resolver option's name.
+ // Required.
+ Name *string `json:"name,omitempty"`
+ // Value is this DNS resolver option's value.
Value *string `json:"value,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go
index d4378863..460936ee 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podextendedresourceclaimstatus.go
@@ -20,9 +20,17 @@ package v1
// PodExtendedResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodExtendedResourceClaimStatus type for use
// with apply.
+//
+// PodExtendedResourceClaimStatus is stored in the PodStatus for the extended
+// resource requests backed by DRA. It stores the generated name for
+// the corresponding special ResourceClaim created by the scheduler.
type PodExtendedResourceClaimStatusApplyConfiguration struct {
- RequestMappings []ContainerExtendedResourceRequestApplyConfiguration `json:"requestMappings,omitempty"`
- ResourceClaimName *string `json:"resourceClaimName,omitempty"`
+ // RequestMappings identifies the mapping of <container, extended resource backed by DRA> to device request
+ // in the generated ResourceClaim.
+ RequestMappings []ContainerExtendedResourceRequestApplyConfiguration `json:"requestMappings,omitempty"`
+ // ResourceClaimName is the name of the ResourceClaim that was
+ // generated for the Pod in the namespace of the Pod.
+ ResourceClaimName *string `json:"resourceClaimName,omitempty"`
}
// PodExtendedResourceClaimStatusApplyConfiguration constructs a declarative configuration of the PodExtendedResourceClaimStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
index 73f08985..42412090 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podip.go
@@ -20,7 +20,10 @@ package v1
// PodIPApplyConfiguration represents a declarative configuration of the PodIP type for use
// with apply.
+//
+// PodIP represents a single IP address allocated to the pod.
type PodIPApplyConfiguration struct {
+ // IP is the IP address assigned to the pod
IP *string `json:"ip,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
index 22a74560..8c0010b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podos.go
@@ -24,7 +24,13 @@ import (
// PodOSApplyConfiguration represents a declarative configuration of the PodOS type for use
// with apply.
+//
+// PodOS defines the OS parameters of a pod.
type PodOSApplyConfiguration struct {
+ // Name is the name of the operating system. The currently supported values are linux and windows.
+ // Additional values may be defined in the future and can be one of:
+ // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ // Clients should expect to handle additional values and treat unrecognized values in this field as os: null
Name *corev1.OSName `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
index 4298b1ca..b86f4837 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podreadinessgate.go
@@ -24,7 +24,10 @@ import (
// PodReadinessGateApplyConfiguration represents a declarative configuration of the PodReadinessGate type for use
// with apply.
+//
+// PodReadinessGate contains the reference to a pod condition
type PodReadinessGateApplyConfiguration struct {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
ConditionType *corev1.PodConditionType `json:"conditionType,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
index b0bd67fa..93d2280f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaim.go
@@ -20,9 +20,38 @@ package v1
// PodResourceClaimApplyConfiguration represents a declarative configuration of the PodResourceClaim type for use
// with apply.
+//
+// PodResourceClaim references exactly one ResourceClaim, either directly
+// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+// for the pod.
+//
+// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+// Containers that need access to the ResourceClaim reference it with this name.
type PodResourceClaimApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ResourceClaimName *string `json:"resourceClaimName,omitempty"`
+ // Name uniquely identifies this resource claim inside the pod.
+ // This must be a DNS_LABEL.
+ Name *string `json:"name,omitempty"`
+ // ResourceClaimName is the name of a ResourceClaim object in the same
+ // namespace as this pod.
+ //
+ // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ // be set.
+ ResourceClaimName *string `json:"resourceClaimName,omitempty"`
+ // ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ // object in the same namespace as this pod.
+ //
+ // The template will be used to create a new ResourceClaim, which will
+ // be bound to this pod. When this pod is deleted, the ResourceClaim
+ // will also be deleted. The pod name and resource name, along with a
+ // generated component, will be used to form a unique name for the
+ // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+ //
+ // This field is immutable and no changes will be made to the
+ // corresponding ResourceClaim by the control plane after creating the
+ // ResourceClaim.
+ //
+ // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+ // be set.
ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty"`
}
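Editor's sketch (not part of the diff): declaring a pod-level resource claim backed by a ResourceClaimTemplate, assuming the usual generated With* helpers; the claim and template names are hypothetical.

package main

import corev1ac "k8s.io/client-go/applyconfigurations/core/v1"

// gpuClaim wires a ResourceClaimTemplate into the pod spec under the name "gpu".
func gpuClaim() *corev1ac.PodSpecApplyConfiguration {
	return corev1ac.PodSpec().
		WithResourceClaims(corev1ac.PodResourceClaim().
			WithName("gpu").
			WithResourceClaimTemplateName("gpu-template"))
}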
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
index f60ad4b0..f0673fa0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podresourceclaimstatus.go
@@ -20,8 +20,19 @@ package v1
// PodResourceClaimStatusApplyConfiguration represents a declarative configuration of the PodResourceClaimStatus type for use
// with apply.
+//
+// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
+// which references a ResourceClaimTemplate. It stores the generated name for
+// the corresponding ResourceClaim.
type PodResourceClaimStatusApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name uniquely identifies this resource claim inside the pod.
+ // This must match the name of an entry in pod.spec.resourceClaims,
+ // which implies that the string must be a DNS_LABEL.
+ Name *string `json:"name,omitempty"`
+ // ResourceClaimName is the name of the ResourceClaim that was
+ // generated for the Pod in the namespace of the Pod. If this is
+ // unset, then generating a ResourceClaim was not necessary. The
+ // pod.spec.resourceClaims entry can be ignored in this case.
ResourceClaimName *string `json:"resourceClaimName,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
index 3d910927..bf9d551e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podschedulinggate.go
@@ -20,7 +20,11 @@ package v1
// PodSchedulingGateApplyConfiguration represents a declarative configuration of the PodSchedulingGate type for use
// with apply.
+//
+// PodSchedulingGate is associated to a Pod to guard its scheduling.
type PodSchedulingGateApplyConfiguration struct {
+ // Name of the scheduling gate.
+ // Each scheduling gate must have a unique name field.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
index f0a3e662..a6f1629e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podsecuritycontext.go
@@ -24,20 +24,114 @@ import (
// PodSecurityContextApplyConfiguration represents a declarative configuration of the PodSecurityContext type for use
// with apply.
+//
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContextApplyConfiguration struct {
- SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"`
- WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
- RunAsUser *int64 `json:"runAsUser,omitempty"`
- RunAsGroup *int64 `json:"runAsGroup,omitempty"`
- RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
- SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
- SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"`
- FSGroup *int64 `json:"fsGroup,omitempty"`
- Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"`
- FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"`
- SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"`
- AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"`
- SELinuxChangePolicy *corev1.PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty"`
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"`
+ // The Windows specific settings applied to all containers.
+ // If unspecified, the options within a container's SecurityContext will be used.
+ // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // Note that this field cannot be set when spec.os.name is linux.
+ WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // Note that this field cannot be set when spec.os.name is windows.
+ RunAsUser *int64 `json:"runAsUser,omitempty"`
+ // The GID to run the entrypoint of the container process.
+ // Uses runtime default if unset.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // Note that this field cannot be set when spec.os.name is windows.
+ RunAsGroup *int64 `json:"runAsGroup,omitempty"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+ // A list of groups applied to the first process run in each container, in
+ // addition to the container's primary GID and fsGroup (if specified). If
+ // the SupplementalGroupsPolicy feature is enabled, the
+ // supplementalGroupsPolicy field determines whether these are in addition
+ // to or instead of any group memberships defined in the container image.
+ // If unspecified, no additional groups are added, though group memberships
+ // defined in the container image may still be used, depending on the
+ // supplementalGroupsPolicy field.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SupplementalGroups []int64 `json:"supplementalGroups,omitempty"`
+ // Defines how supplemental groups of the first container processes are calculated.
+ // Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+ // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+ // and the container runtime must implement support for this feature.
+ // Note that this field cannot be set when spec.os.name is windows.
+ // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
+ SupplementalGroupsPolicy *corev1.SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty"`
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ // Note that this field cannot be set when spec.os.name is windows.
+ FSGroup *int64 `json:"fsGroup,omitempty"`
+ // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ // sysctls (by the container runtime) might fail to launch.
+ // Note that this field cannot be set when spec.os.name is windows.
+ Sysctls []SysctlApplyConfiguration `json:"sysctls,omitempty"`
+ // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ // before being exposed inside Pod. This field will only apply to
+ // volume types which support fsGroup based ownership(and permissions).
+ // It will have no effect on ephemeral volume types such as: secret, configmaps
+ // and emptydir.
+ // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ // Note that this field cannot be set when spec.os.name is windows.
+ FSGroupChangePolicy *corev1.PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty"`
+ // The seccomp options to use by the containers in this pod.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"`
+ // appArmorProfile is the AppArmor options to use by the containers in this pod.
+ // Note that this field cannot be set when spec.os.name is windows.
+ AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"`
+ // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+ // It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+ // Valid values are "MountOption" and "Recursive".
+ //
+ // "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+ // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+ //
+ // "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+ // This requires all Pods that share the same volume to use the same SELinux label.
+ // It is not possible to share the same volume among privileged and unprivileged Pods.
+ // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+ // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+ // CSIDriver instance. Other volumes are always re-labelled recursively.
+ // "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+ //
+ // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+ // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+ // and "Recursive" for all other volumes.
+ //
+ // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+ //
+ // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SELinuxChangePolicy *corev1.PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty"`
}
// PodSecurityContextApplyConfiguration constructs a declarative configuration of the PodSecurityContext type for use with
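Editor's sketch (not part of the diff): a pod-level security context combining several of the fields above, assuming the usual generated With* helpers; the UID/GID values are hypothetical.

package main

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// hardenedPodSecurityContext runs containers as a non-root user with a shared fsGroup.
func hardenedPodSecurityContext() *corev1ac.PodSecurityContextApplyConfiguration {
	return corev1ac.PodSecurityContext().
		WithRunAsNonRoot(true).
		WithRunAsUser(1000).
		WithRunAsGroup(3000).
		WithFSGroup(2000).
		WithSeccompProfile(corev1ac.SeccompProfile().
			WithType(corev1.SeccompProfileTypeRuntimeDefault))
}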
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
index 82a1afa6..1948c713 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podspec.go
@@ -24,48 +24,263 @@ import (
// PodSpecApplyConfiguration represents a declarative configuration of the PodSpec type for use
// with apply.
+//
+// PodSpec is a description of a pod.
type PodSpecApplyConfiguration struct {
- Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"`
- InitContainers []ContainerApplyConfiguration `json:"initContainers,omitempty"`
- Containers []ContainerApplyConfiguration `json:"containers,omitempty"`
- EphemeralContainers []EphemeralContainerApplyConfiguration `json:"ephemeralContainers,omitempty"`
- RestartPolicy *corev1.RestartPolicy `json:"restartPolicy,omitempty"`
- TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
- ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
- DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"`
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- ServiceAccountName *string `json:"serviceAccountName,omitempty"`
- DeprecatedServiceAccount *string `json:"serviceAccount,omitempty"`
- AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- HostNetwork *bool `json:"hostNetwork,omitempty"`
- HostPID *bool `json:"hostPID,omitempty"`
- HostIPC *bool `json:"hostIPC,omitempty"`
- ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
- SecurityContext *PodSecurityContextApplyConfiguration `json:"securityContext,omitempty"`
- ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- Subdomain *string `json:"subdomain,omitempty"`
- Affinity *AffinityApplyConfiguration `json:"affinity,omitempty"`
- SchedulerName *string `json:"schedulerName,omitempty"`
- Tolerations []TolerationApplyConfiguration `json:"tolerations,omitempty"`
- HostAliases []HostAliasApplyConfiguration `json:"hostAliases,omitempty"`
- PriorityClassName *string `json:"priorityClassName,omitempty"`
- Priority *int32 `json:"priority,omitempty"`
- DNSConfig *PodDNSConfigApplyConfiguration `json:"dnsConfig,omitempty"`
- ReadinessGates []PodReadinessGateApplyConfiguration `json:"readinessGates,omitempty"`
- RuntimeClassName *string `json:"runtimeClassName,omitempty"`
- EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
- PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
- Overhead *corev1.ResourceList `json:"overhead,omitempty"`
- TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"`
- SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"`
- OS *PodOSApplyConfiguration `json:"os,omitempty"`
- HostUsers *bool `json:"hostUsers,omitempty"`
- SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"`
- ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"`
- Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
- HostnameOverride *string `json:"hostnameOverride,omitempty"`
+ // List of volumes that can be mounted by containers belonging to the pod.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ Volumes []VolumeApplyConfiguration `json:"volumes,omitempty"`
+ // List of initialization containers belonging to the pod.
+ // Init containers are executed in order prior to containers being started. If any
+ // init container fails, the pod is considered to have failed and is handled according
+ // to its restartPolicy. The name for an init container or normal container must be
+ // unique among all containers.
+ // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ // The resourceRequirements of an init container are taken into account during scheduling
+ // by finding the highest request/limit for each resource type, and then using the max of
+ // that value or the sum of the normal containers. Limits are applied to init containers
+ // in a similar fashion.
+ // Init containers cannot currently be added or removed.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ InitContainers []ContainerApplyConfiguration `json:"initContainers,omitempty"`
+ // List of containers belonging to the pod.
+ // Containers cannot currently be added or removed.
+ // There must be at least one container in a Pod.
+ // Cannot be updated.
+ Containers []ContainerApplyConfiguration `json:"containers,omitempty"`
+ // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ // pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ // creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ EphemeralContainers []EphemeralContainerApplyConfiguration `json:"ephemeralContainers,omitempty"`
+ // Restart policy for all containers within the pod.
+ // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ // Default to Always.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ RestartPolicy *corev1.RestartPolicy `json:"restartPolicy,omitempty"`
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be non-negative integer. The value zero indicates stop immediately via
+ // the kill signal (no opportunity to shut down).
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds after the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // Defaults to 30 seconds.
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+ // Optional duration in seconds the pod may be active on the node relative to
+ // StartTime before the system will actively try to mark it failed and kill associated containers.
+ // Value must be a positive integer.
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+ // Set DNS policy for the pod.
+ // Defaults to "ClusterFirst".
+ // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ // To have DNS options set along with hostNetwork, you have to specify DNS policy
+ // explicitly to 'ClusterFirstWithHostNet'.
+ DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"`
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // Selector which must match a node's labels for the pod to be scheduled on that node.
+ // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ ServiceAccountName *string `json:"serviceAccountName,omitempty"`
+ // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ // Deprecated: Use serviceAccountName instead.
+ DeprecatedServiceAccount *string `json:"serviceAccount,omitempty"`
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+ // NodeName indicates in which node this pod is scheduled.
+ // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+ // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+ // This field should not be used to express a desire for the pod to be scheduled on a specific node.
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+ NodeName *string `json:"nodeName,omitempty"`
+ // Host networking requested for this pod. Use the host's network namespace.
+ // When using HostNetwork you should specify ports so the scheduler is aware.
+ // When `hostNetwork` is true, specified `hostPort` fields in port definitions must match `containerPort`,
+ // and unspecified `hostPort` fields in port definitions are defaulted to match `containerPort`.
+ // Default to false.
+ HostNetwork *bool `json:"hostNetwork,omitempty"`
+ // Use the host's pid namespace.
+ // Optional: Default to false.
+ HostPID *bool `json:"hostPID,omitempty"`
+ // Use the host's ipc namespace.
+ // Optional: Default to false.
+ HostIPC *bool `json:"hostIPC,omitempty"`
+ // Share a single process namespace between all of the containers in a pod.
+ // When this is set containers will be able to view and signal processes from other containers
+ // in the same pod, and the first process in each container will not be assigned PID 1.
+ // HostPID and ShareProcessNamespace cannot both be set.
+ // Optional: Default to false.
+ ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ SecurityContext *PodSecurityContextApplyConfiguration `json:"securityContext,omitempty"`
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use.
+ // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
+ // Specifies the hostname of the Pod
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ Hostname *string `json:"hostname,omitempty"`
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domainname at all.
+ Subdomain *string `json:"subdomain,omitempty"`
+ // If specified, the pod's scheduling constraints
+ Affinity *AffinityApplyConfiguration `json:"affinity,omitempty"`
+ // If specified, the pod will be dispatched by specified scheduler.
+ // If not specified, the pod will be dispatched by default scheduler.
+ SchedulerName *string `json:"schedulerName,omitempty"`
+ // If specified, the pod's tolerations.
+ Tolerations []TolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ // file if specified.
+ HostAliases []HostAliasApplyConfiguration `json:"hostAliases,omitempty"`
+ // If specified, indicates the pod's priority. "system-node-critical" and
+ // "system-cluster-critical" are two special keywords which indicate the
+ // highest priorities with the former being the highest priority. Any other
+ // name must be defined by creating a PriorityClass object with that name.
+ // If not specified, the pod priority will be default or zero if there is no
+ // default.
+ PriorityClassName *string `json:"priorityClassName,omitempty"`
+ // The priority value. Various system components use this field to find the
+ // priority of the pod. When Priority Admission Controller is enabled, it
+ // prevents users from setting this field. The admission controller populates
+ // this field from PriorityClassName.
+ // The higher the value, the higher the priority.
+ Priority *int32 `json:"priority,omitempty"`
+ // Specifies the DNS parameters of a pod.
+ // Parameters specified here will be merged to the generated DNS
+ // configuration based on DNSPolicy.
+ DNSConfig *PodDNSConfigApplyConfiguration `json:"dnsConfig,omitempty"`
+ // If specified, all readiness gates will be evaluated for pod readiness.
+ // A pod is ready when all its containers are ready AND
+ // all conditions specified in the readiness gates have status equal to "True"
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ ReadinessGates []PodReadinessGateApplyConfiguration `json:"readinessGates,omitempty"`
+ // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ // empty definition that uses the default runtime handler.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ RuntimeClassName *string `json:"runtimeClassName,omitempty"`
+ // EnableServiceLinks indicates whether information about services should be injected into pod's
+ // environment variables, matching the syntax of Docker links.
+ // Optional: Defaults to true.
+ EnableServiceLinks *bool `json:"enableServiceLinks,omitempty"`
+ // PreemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+ // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ Overhead *corev1.ResourceList `json:"overhead,omitempty"`
+ // TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ // domains. Scheduler will schedule pods in a way which abides by the constraints.
+ // All topologySpreadConstraints are ANDed.
+ TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"`
+ // If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ // In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ // In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ // If a pod does not have FQDN, this has no effect.
+ // Default to false.
+ SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty"`
+ // Specifies the OS of the containers in the pod.
+ // Some pod and container fields are restricted if this is set.
+ //
+ // If the OS field is set to linux, the following fields must be unset:
+ // - securityContext.windowsOptions
+ //
+ // If the OS field is set to windows, following fields must be unset:
+ // - spec.hostPID
+ // - spec.hostIPC
+ // - spec.hostUsers
+ // - spec.resources
+ // - spec.securityContext.appArmorProfile
+ // - spec.securityContext.seLinuxOptions
+ // - spec.securityContext.seccompProfile
+ // - spec.securityContext.fsGroup
+ // - spec.securityContext.fsGroupChangePolicy
+ // - spec.securityContext.sysctls
+ // - spec.shareProcessNamespace
+ // - spec.securityContext.runAsUser
+ // - spec.securityContext.runAsGroup
+ // - spec.securityContext.supplementalGroups
+ // - spec.securityContext.supplementalGroupsPolicy
+ // - spec.containers[*].securityContext.appArmorProfile
+ // - spec.containers[*].securityContext.seLinuxOptions
+ // - spec.containers[*].securityContext.seccompProfile
+ // - spec.containers[*].securityContext.capabilities
+ // - spec.containers[*].securityContext.readOnlyRootFilesystem
+ // - spec.containers[*].securityContext.privileged
+ // - spec.containers[*].securityContext.allowPrivilegeEscalation
+ // - spec.containers[*].securityContext.procMount
+ // - spec.containers[*].securityContext.runAsUser
+ // - spec.containers[*].securityContext.runAsGroup
+ OS *PodOSApplyConfiguration `json:"os,omitempty"`
+ // Use the host's user namespace.
+ // Optional: Default to true.
+ // If set to true or not present, the pod will be run in the host user namespace, useful
+ // for when the pod needs a feature only available to the host user namespace, such as
+ // loading a kernel module with CAP_SYS_MODULE.
+ // When set to false, a new userns is created for the pod. Setting false is useful for
+ // mitigating container breakout vulnerabilities even allowing users to run their
+ // containers as root without actually having root privileges on the host.
+ // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ HostUsers *bool `json:"hostUsers,omitempty"`
+ // SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ // scheduler will not attempt to schedule the pod.
+ //
+ // SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+ SchedulingGates []PodSchedulingGateApplyConfiguration `json:"schedulingGates,omitempty"`
+ // ResourceClaims defines which ResourceClaims must be allocated
+ // and reserved before the Pod is allowed to start. The resources
+ // will be made available to those containers which consume them
+ // by name.
+ //
+ // This is a stable field but requires that the
+ // DynamicResourceAllocation feature gate is enabled.
+ //
+ // This field is immutable.
+ ResourceClaims []PodResourceClaimApplyConfiguration `json:"resourceClaims,omitempty"`
+ // Resources is the total amount of CPU and Memory resources required by all
+ // containers in the pod. It supports specifying Requests and Limits for
+ // "cpu", "memory" and "hugepages-" resource names only. ResourceClaims are not supported.
+ //
+ // This field enables fine-grained control over resource allocation for the
+ // entire pod, allowing resource sharing among containers in a pod.
+ // TODO: For beta graduation, expand this comment with a detailed explanation.
+ //
+ // This is an alpha field and requires enabling the PodLevelResources feature
+ // gate.
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
+ // HostnameOverride specifies an explicit override for the pod's hostname as perceived by the pod.
+ // This field only specifies the pod's hostname and does not affect its DNS records.
+ // When this field is set to a non-empty string:
+ // - It takes precedence over the values set in `hostname` and `subdomain`.
+ // - The Pod's hostname will be set to this value.
+ // - `setHostnameAsFQDN` must be nil or set to false.
+ // - `hostNetwork` must be set to false.
+ //
+ // This field must be a valid DNS subdomain as defined in RFC 1123 and contain at most 64 characters.
+ // Requires the HostnameOverride feature gate to be enabled.
+ HostnameOverride *string `json:"hostnameOverride,omitempty"`
+ // WorkloadRef provides a reference to the Workload object that this Pod belongs to.
+ // This field is used by the scheduler to identify the PodGroup and apply the
+ // correct group scheduling policies. The Workload object referenced
+ // by this field may not exist at the time the Pod is created.
+ // This field is immutable, but a Workload object with the same name
+ // may be recreated with different policies. Doing this during pod scheduling
+ // may result in the placement not conforming to the expected policies.
+ WorkloadRef *WorkloadReferenceApplyConfiguration `json:"workloadRef,omitempty"`
}
// PodSpecApplyConfiguration constructs a declarative configuration of the PodSpec type for use with
@@ -462,3 +677,11 @@ func (b *PodSpecApplyConfiguration) WithHostnameOverride(value string) *PodSpecA
b.HostnameOverride = &value
return b
}
+
+// WithWorkloadRef sets the WorkloadRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WorkloadRef field is set to the value of the last call.
+func (b *PodSpecApplyConfiguration) WithWorkloadRef(value *WorkloadReferenceApplyConfiguration) *PodSpecApplyConfiguration {
+ b.WorkloadRef = value
+ return b
+}
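
The field comments above all describe the same generated builder pattern: each With* method sets one PodSpec field and returns the receiver so calls can be chained, and the new WithWorkloadRef follows suit. A minimal sketch of that usage (container name and image are placeholders, and the JSON dump is only for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Build a PodSpec apply configuration by chaining the generated setters.
	spec := corev1ac.PodSpec().
		WithRestartPolicy(corev1.RestartPolicyAlways).
		WithTerminationGracePeriodSeconds(30). // matches the documented 30s default
		WithContainers(
			corev1ac.Container().
				WithName("app").         // placeholder name
				WithImage("nginx:1.27")) // placeholder image

	// WithWorkloadRef(...) can be chained the same way once a
	// WorkloadReferenceApplyConfiguration value is available.

	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out))
}
```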
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
index 4e643420..e50ca933 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podstatus.go
@@ -25,25 +25,120 @@ import (
// PodStatusApplyConfiguration represents a declarative configuration of the PodStatus type for use
// with apply.
+//
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
type PodStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Phase *corev1.PodPhase `json:"phase,omitempty"`
- Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"`
- Message *string `json:"message,omitempty"`
- Reason *string `json:"reason,omitempty"`
- NominatedNodeName *string `json:"nominatedNodeName,omitempty"`
- HostIP *string `json:"hostIP,omitempty"`
- HostIPs []HostIPApplyConfiguration `json:"hostIPs,omitempty"`
- PodIP *string `json:"podIP,omitempty"`
- PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"`
- StartTime *metav1.Time `json:"startTime,omitempty"`
- InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"`
- ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"`
- QOSClass *corev1.PodQOSClass `json:"qosClass,omitempty"`
- EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"`
- Resize *corev1.PodResizeStatus `json:"resize,omitempty"`
- ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"`
+ // If set, this represents the .metadata.generation that the pod status was set based upon.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+ // The conditions array, the reason and message fields, and the individual container status
+ // arrays contain more detail about the pod's status.
+ // There are five possible phase values:
+ //
+ // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+ // container images has not been created. This includes time before being scheduled as
+ // well as time spent downloading images over the network, which could take a while.
+ // Running: The pod has been bound to a node, and all of the containers have been created.
+ // At least one container is still running, or is in the process of starting or restarting.
+ // Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+ // Failed: All containers in the pod have terminated, and at least one container has
+ // terminated in failure. The container either exited with non-zero status or was terminated
+ // by the system.
+ // Unknown: For some reason the state of the pod could not be obtained, typically due to an
+ // error in communicating with the host of the pod.
+ //
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+ Phase *corev1.PodPhase `json:"phase,omitempty"`
+ // Current service state of pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+ Conditions []PodConditionApplyConfiguration `json:"conditions,omitempty"`
+ // A human readable message indicating details about why the pod is in this condition.
+ Message *string `json:"message,omitempty"`
+ // A brief CamelCase message indicating details about why the pod is in this state.
+ // e.g. 'Evicted'
+ Reason *string `json:"reason,omitempty"`
+ // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+ // scheduled right away as preemption victims receive their graceful termination periods.
+ // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+ // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+ // give the resources on this node to a higher priority pod that is created after preemption.
+ // As a result, this field may be different than PodSpec.nodeName when the pod is
+ // scheduled.
+ NominatedNodeName *string `json:"nominatedNodeName,omitempty"`
+ // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIP will
+ // not be updated even if there is a node assigned to the pod.
+ HostIP *string `json:"hostIP,omitempty"`
+ // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
+ // match the hostIP field. This list is empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIPs will
+ // not be updated even if there is a node assigned to this pod.
+ HostIPs []HostIPApplyConfiguration `json:"hostIPs,omitempty"`
+ // podIP address allocated to the pod. Routable at least within the cluster.
+ // Empty if not yet allocated.
+ PodIP *string `json:"podIP,omitempty"`
+ // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
+ // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
+ // is empty if no IPs have been allocated yet.
+ PodIPs []PodIPApplyConfiguration `json:"podIPs,omitempty"`
+ // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ StartTime *metav1.Time `json:"startTime,omitempty"`
+ // Statuses of init containers in this pod. The most recent successful non-restartable
+ // init container will have ready = true, the most recently started container will have
+ // startTime set.
+ // Each init container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+ InitContainerStatuses []ContainerStatusApplyConfiguration `json:"initContainerStatuses,omitempty"`
+ // Statuses of containers in this pod.
+ // Each container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+ ContainerStatuses []ContainerStatusApplyConfiguration `json:"containerStatuses,omitempty"`
+ // The Quality of Service (QOS) classification assigned to the pod based on resource requirements
+ // See PodQOSClass type for available QOS classes
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes
+ QOSClass *corev1.PodQOSClass `json:"qosClass,omitempty"`
+ // Statuses for any ephemeral containers that have run in this pod.
+ // Each ephemeral container in the pod should have at most one status in this list,
+ // and all statuses should be for containers in the pod.
+ // However this is not enforced.
+ // If a status for a non-existent container is present in the list, or the list has duplicate names,
+ // the behavior of various Kubernetes components is not defined and those statuses might be
+ // ignored.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+ EphemeralContainerStatuses []ContainerStatusApplyConfiguration `json:"ephemeralContainerStatuses,omitempty"`
+ // Status of resources resize desired for pod's containers.
+ // It is empty if no resources resize is pending.
+ // Any changes to container resources will automatically set this to "Proposed"
+ // Deprecated: Resize status is moved to two pod conditions PodResizePending and PodResizeInProgress.
+ // PodResizePending will track states where the spec has been resized, but the Kubelet has not yet allocated the resources.
+ // PodResizeInProgress will track in-progress resizes, and should be present whenever allocated resources != acknowledged resources.
+ Resize *corev1.PodResizeStatus `json:"resize,omitempty"`
+ // Status of resource claims.
+ ResourceClaimStatuses []PodResourceClaimStatusApplyConfiguration `json:"resourceClaimStatuses,omitempty"`
+ // Status of extended resource claim backed by DRA.
ExtendedResourceClaimStatus *PodExtendedResourceClaimStatusApplyConfiguration `json:"extendedResourceClaimStatus,omitempty"`
+ // AllocatedResources is the total requests allocated for this pod by the node.
+ // If pod-level requests are not set, this will be the total requests aggregated
+ // across containers in the pod.
+ AllocatedResources *corev1.ResourceList `json:"allocatedResources,omitempty"`
+ // Resources represents the compute resource requests and limits that have been
+ // applied at the pod level if pod-level requests or limits are set in
+ // PodSpec.Resources
+ Resources *ResourceRequirementsApplyConfiguration `json:"resources,omitempty"`
}
// PodStatusApplyConfiguration constructs a declarative configuration of the PodStatus type for use with
@@ -230,3 +325,19 @@ func (b *PodStatusApplyConfiguration) WithExtendedResourceClaimStatus(value *Pod
b.ExtendedResourceClaimStatus = value
return b
}
+
+// WithAllocatedResources sets the AllocatedResources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AllocatedResources field is set to the value of the last call.
+func (b *PodStatusApplyConfiguration) WithAllocatedResources(value corev1.ResourceList) *PodStatusApplyConfiguration {
+ b.AllocatedResources = &value
+ return b
+}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *PodStatusApplyConfiguration) WithResources(value *ResourceRequirementsApplyConfiguration) *PodStatusApplyConfiguration {
+ b.Resources = value
+ return b
+}
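
The two setters appended at the end of this file use the same chaining convention. A small sketch of populating the new pod-level status fields (the values are illustrative only; in practice the kubelet, not a user, writes pod status):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	status := corev1ac.PodStatus().
		WithPhase(corev1.PodRunning).
		// Total requests allocated for the pod by the node.
		WithAllocatedResources(corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		}).
		// Pod-level resources, mirroring PodSpec.Resources.
		WithResources(corev1ac.ResourceRequirements().
			WithRequests(corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("500m")}))

	fmt.Println(*status.Phase, (*status.AllocatedResources)[corev1.ResourceCPU])
}
```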
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
index e723125f..a4a2e300 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
@@ -29,10 +29,16 @@ import (
// PodTemplateApplyConfiguration represents a declarative configuration of the PodTemplate type for use
// with apply.
+//
+// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplateApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // Template defines the pods that will be created from this pod template.
+ // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// PodTemplate constructs a declarative configuration of the PodTemplate type for use with
@@ -46,29 +52,14 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration {
return b
}
-// ExtractPodTemplate extracts the applied configuration owned by fieldManager from
-// podTemplate. If no managedFields are found in podTemplate for fieldManager, a
-// PodTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractPodTemplateFrom extracts the applied configuration owned by fieldManager from
+// podTemplate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// podTemplate must be an unmodified PodTemplate API object that was retrieved from the Kubernetes API.
-// ExtractPodTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractPodTemplateFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
- return extractPodTemplate(podTemplate, fieldManager, "")
-}
-
-// ExtractPodTemplateStatus is the same as ExtractPodTemplate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPodTemplateStatus(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
- return extractPodTemplate(podTemplate, fieldManager, "status")
-}
-
-func extractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) {
+func ExtractPodTemplateFrom(podTemplate *corev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) {
b := &PodTemplateApplyConfiguration{}
err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string, su
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractPodTemplate extracts the applied configuration owned by fieldManager from
+// podTemplate. If no managedFields are found in podTemplate for fieldManager, a
+// PodTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// podTemplate must be an unmodified PodTemplate API object that was retrieved from the Kubernetes API.
+// ExtractPodTemplate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPodTemplate(podTemplate *corev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
+ return ExtractPodTemplateFrom(podTemplate, fieldManager, "")
+}
+
func (b PodTemplateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
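
A sketch of the extract/modify-in-place/apply workflow these reshuffled helpers support; the clientset wiring, field-manager name, and label key are assumptions for illustration, not part of the vendored API:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelPodTemplate extracts only the fields owned by fieldManager, changes a
// label, and applies the result back under the same manager.
func relabelPodTemplate(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	const fieldManager = "skyhook-operator" // assumed manager name

	live, err := cs.CoreV1().PodTemplates(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// "" selects the main resource; "status" etc. would select a subresource.
	ac, err := corev1ac.ExtractPodTemplateFrom(live, fieldManager, "")
	if err != nil {
		return err
	}

	ac.WithLabels(map[string]string{"example.com/tier": "test"}) // hypothetical label
	_, err = cs.CoreV1().PodTemplates(ns).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	})
	return err
}

func main() {
	// Wiring a real clientset (e.g. via clientcmd) is omitted from this sketch.
	_ = relabelPodTemplate
}
```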
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
index 9aa83092..6e87db62 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplatespec.go
@@ -26,9 +26,15 @@ import (
// PodTemplateSpecApplyConfiguration represents a declarative configuration of the PodTemplateSpec type for use
// with apply.
+//
+// PodTemplateSpec describes the data a pod should have when created from a template
type PodTemplateSpecApplyConfiguration struct {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior of the pod.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PodSpecApplyConfiguration `json:"spec,omitempty"`
}
// PodTemplateSpecApplyConfiguration constructs a declarative configuration of the PodTemplateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
index eff8fc2a..8eb36341 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portstatus.go
@@ -24,10 +24,23 @@ import (
// PortStatusApplyConfiguration represents a declarative configuration of the PortStatus type for use
// with apply.
+//
+// PortStatus represents the error condition of a service port
type PortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
+ // Port is the port number of the service port of which status is recorded here
+ Port *int32 `json:"port,omitempty"`
+ // Protocol is the protocol of the service port of which status is recorded here
+ // The supported values are: "TCP", "UDP", "SCTP"
Protocol *corev1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ // Error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ Error *string `json:"error,omitempty"`
}
// PortStatusApplyConfiguration constructs a declarative configuration of the PortStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
index 29715e02..56948168 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/portworxvolumesource.go
@@ -20,10 +20,18 @@ package v1
// PortworxVolumeSourceApplyConfiguration represents a declarative configuration of the PortworxVolumeSource type for use
// with apply.
+//
+// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSourceApplyConfiguration struct {
+ // volumeID uniquely identifies a Portworx volume
VolumeID *string `json:"volumeID,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // fsType represents the filesystem type to mount
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// PortworxVolumeSourceApplyConfiguration constructs a declarative configuration of the PortworxVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
index b88a3646..db8d1512 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/preferredschedulingterm.go
@@ -20,8 +20,13 @@ package v1
// PreferredSchedulingTermApplyConfiguration represents a declarative configuration of the PreferredSchedulingTerm type for use
// with apply.
+//
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTermApplyConfiguration struct {
- Weight *int32 `json:"weight,omitempty"`
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ Weight *int32 `json:"weight,omitempty"`
+ // A node selector term, associated with the corresponding weight.
Preference *NodeSelectorTermApplyConfiguration `json:"preference,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
index d6c65468..e6b3ddd7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probe.go
@@ -20,14 +20,39 @@ package v1
// ProbeApplyConfiguration represents a declarative configuration of the Probe type for use
// with apply.
+//
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
type ProbeApplyConfiguration struct {
+ // The action taken to determine the health of a container
ProbeHandlerApplyConfiguration `json:",inline"`
- InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"`
- TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
- PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
- SuccessThreshold *int32 `json:"successThreshold,omitempty"`
- FailureThreshold *int32 `json:"failureThreshold,omitempty"`
- TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+ // Number of seconds after the container has started before liveness probes are initiated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"`
+ // Number of seconds after which the probe times out.
+ // Defaults to 1 second. Minimum value is 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
+ // How often (in seconds) to perform the probe.
+ // Default to 10 seconds. Minimum value is 1.
+ PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ SuccessThreshold *int32 `json:"successThreshold,omitempty"`
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // Defaults to 3. Minimum value is 1.
+ FailureThreshold *int32 `json:"failureThreshold,omitempty"`
+ // Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ // The grace period is the duration in seconds after the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ // value overrides the value provided by the pod spec.
+ // Value must be non-negative integer. The value zero indicates stop immediately via
+ // the kill signal (no opportunity to shut down).
+ // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}
// ProbeApplyConfiguration constructs a declarative configuration of the Probe type for use with
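
The probe defaults called out in these comments can be made explicit with the generated builders; a brief sketch, where the path and port are placeholders:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	readiness := corev1ac.Probe().
		// Exactly one handler: here an HTTP GET against the container.
		WithHTTPGet(corev1ac.HTTPGetAction().
			WithPath("/healthz").              // placeholder path
			WithPort(intstr.FromInt32(8080))). // placeholder port
		WithInitialDelaySeconds(5).
		WithTimeoutSeconds(1).   // documented default
		WithPeriodSeconds(10).   // documented default
		WithSuccessThreshold(1). // must be 1 for liveness and startup probes
		WithFailureThreshold(3)  // documented default

	fmt.Println(*readiness.HTTPGet.Path, *readiness.PeriodSeconds)
}
```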
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
index 1f88745e..58fce860 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/probehandler.go
@@ -20,11 +20,18 @@ package v1
// ProbeHandlerApplyConfiguration represents a declarative configuration of the ProbeHandler type for use
// with apply.
+//
+// ProbeHandler defines a specific action that should be taken in a probe.
+// One and only one of the fields must be specified.
type ProbeHandlerApplyConfiguration struct {
- Exec *ExecActionApplyConfiguration `json:"exec,omitempty"`
- HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"`
+ // Exec specifies a command to execute in the container.
+ Exec *ExecActionApplyConfiguration `json:"exec,omitempty"`
+ // HTTPGet specifies an HTTP GET request to perform.
+ HTTPGet *HTTPGetActionApplyConfiguration `json:"httpGet,omitempty"`
+ // TCPSocket specifies a connection to a TCP port.
TCPSocket *TCPSocketActionApplyConfiguration `json:"tcpSocket,omitempty"`
- GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"`
+ // GRPC specifies a GRPC HealthCheckRequest.
+ GRPC *GRPCActionApplyConfiguration `json:"grpc,omitempty"`
}
// ProbeHandlerApplyConfiguration constructs a declarative configuration of the ProbeHandler type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
index c922ec8c..a17e4d1d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/projectedvolumesource.go
@@ -20,9 +20,19 @@ package v1
// ProjectedVolumeSourceApplyConfiguration represents a declarative configuration of the ProjectedVolumeSource type for use
// with apply.
+//
+// Represents a projected volume source
type ProjectedVolumeSourceApplyConfiguration struct {
- Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"`
- DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // sources is the list of volume projections. Each entry in this list
+ // handles one source.
+ Sources []VolumeProjectionApplyConfiguration `json:"sources,omitempty"`
+ // defaultMode are the mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
}
// ProjectedVolumeSourceApplyConfiguration constructs a declarative configuration of the ProjectedVolumeSource type for use with
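
Since defaultMode is a plain int32, the octal/decimal note above simply means 0o640 and 416 are the same value. A sketch using the generated builders (the ConfigMap name is a placeholder):

```go
package main

import (
	"fmt"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	src := corev1ac.ProjectedVolumeSource().
		WithDefaultMode(0o640). // 416 in decimal; JSON manifests must use the decimal form
		WithSources(corev1ac.VolumeProjection().
			WithConfigMap(corev1ac.ConfigMapProjection().
				WithName("app-config"))) // placeholder ConfigMap name

	fmt.Println(*src.DefaultMode) // prints 416
}
```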
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
index 9a042a0a..b9ac1ba0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/quobytevolumesource.go
@@ -20,13 +20,28 @@ package v1
// QuobyteVolumeSourceApplyConfiguration represents a declarative configuration of the QuobyteVolumeSource type for use
// with apply.
+//
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSourceApplyConfiguration struct {
+ // registry represents a single or multiple Quobyte Registry services
+ // specified as a string as host:port pair (multiple entries are separated with commas)
+ // which acts as the central registry for volumes
Registry *string `json:"registry,omitempty"`
- Volume *string `json:"volume,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- User *string `json:"user,omitempty"`
- Group *string `json:"group,omitempty"`
- Tenant *string `json:"tenant,omitempty"`
+ // volume is a string that references an already created Quobyte volume by name.
+ Volume *string `json:"volume,omitempty"`
+ // readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ // Defaults to false.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // user to map volume access to
+ // Defaults to serviceaccount user
+ User *string `json:"user,omitempty"`
+ // group to map volume access to
+ // Default is no group
+ Group *string `json:"group,omitempty"`
+ // tenant owning the given Quobyte volume in the Backend
+ // Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ Tenant *string `json:"tenant,omitempty"`
}
// QuobyteVolumeSourceApplyConfiguration constructs a declarative configuration of the QuobyteVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
index 64f25724..e1f0960e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdpersistentvolumesource.go
@@ -20,15 +20,43 @@ package v1
// RBDPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the RBDPersistentVolumeSource type for use
// with apply.
+//
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
type RBDPersistentVolumeSourceApplyConfiguration struct {
- CephMonitors []string `json:"monitors,omitempty"`
- RBDImage *string `json:"image,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- RBDPool *string `json:"pool,omitempty"`
- RadosUser *string `json:"user,omitempty"`
- Keyring *string `json:"keyring,omitempty"`
- SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // monitors is a collection of Ceph monitors.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ CephMonitors []string `json:"monitors,omitempty"`
+ // image is the rados image name.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RBDImage *string `json:"image,omitempty"`
+ // fsType is the filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // pool is the rados pool name.
+ // Default is rbd.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RBDPool *string `json:"pool,omitempty"`
+ // user is the rados user name.
+ // Default is admin.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RadosUser *string `json:"user,omitempty"`
+ // keyring is the path to key ring for RBDUser.
+ // Default is /etc/ceph/keyring.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ Keyring *string `json:"keyring,omitempty"`
+ // secretRef is the name of the authentication secret for RBDUser. If provided,
+ // it overrides keyring.
+ // Default is nil.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // readOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// RBDPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
index 8dae198c..907bcedb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/rbdvolumesource.go
@@ -20,15 +20,43 @@ package v1
// RBDVolumeSourceApplyConfiguration represents a declarative configuration of the RBDVolumeSource type for use
// with apply.
+//
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
type RBDVolumeSourceApplyConfiguration struct {
- CephMonitors []string `json:"monitors,omitempty"`
- RBDImage *string `json:"image,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- RBDPool *string `json:"pool,omitempty"`
- RadosUser *string `json:"user,omitempty"`
- Keyring *string `json:"keyring,omitempty"`
- SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // monitors is a collection of Ceph monitors.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ CephMonitors []string `json:"monitors,omitempty"`
+ // image is the rados image name.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RBDImage *string `json:"image,omitempty"`
+ // fsType is the filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ FSType *string `json:"fsType,omitempty"`
+ // pool is the rados pool name.
+ // Default is rbd.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RBDPool *string `json:"pool,omitempty"`
+ // user is the rados user name.
+ // Default is admin.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ RadosUser *string `json:"user,omitempty"`
+ // keyring is the path to key ring for RBDUser.
+ // Default is /etc/ceph/keyring.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ Keyring *string `json:"keyring,omitempty"`
+ // secretRef is the name of the authentication secret for RBDUser. If provided,
+ // it overrides keyring.
+ // Default is nil.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // readOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// RBDVolumeSourceApplyConfiguration constructs a declarative configuration of the RBDVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
index 6b06c290..a6416c4b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
@@ -29,11 +29,23 @@ import (
// ReplicationControllerApplyConfiguration represents a declarative configuration of the ReplicationController type for use
// with apply.
+//
+// ReplicationController represents the configuration of a replication controller.
type ReplicationControllerApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // If the Labels of a ReplicationController are empty, they are defaulted to
+ // be the same as the Pod(s) that the replication controller manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the specification of the desired behavior of the replication controller.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ReplicationControllerSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the most recently observed status of the replication controller.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ReplicationControllerStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicationController constructs a declarative configuration of the ReplicationController type for use with
@@ -47,6 +59,27 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo
return b
}
+// ExtractReplicationControllerFrom extracts the applied configuration owned by fieldManager from
+// replicationController for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// replicationController must be an unmodified ReplicationController API object that was retrieved from the Kubernetes API.
+// ExtractReplicationControllerFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractReplicationControllerFrom(replicationController *corev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) {
+ b := &ReplicationControllerApplyConfiguration{}
+ err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(replicationController.Name)
+ b.WithNamespace(replicationController.Namespace)
+
+ b.WithKind("ReplicationController")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractReplicationController extracts the applied configuration owned by fieldManager from
// replicationController. If no managedFields are found in replicationController for fieldManager, a
// ReplicationControllerApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +90,22 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo
// ExtractReplicationController provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractReplicationController(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
- return extractReplicationController(replicationController, fieldManager, "")
+ return ExtractReplicationControllerFrom(replicationController, fieldManager, "")
}
-// ExtractReplicationControllerStatus is the same as ExtractReplicationController except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractReplicationControllerStatus(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
- return extractReplicationController(replicationController, fieldManager, "status")
+// ExtractReplicationControllerScale extracts the applied configuration owned by fieldManager from
+// replicationController for the scale subresource.
+func ExtractReplicationControllerScale(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+ return ExtractReplicationControllerFrom(replicationController, fieldManager, "scale")
}
-func extractReplicationController(replicationController *corev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) {
- b := &ReplicationControllerApplyConfiguration{}
- err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(replicationController.Name)
- b.WithNamespace(replicationController.Namespace)
-
- b.WithKind("ReplicationController")
- b.WithAPIVersion("v1")
- return b, nil
+// ExtractReplicationControllerStatus extracts the applied configuration owned by fieldManager from
+// replicationController for the status subresource.
+func ExtractReplicationControllerStatus(replicationController *corev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+ return ExtractReplicationControllerFrom(replicationController, fieldManager, "status")
}
+
func (b ReplicationControllerApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
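
The hunk above promotes the old unexported extract helper to ExtractReplicationControllerFrom, which takes the subresource explicitly. A minimal sketch (not part of this patch) of the extract/modify-in-place/apply workflow it documents, assuming a typed clientset and an illustrative field-manager name:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpReplicas reads the live object, recovers only the fields owned by
// "example-manager", changes the replica count, and applies the result back.
func bumpReplicas(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	rc, err := cs.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// "" selects the main resource; "status" or "scale" would select a subresource.
	cfg, err := corev1ac.ExtractReplicationControllerFrom(rc, "example-manager", "")
	if err != nil {
		return err
	}
	if cfg.Spec == nil {
		cfg.WithSpec(corev1ac.ReplicationControllerSpec())
	}
	cfg.Spec.WithReplicas(replicas)
	_, err = cs.CoreV1().ReplicationControllers(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
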
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
index dfcecc05..72808685 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollercondition.go
@@ -25,12 +25,19 @@ import (
// ReplicationControllerConditionApplyConfiguration represents a declarative configuration of the ReplicationControllerCondition type for use
// with apply.
+//
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerConditionApplyConfiguration struct {
- Type *corev1.ReplicationControllerConditionType `json:"type,omitempty"`
- Status *corev1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of replication controller condition.
+ Type *corev1.ReplicationControllerConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *corev1.ConditionStatus `json:"status,omitempty"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// ReplicationControllerConditionApplyConfiguration constructs a declarative configuration of the ReplicationControllerCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
index 07bac9f4..e556b829 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerspec.go
@@ -20,11 +20,29 @@ package v1
// ReplicationControllerSpecApplyConfiguration represents a declarative configuration of the ReplicationControllerSpec type for use
// with apply.
+//
+// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- Selector map[string]string `json:"selector,omitempty"`
- Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // Selector is a label query over pods that should match the Replicas count.
+ // If Selector is empty, it is defaulted to the labels present on the Pod template.
+ // Label keys and values that must match in order to be controlled by this replication
+ // controller, if empty defaulted to labels on Pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector map[string]string `json:"selector,omitempty"`
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. This takes precedence over a TemplateRef.
+ // The only allowed template.spec.restartPolicy value is "Always".
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template *PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// ReplicationControllerSpecApplyConfiguration constructs a declarative configuration of the ReplicationControllerSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
index c8046aa5..607d543f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontrollerstatus.go
@@ -20,13 +20,23 @@ package v1
// ReplicationControllerStatusApplyConfiguration represents a declarative configuration of the ReplicationControllerStatus type for use
// with apply.
+//
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
type ReplicationControllerStatusApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+ Replicas *int32 `json:"replicas,omitempty"`
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
+ // The number of ready replicas for this replication controller.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // ObservedGeneration reflects the generation of the most recently observed replication controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Represents the latest available observations of a replication controller's current state.
+ Conditions []ReplicationControllerConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ReplicationControllerStatusApplyConfiguration constructs a declarative configuration of the ReplicationControllerStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
index b00c6924..1c283d01 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourceclaim.go
@@ -20,8 +20,16 @@ package v1
// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
// with apply.
+//
+// ResourceClaim references one entry in PodSpec.ResourceClaims.
type ResourceClaimApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name must match the name of one entry in pod.spec.resourceClaims of
+ // the Pod where this field is used. It makes that resource available
+ // inside a container.
+ Name *string `json:"name,omitempty"`
+ // Request is the name chosen for a request in the referenced claim.
+ // If empty, everything from the claim is made available, otherwise
+ // only the result of this request.
Request *string `json:"request,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
index 1b4918a6..ebff4d2b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcefieldselector.go
@@ -24,10 +24,15 @@ import (
// ResourceFieldSelectorApplyConfiguration represents a declarative configuration of the ResourceFieldSelector type for use
// with apply.
+//
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
type ResourceFieldSelectorApplyConfiguration struct {
- ContainerName *string `json:"containerName,omitempty"`
- Resource *string `json:"resource,omitempty"`
- Divisor *resource.Quantity `json:"divisor,omitempty"`
+ // Container name: required for volumes, optional for env vars
+ ContainerName *string `json:"containerName,omitempty"`
+ // Required: resource to select
+ Resource *string `json:"resource,omitempty"`
+ // Specifies the output format of the exposed resources, defaults to "1"
+ Divisor *resource.Quantity `json:"divisor,omitempty"`
}
// ResourceFieldSelectorApplyConfiguration constructs a declarative configuration of the ResourceFieldSelector type for use with
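
The resourceFieldRef comments above cover the downward API selector for container resources. A small sketch of building one with these apply-configuration builders; the container name, variable name, and divisor are assumptions:

package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// memLimitEnv exposes the "app" container's memory limit as an environment variable.
func memLimitEnv() *corev1ac.EnvVarApplyConfiguration {
	return corev1ac.EnvVar().
		WithName("MEM_LIMIT_BYTES").
		WithValueFrom(corev1ac.EnvVarSource().
			WithResourceFieldRef(corev1ac.ResourceFieldSelector().
				WithContainerName("app").
				WithResource("limits.memory").
				WithDivisor(resource.MustParse("1"))))
}
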
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
index 0338780b..684153c4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go
@@ -24,9 +24,23 @@ import (
// ResourceHealthApplyConfiguration represents a declarative configuration of the ResourceHealth type for use
// with apply.
+//
+// ResourceHealth represents the health of a resource. It has the latest device health information.
+// This is a part of KEP https://kep.k8s.io/4680.
type ResourceHealthApplyConfiguration struct {
- ResourceID *corev1.ResourceID `json:"resourceID,omitempty"`
- Health *corev1.ResourceHealthStatus `json:"health,omitempty"`
+ // ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
+ ResourceID *corev1.ResourceID `json:"resourceID,omitempty"`
+ // Health of the resource.
+ // can be one of:
+ // - Healthy: operates as normal
+ // - Unhealthy: reported unhealthy. We consider this a temporary health issue
+ // since we do not have a mechanism today to distinguish
+ // temporary and permanent issues.
+ // - Unknown: The status cannot be determined.
+ // For example, Device Plugin got unregistered and hasn't been re-registered since.
+ //
+ // In future we may want to introduce the PermanentlyUnhealthy Status.
+ Health *corev1.ResourceHealthStatus `json:"health,omitempty"`
}
// ResourceHealthApplyConfiguration constructs a declarative configuration of the ResourceHealth type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
index 7abe77b2..6489d292 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
@@ -29,11 +29,19 @@ import (
// ResourceQuotaApplyConfiguration represents a declarative configuration of the ResourceQuota type for use
// with apply.
+//
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuotaApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the desired quota.
+ // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ResourceQuotaSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status defines the actual enforced quota and its current usage.
+ // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ResourceQuotaStatusApplyConfiguration `json:"status,omitempty"`
}
// ResourceQuota constructs a declarative configuration of the ResourceQuota type for use with
@@ -47,6 +55,27 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
return b
}
+// ExtractResourceQuotaFrom extracts the applied configuration owned by fieldManager from
+// resourceQuota for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// resourceQuota must be an unmodified ResourceQuota API object that was retrieved from the Kubernetes API.
+// ExtractResourceQuotaFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceQuotaFrom(resourceQuota *corev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) {
+ b := &ResourceQuotaApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceQuota.Name)
+ b.WithNamespace(resourceQuota.Namespace)
+
+ b.WithKind("ResourceQuota")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractResourceQuota extracts the applied configuration owned by fieldManager from
// resourceQuota. If no managedFields are found in resourceQuota for fieldManager, a
// ResourceQuotaApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +86,16 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
// ExtractResourceQuota provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
- return extractResourceQuota(resourceQuota, fieldManager, "")
+ return ExtractResourceQuotaFrom(resourceQuota, fieldManager, "")
}
-// ExtractResourceQuotaStatus is the same as ExtractResourceQuota except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractResourceQuotaStatus extracts the applied configuration owned by fieldManager from
+// resourceQuota for the status subresource.
func ExtractResourceQuotaStatus(resourceQuota *corev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
- return extractResourceQuota(resourceQuota, fieldManager, "status")
+ return ExtractResourceQuotaFrom(resourceQuota, fieldManager, "status")
}
-func extractResourceQuota(resourceQuota *corev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) {
- b := &ResourceQuotaApplyConfiguration{}
- err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(resourceQuota.Name)
- b.WithNamespace(resourceQuota.Namespace)
-
- b.WithKind("ResourceQuota")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b ResourceQuotaApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
index 36d342fc..a08e4b73 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotaspec.go
@@ -24,9 +24,18 @@ import (
// ResourceQuotaSpecApplyConfiguration represents a declarative configuration of the ResourceQuotaSpec type for use
// with apply.
+//
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpecApplyConfiguration struct {
- Hard *corev1.ResourceList `json:"hard,omitempty"`
- Scopes []corev1.ResourceQuotaScope `json:"scopes,omitempty"`
+ // hard is the set of desired hard limits for each named resource.
+ // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+ Hard *corev1.ResourceList `json:"hard,omitempty"`
+ // A collection of filters that must match each object tracked by a quota.
+ // If not specified, the quota matches all objects.
+ Scopes []corev1.ResourceQuotaScope `json:"scopes,omitempty"`
+ // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+ // but expressed using ScopeSelectorOperator in combination with possible values.
+ // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
ScopeSelector *ScopeSelectorApplyConfiguration `json:"scopeSelector,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
index 6338a130..4d7faf84 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequotastatus.go
@@ -24,8 +24,13 @@ import (
// ResourceQuotaStatusApplyConfiguration represents a declarative configuration of the ResourceQuotaStatus type for use
// with apply.
+//
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatusApplyConfiguration struct {
+ // Hard is the set of enforced hard limits for each named resource.
+ // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
Hard *corev1.ResourceList `json:"hard,omitempty"`
+ // Used is the current observed total usage of the resource in the namespace.
Used *corev1.ResourceList `json:"used,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
index ea77647a..4ba93df1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcerequirements.go
@@ -24,10 +24,25 @@ import (
// ResourceRequirementsApplyConfiguration represents a declarative configuration of the ResourceRequirements type for use
// with apply.
+//
+// ResourceRequirements describes the compute resource requirements.
type ResourceRequirementsApplyConfiguration struct {
- Limits *corev1.ResourceList `json:"limits,omitempty"`
- Requests *corev1.ResourceList `json:"requests,omitempty"`
- Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"`
+ // Limits describes the maximum amount of compute resources allowed.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Limits *corev1.ResourceList `json:"limits,omitempty"`
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Requests *corev1.ResourceList `json:"requests,omitempty"`
+ // Claims lists the names of resources, defined in spec.resourceClaims,
+ // that are used by this container.
+ //
+ // This field depends on the
+ // DynamicResourceAllocation feature gate.
+ //
+ // This field is immutable. It can only be set for containers.
+ Claims []ResourceClaimApplyConfiguration `json:"claims,omitempty"`
}
// ResourceRequirementsApplyConfiguration constructs a declarative configuration of the ResourceRequirements type for use with
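
As the new comments note, Requests defaults to Limits when omitted and Claims depends on the DynamicResourceAllocation feature gate. A sketch of building a requirements block with these builders; the claim name and sizings are illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// gpuResources builds compute requests/limits plus a DRA claim reference.
// The claim name "gpu" must match an entry in the pod's spec.resourceClaims.
func gpuResources() *corev1ac.ResourceRequirementsApplyConfiguration {
	return corev1ac.ResourceRequirements().
		WithRequests(corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		}).
		WithLimits(corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("512Mi"),
		}).
		WithClaims(corev1ac.ResourceClaim().WithName("gpu"))
}
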
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
index e9958665..511164e7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcestatus.go
@@ -24,8 +24,17 @@ import (
// ResourceStatusApplyConfiguration represents a declarative configuration of the ResourceStatus type for use
// with apply.
+//
+// ResourceStatus represents the status of a single resource allocated to a Pod.
type ResourceStatusApplyConfiguration struct {
- Name *corev1.ResourceName `json:"name,omitempty"`
+ // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec.
+	// For DRA resources, the value must be "claim:<claim_name>/<request>".
+ // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
+ Name *corev1.ResourceName `json:"name,omitempty"`
+	// List of unique resources health. Each element in the list contains a unique resource ID and its health.
+ // At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node.
+ // If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share.
+ // See ResourceID type definition for a specific format it has in various use cases.
Resources []ResourceHealthApplyConfiguration `json:"resources,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
index b07f46de..4fcb35ed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiopersistentvolumesource.go
@@ -20,17 +20,36 @@ package v1
// ScaleIOPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOPersistentVolumeSource type for use
// with apply.
+//
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
type ScaleIOPersistentVolumeSourceApplyConfiguration struct {
- Gateway *string `json:"gateway,omitempty"`
- System *string `json:"system,omitempty"`
- SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
- SSLEnabled *bool `json:"sslEnabled,omitempty"`
- ProtectionDomain *string `json:"protectionDomain,omitempty"`
- StoragePool *string `json:"storagePool,omitempty"`
- StorageMode *string `json:"storageMode,omitempty"`
- VolumeName *string `json:"volumeName,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // gateway is the host address of the ScaleIO API Gateway.
+ Gateway *string `json:"gateway,omitempty"`
+ // system is the name of the storage system as configured in ScaleIO.
+ System *string `json:"system,omitempty"`
+ // secretRef references to the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, Login operation will fail.
+ SecretRef *SecretReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
+ SSLEnabled *bool `json:"sslEnabled,omitempty"`
+ // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+ ProtectionDomain *string `json:"protectionDomain,omitempty"`
+ // storagePool is the ScaleIO Storage Pool associated with the protection domain.
+ StoragePool *string `json:"storagePool,omitempty"`
+ // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ StorageMode *string `json:"storageMode,omitempty"`
+ // volumeName is the name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName *string `json:"volumeName,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs"
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// ScaleIOPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
index 740c05eb..2a484851 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scaleiovolumesource.go
@@ -20,17 +20,36 @@ package v1
// ScaleIOVolumeSourceApplyConfiguration represents a declarative configuration of the ScaleIOVolumeSource type for use
// with apply.
+//
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
type ScaleIOVolumeSourceApplyConfiguration struct {
- Gateway *string `json:"gateway,omitempty"`
- System *string `json:"system,omitempty"`
- SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
- SSLEnabled *bool `json:"sslEnabled,omitempty"`
- ProtectionDomain *string `json:"protectionDomain,omitempty"`
- StoragePool *string `json:"storagePool,omitempty"`
- StorageMode *string `json:"storageMode,omitempty"`
- VolumeName *string `json:"volumeName,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // gateway is the host address of the ScaleIO API Gateway.
+ Gateway *string `json:"gateway,omitempty"`
+ // system is the name of the storage system as configured in ScaleIO.
+ System *string `json:"system,omitempty"`
+ // secretRef references to the secret for ScaleIO user and other
+ // sensitive information. If this is not provided, Login operation will fail.
+ SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // sslEnabled Flag enable/disable SSL communication with Gateway, default false
+ SSLEnabled *bool `json:"sslEnabled,omitempty"`
+ // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+ ProtectionDomain *string `json:"protectionDomain,omitempty"`
+ // storagePool is the ScaleIO Storage Pool associated with the protection domain.
+ StoragePool *string `json:"storagePool,omitempty"`
+ // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ StorageMode *string `json:"storageMode,omitempty"`
+ // volumeName is the name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName *string `json:"volumeName,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
}
// ScaleIOVolumeSourceApplyConfiguration constructs a declarative configuration of the ScaleIOVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
index c2481f49..cae949e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopedresourceselectorrequirement.go
@@ -24,10 +24,20 @@ import (
// ScopedResourceSelectorRequirementApplyConfiguration represents a declarative configuration of the ScopedResourceSelectorRequirement type for use
// with apply.
+//
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
type ScopedResourceSelectorRequirementApplyConfiguration struct {
- ScopeName *corev1.ResourceQuotaScope `json:"scopeName,omitempty"`
- Operator *corev1.ScopeSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ // The name of the scope that the selector applies to.
+ ScopeName *corev1.ResourceQuotaScope `json:"scopeName,omitempty"`
+ // Represents a scope's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist.
+ Operator *corev1.ScopeSelectorOperator `json:"operator,omitempty"`
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty.
+ // This array is replaced during a strategic merge patch.
+ Values []string `json:"values,omitempty"`
}
// ScopedResourceSelectorRequirementApplyConfiguration constructs a declarative configuration of the ScopedResourceSelectorRequirement type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
index a9fb9a1b..26c1eabf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/scopeselector.go
@@ -20,7 +20,11 @@ package v1
// ScopeSelectorApplyConfiguration represents a declarative configuration of the ScopeSelector type for use
// with apply.
+//
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
type ScopeSelectorApplyConfiguration struct {
+ // A list of scope selector requirements by scope of the resources.
MatchExpressions []ScopedResourceSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
}
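
Taken together, the ResourceQuotaSpec, ScopedResourceSelectorRequirement, and ScopeSelector fields above compose like this; the quota name, namespace, limit, and priority class values are illustrative:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// highPriorityQuota caps pod count for a set of priority classes by combining
// hard limits with a scopeSelector.
func highPriorityQuota() *corev1ac.ResourceQuotaApplyConfiguration {
	return corev1ac.ResourceQuota("high-priority-pods", "team-a").
		WithSpec(corev1ac.ResourceQuotaSpec().
			WithHard(corev1.ResourceList{
				corev1.ResourcePods: resource.MustParse("10"),
			}).
			WithScopeSelector(corev1ac.ScopeSelector().
				WithMatchExpressions(corev1ac.ScopedResourceSelectorRequirement().
					WithScopeName(corev1.ResourceQuotaScopePriorityClass).
					WithOperator(corev1.ScopeSelectorOpIn).
					WithValues("high", "critical"))))
}
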
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
index 754bfd1b..858ac80f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/seccompprofile.go
@@ -24,9 +24,22 @@ import (
// SeccompProfileApplyConfiguration represents a declarative configuration of the SeccompProfile type for use
// with apply.
+//
+// SeccompProfile defines a pod/container's seccomp profile settings.
+// Only one profile source may be set.
type SeccompProfileApplyConfiguration struct {
- Type *corev1.SeccompProfileType `json:"type,omitempty"`
- LocalhostProfile *string `json:"localhostProfile,omitempty"`
+ // type indicates which kind of seccomp profile will be applied.
+ // Valid options are:
+ //
+ // Localhost - a profile defined in a file on the node should be used.
+ // RuntimeDefault - the container runtime default profile should be used.
+ // Unconfined - no profile should be applied.
+ Type *corev1.SeccompProfileType `json:"type,omitempty"`
+ // localhostProfile indicates a profile defined in a file on the node should be used.
+ // The profile must be preconfigured on the node to work.
+ // Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ // Must be set if type is "Localhost". Must NOT be set for any other type.
+ LocalhostProfile *string `json:"localhostProfile,omitempty"`
}
// SeccompProfileApplyConfiguration constructs a declarative configuration of the SeccompProfile type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
index ff859d86..42be7793 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
@@ -29,13 +29,32 @@ import (
// SecretApplyConfiguration represents a declarative configuration of the Secret type for use
// with apply.
+//
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
type SecretApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Immutable *bool `json:"immutable,omitempty"`
- Data map[string][]byte `json:"data,omitempty"`
- StringData map[string]string `json:"stringData,omitempty"`
- Type *corev1.SecretType `json:"type,omitempty"`
+ // Immutable, if set to true, ensures that data stored in the Secret cannot
+ // be updated (only object metadata can be modified).
+ // If not set to true, the field can be modified at any time.
+ // Defaulted to nil.
+ Immutable *bool `json:"immutable,omitempty"`
+ // Data contains the secret data. Each key must consist of alphanumeric
+ // characters, '-', '_' or '.'. The serialized form of the secret data is a
+ // base64 encoded string, representing the arbitrary (possibly non-string)
+ // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+ Data map[string][]byte `json:"data,omitempty"`
+ // stringData allows specifying non-binary secret data in string form.
+ // It is provided as a write-only input field for convenience.
+ // All keys and values are merged into the data field on write, overwriting any existing values.
+ // The stringData field is never output when reading from the API.
+ StringData map[string]string `json:"stringData,omitempty"`
+ // Used to facilitate programmatic handling of secret data.
+ // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
+ Type *corev1.SecretType `json:"type,omitempty"`
}
// Secret constructs a declarative configuration of the Secret type for use with
@@ -49,29 +68,14 @@ func Secret(name, namespace string) *SecretApplyConfiguration {
return b
}
-// ExtractSecret extracts the applied configuration owned by fieldManager from
-// secret. If no managedFields are found in secret for fieldManager, a
-// SecretApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractSecretFrom extracts the applied configuration owned by fieldManager from
+// secret for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// secret must be a unmodified Secret API object that was retrieved from the Kubernetes API.
-// ExtractSecret provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractSecretFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractSecret(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) {
- return extractSecret(secret, fieldManager, "")
-}
-
-// ExtractSecretStatus is the same as ExtractSecret except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractSecretStatus(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) {
- return extractSecret(secret, fieldManager, "status")
-}
-
-func extractSecret(secret *corev1.Secret, fieldManager string, subresource string) (*SecretApplyConfiguration, error) {
+func ExtractSecretFrom(secret *corev1.Secret, fieldManager string, subresource string) (*SecretApplyConfiguration, error) {
b := &SecretApplyConfiguration{}
err := managedfields.ExtractInto(secret, internal.Parser().Type("io.k8s.api.core.v1.Secret"), fieldManager, b, subresource)
if err != nil {
@@ -84,6 +88,21 @@ func extractSecret(secret *corev1.Secret, fieldManager string, subresource strin
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractSecret extracts the applied configuration owned by fieldManager from
+// secret. If no managedFields are found in secret for fieldManager, a
+// SecretApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// secret must be an unmodified Secret API object that was retrieved from the Kubernetes API.
+// ExtractSecret provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractSecret(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) {
+ return ExtractSecretFrom(secret, fieldManager, "")
+}
+
func (b SecretApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
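
The Secret builders shown here pair naturally with server-side apply. A sketch assuming a typed clientset; the secret name, key, and field manager are illustrative:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyToken server-side applies an Opaque Secret, owning only the fields set here.
func applyToken(ctx context.Context, cs kubernetes.Interface, ns, token string) error {
	cfg := corev1ac.Secret("example-token", ns).
		WithType(corev1.SecretTypeOpaque).
		WithStringData(map[string]string{"token": token})
	_, err := cs.CoreV1().Secrets(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
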
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
index d3cc9f6a..21a6b752 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretenvsource.go
@@ -20,9 +20,17 @@ package v1
// SecretEnvSourceApplyConfiguration represents a declarative configuration of the SecretEnvSource type for use
// with apply.
+//
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
type SecretEnvSourceApplyConfiguration struct {
+ // The Secret to select from.
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Optional *bool `json:"optional,omitempty"`
+ // Specify whether the Secret must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// SecretEnvSourceApplyConfiguration constructs a declarative configuration of the SecretEnvSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
index f1cd8b2d..8e83bc81 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretkeyselector.go
@@ -20,10 +20,15 @@ package v1
// SecretKeySelectorApplyConfiguration represents a declarative configuration of the SecretKeySelector type for use
// with apply.
+//
+// SecretKeySelector selects a key of a Secret.
type SecretKeySelectorApplyConfiguration struct {
+ // The name of the secret in the pod's namespace to select from.
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Key *string `json:"key,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // The key of the secret to select from. Must be a valid secret key.
+ Key *string `json:"key,omitempty"`
+ // Specify whether the Secret or its key must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// SecretKeySelectorApplyConfiguration constructs a declarative configuration of the SecretKeySelector type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
index 99fa36ec..87016d9f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretprojection.go
@@ -20,10 +20,25 @@ package v1
// SecretProjectionApplyConfiguration represents a declarative configuration of the SecretProjection type for use
// with apply.
+//
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
type SecretProjectionApplyConfiguration struct {
LocalObjectReferenceApplyConfiguration `json:",inline"`
- Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // items if unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
+ // optional field specify whether the Secret or its key must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// SecretProjectionApplyConfiguration constructs a declarative configuration of the SecretProjection type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
index f5e0de23..607f8250 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretreference.go
@@ -20,8 +20,13 @@ package v1
// SecretReferenceApplyConfiguration represents a declarative configuration of the SecretReference type for use
// with apply.
+//
+// SecretReference represents a Secret Reference. It has enough information to retrieve secret
+// in any namespace
type SecretReferenceApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name is unique within a namespace to reference a secret resource.
+ Name *string `json:"name,omitempty"`
+ // namespace defines the space within which the secret name must be unique.
Namespace *string `json:"namespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
index 9f765d35..bdc09853 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/secretvolumesource.go
@@ -20,11 +20,34 @@ package v1
// SecretVolumeSourceApplyConfiguration represents a declarative configuration of the SecretVolumeSource type for use
// with apply.
+//
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSourceApplyConfiguration struct {
- SecretName *string `json:"secretName,omitempty"`
- Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
- DefaultMode *int32 `json:"defaultMode,omitempty"`
- Optional *bool `json:"optional,omitempty"`
+ // secretName is the name of the secret in the pod's namespace to use.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ SecretName *string `json:"secretName,omitempty"`
+ // items If unspecified, each key-value pair in the Data field of the referenced
+ // Secret will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the Secret,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ Items []KeyToPathApplyConfiguration `json:"items,omitempty"`
+ // defaultMode is Optional: mode bits used to set permissions on created files by default.
+ // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ // YAML accepts both octal and decimal values, JSON requires decimal values
+ // for mode bits. Defaults to 0644.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ DefaultMode *int32 `json:"defaultMode,omitempty"`
+ // optional field specify whether the Secret or its keys must be defined
+ Optional *bool `json:"optional,omitempty"`
}
// SecretVolumeSourceApplyConfiguration constructs a declarative configuration of the SecretVolumeSource type for use with
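
The secret volume source above, together with the key selector from the earlier hunk, is how a pod template typically consumes a Secret. A brief sketch with the builders; every name is illustrative:

package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// secretMounts consumes a Secret both as files in a volume and as a single environment variable.
func secretMounts() (*corev1ac.VolumeApplyConfiguration, *corev1ac.EnvVarApplyConfiguration) {
	vol := corev1ac.Volume().
		WithName("creds").
		WithSecret(corev1ac.SecretVolumeSource().
			WithSecretName("example-creds").
			WithDefaultMode(0o400).
			WithItems(corev1ac.KeyToPath().WithKey("ca.crt").WithPath("ca.crt")))
	env := corev1ac.EnvVar().
		WithName("API_TOKEN").
		WithValueFrom(corev1ac.EnvVarSource().
			WithSecretKeyRef(corev1ac.SecretKeySelector().
				WithName("example-creds").
				WithKey("token").
				WithOptional(false)))
	return vol, env
}
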
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
index 99faab72..aee26352 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/securitycontext.go
@@ -24,19 +24,77 @@ import (
// SecurityContextApplyConfiguration represents a declarative configuration of the SecurityContext type for use
// with apply.
+//
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext. When both
+// are set, the values in SecurityContext take precedence.
type SecurityContextApplyConfiguration struct {
- Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"`
- Privileged *bool `json:"privileged,omitempty"`
- SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"`
- WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
- RunAsUser *int64 `json:"runAsUser,omitempty"`
- RunAsGroup *int64 `json:"runAsGroup,omitempty"`
- RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
- ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
- AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"`
- ProcMount *corev1.ProcMountType `json:"procMount,omitempty"`
- SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"`
- AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"`
+ // The capabilities to add/drop when running containers.
+ // Defaults to the default set of capabilities granted by the container runtime.
+ // Note that this field cannot be set when spec.os.name is windows.
+ Capabilities *CapabilitiesApplyConfiguration `json:"capabilities,omitempty"`
+ // Run container in privileged mode.
+ // Processes in privileged containers are essentially equivalent to root on the host.
+ // Defaults to false.
+ // Note that this field cannot be set when spec.os.name is windows.
+ Privileged *bool `json:"privileged,omitempty"`
+ // The SELinux context to be applied to the container.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SELinuxOptions *SELinuxOptionsApplyConfiguration `json:"seLinuxOptions,omitempty"`
+ // The Windows specific settings applied to all containers.
+ // If unspecified, the options from the PodSecurityContext will be used.
+ // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // Note that this field cannot be set when spec.os.name is linux.
+ WindowsOptions *WindowsSecurityContextOptionsApplyConfiguration `json:"windowsOptions,omitempty"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // Note that this field cannot be set when spec.os.name is windows.
+ RunAsUser *int64 `json:"runAsUser,omitempty"`
+ // The GID to run the entrypoint of the container process.
+ // Uses runtime default if unset.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // Note that this field cannot be set when spec.os.name is windows.
+ RunAsGroup *int64 `json:"runAsGroup,omitempty"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty"`
+ // Whether this container has a read-only root filesystem.
+ // Default is false.
+ // Note that this field cannot be set when spec.os.name is windows.
+ ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty"`
+ // AllowPrivilegeEscalation controls whether a process can gain more
+ // privileges than its parent process. This bool directly controls if
+ // the no_new_privs flag will be set on the container process.
+ // AllowPrivilegeEscalation is true always when the container is:
+ // 1) run as Privileged
+ // 2) has CAP_SYS_ADMIN
+ // Note that this field cannot be set when spec.os.name is windows.
+ AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty"`
+ // procMount denotes the type of proc mount to use for the containers.
+ // The default value is Default which uses the container runtime defaults for
+ // readonly paths and masked paths.
+ // This requires the ProcMountType feature flag to be enabled.
+ // Note that this field cannot be set when spec.os.name is windows.
+ ProcMount *corev1.ProcMountType `json:"procMount,omitempty"`
+ // The seccomp options to use by this container. If seccomp options are
+ // provided at both the pod & container level, the container options
+ // override the pod options.
+ // Note that this field cannot be set when spec.os.name is windows.
+ SeccompProfile *SeccompProfileApplyConfiguration `json:"seccompProfile,omitempty"`
+ // appArmorProfile is the AppArmor options to use by this container. If set, this profile
+ // overrides the pod's appArmorProfile.
+ // Note that this field cannot be set when spec.os.name is windows.
+ AppArmorProfile *AppArmorProfileApplyConfiguration `json:"appArmorProfile,omitempty"`
}
// SecurityContextApplyConfiguration constructs a declarative configuration of the SecurityContext type for use with
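
The expanded SecurityContext comments spell out the container/pod precedence rules and per-OS restrictions. A sketch of a hardened container context built from these builders, roughly in line with the restricted Pod Security Standard; the UID is an assumption:

package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// restrictedContext drops all capabilities, forbids privilege escalation, and
// enforces a non-root UID with the runtime default seccomp profile.
func restrictedContext() *corev1ac.SecurityContextApplyConfiguration {
	return corev1ac.SecurityContext().
		WithRunAsNonRoot(true).
		WithRunAsUser(65532).
		WithAllowPrivilegeEscalation(false).
		WithReadOnlyRootFilesystem(true).
		WithCapabilities(corev1ac.Capabilities().WithDrop(corev1.Capability("ALL"))).
		WithSeccompProfile(corev1ac.SeccompProfile().WithType(corev1.SeccompProfileTypeRuntimeDefault))
}
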
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
index bad01300..d03e63e2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/selinuxoptions.go
@@ -20,10 +20,16 @@ package v1
// SELinuxOptionsApplyConfiguration represents a declarative configuration of the SELinuxOptions type for use
// with apply.
+//
+// SELinuxOptions are the labels to be applied to the container
type SELinuxOptionsApplyConfiguration struct {
- User *string `json:"user,omitempty"`
- Role *string `json:"role,omitempty"`
- Type *string `json:"type,omitempty"`
+ // User is a SELinux user label that applies to the container.
+ User *string `json:"user,omitempty"`
+ // Role is a SELinux role label that applies to the container.
+ Role *string `json:"role,omitempty"`
+ // Type is a SELinux type label that applies to the container.
+ Type *string `json:"type,omitempty"`
+	// Level is a SELinux level label that applies to the container.
Level *string `json:"level,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
index 90d2ca0f..b0e66782 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
@@ -29,11 +29,23 @@ import (
// ServiceApplyConfiguration represents a declarative configuration of the Service type for use
// with apply.
+//
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
type ServiceApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ServiceStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the behavior of a service.
+ // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ServiceSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the service.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ServiceStatusApplyConfiguration `json:"status,omitempty"`
}
// Service constructs a declarative configuration of the Service type for use with
@@ -47,6 +59,27 @@ func Service(name, namespace string) *ServiceApplyConfiguration {
return b
}
+// ExtractServiceFrom extracts the applied configuration owned by fieldManager from
+// service for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// service must be an unmodified Service API object that was retrieved from the Kubernetes API.
+// ExtractServiceFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractServiceFrom(service *corev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) {
+ b := &ServiceApplyConfiguration{}
+ err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(service.Name)
+ b.WithNamespace(service.Namespace)
+
+ b.WithKind("Service")
+ b.WithAPIVersion("v1")
+ return b, nil
+}
+
// ExtractService extracts the applied configuration owned by fieldManager from
// service. If no managedFields are found in service for fieldManager, a
// ServiceApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +90,16 @@ func Service(name, namespace string) *ServiceApplyConfiguration {
// ExtractService provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractService(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
- return extractService(service, fieldManager, "")
+ return ExtractServiceFrom(service, fieldManager, "")
}
-// ExtractServiceStatus is the same as ExtractService except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractServiceStatus extracts the applied configuration owned by fieldManager from
+// service for the status subresource.
func ExtractServiceStatus(service *corev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
- return extractService(service, fieldManager, "status")
+ return ExtractServiceFrom(service, fieldManager, "status")
}
-func extractService(service *corev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) {
- b := &ServiceApplyConfiguration{}
- err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(service.Name)
- b.WithNamespace(service.Namespace)
-
- b.WithKind("Service")
- b.WithAPIVersion("v1")
- return b, nil
-}
func (b ServiceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
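The new ExtractServiceFrom helper above generalizes the extract/modify-in-place/apply workflow that ExtractService and ExtractServiceStatus now delegate to. Below is a minimal sketch of that workflow, assuming an in-cluster config; the namespace ("default"), service name ("example"), and "skyhook-operator" field manager are placeholders, not values from this repository.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Fetch the live object, then extract only the fields this manager owns.
	live, err := cs.CoreV1().Services("default").Get(ctx, "example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	ac, err := applycorev1.ExtractService(live, "skyhook-operator") // equivalent to ExtractServiceFrom(live, "skyhook-operator", "")
	if err != nil {
		panic(err)
	}

	// Modify in place, then re-apply; fields owned by other managers are left untouched.
	ac.WithLabels(map[string]string{"app.kubernetes.io/managed-by": "skyhook-operator"})
	updated, err := cs.CoreV1().Services("default").Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "skyhook-operator", Force: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("applied", updated.Name)
}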
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
index 768acb2e..d4393477 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
@@ -29,12 +29,32 @@ import (
// ServiceAccountApplyConfiguration represents a declarative configuration of the ServiceAccount type for use
// with apply.
+//
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
type ServiceAccountApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"`
- ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
- AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
+ // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
+ // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
+ // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
+ // Prefer separate namespaces to isolate access to mounted secrets.
+ // This field should not be used to find auto-generated service account token secrets for use outside of pods.
+ // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
+ // More info: https://kubernetes.io/docs/concepts/configuration/secret
+ Secrets []ObjectReferenceApplyConfiguration `json:"secrets,omitempty"`
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+ ImagePullSecrets []LocalObjectReferenceApplyConfiguration `json:"imagePullSecrets,omitempty"`
+ // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+ // Can be overridden at the pod level.
+ AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty"`
}
// ServiceAccount constructs a declarative configuration of the ServiceAccount type for use with
@@ -48,29 +68,14 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration {
return b
}
-// ExtractServiceAccount extracts the applied configuration owned by fieldManager from
-// serviceAccount. If no managedFields are found in serviceAccount for fieldManager, a
-// ServiceAccountApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractServiceAccountFrom extracts the applied configuration owned by fieldManager from
+// serviceAccount for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// serviceAccount must be a unmodified ServiceAccount API object that was retrieved from the Kubernetes API.
-// ExtractServiceAccount provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractServiceAccountFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
- return extractServiceAccount(serviceAccount, fieldManager, "")
-}
-
-// ExtractServiceAccountStatus is the same as ExtractServiceAccount except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractServiceAccountStatus(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
- return extractServiceAccount(serviceAccount, fieldManager, "status")
-}
-
-func extractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) {
+func ExtractServiceAccountFrom(serviceAccount *corev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) {
b := &ServiceAccountApplyConfiguration{}
err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +88,27 @@ func extractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager s
b.WithAPIVersion("v1")
return b, nil
}
+
+// ExtractServiceAccount extracts the applied configuration owned by fieldManager from
+// serviceAccount. If no managedFields are found in serviceAccount for fieldManager, a
+// ServiceAccountApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// serviceAccount must be an unmodified ServiceAccount API object that was retrieved from the Kubernetes API.
+// ExtractServiceAccount provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractServiceAccount(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+ return ExtractServiceAccountFrom(serviceAccount, fieldManager, "")
+}
+
+// ExtractServiceAccountToken extracts the applied configuration owned by fieldManager from
+// serviceAccount for the token subresource.
+func ExtractServiceAccountToken(serviceAccount *corev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+ return ExtractServiceAccountFrom(serviceAccount, fieldManager, "token")
+}
+
func (b ServiceAccountApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
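The ServiceAccount apply configuration documented above can be built fluently with the generated With* helpers. The sketch below assumes a hypothetical "builder" account in a "skyhook" namespace and a "regcred" pull secret, and only prints the resulting configuration rather than applying it.

package main

import (
	"encoding/json"
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Hypothetical names: "builder" service account in "skyhook", pulling images via the "regcred" secret.
	sa := applycorev1.ServiceAccount("builder", "skyhook").
		WithImagePullSecrets(applycorev1.LocalObjectReference().WithName("regcred")).
		WithAutomountServiceAccountToken(false)

	// In a controller this would be passed to
	// clientset.CoreV1().ServiceAccounts("skyhook").Apply(ctx, sa, metav1.ApplyOptions{FieldManager: "..."}).
	out, _ := json.MarshalIndent(sa, "", "  ")
	fmt.Println(string(out))
}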
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
index fab81bf8..e2bf69bc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccounttokenprojection.go
@@ -20,10 +20,27 @@ package v1
// ServiceAccountTokenProjectionApplyConfiguration represents a declarative configuration of the ServiceAccountTokenProjection type for use
// with apply.
+//
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pods runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
type ServiceAccountTokenProjectionApplyConfiguration struct {
- Audience *string `json:"audience,omitempty"`
- ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
- Path *string `json:"path,omitempty"`
+ // audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ Audience *string `json:"audience,omitempty"`
+ // expirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
+ // path is the path relative to the mount point of the file to project the
+ // token into.
+ Path *string `json:"path,omitempty"`
}
// ServiceAccountTokenProjectionApplyConfiguration constructs a declarative configuration of the ServiceAccountTokenProjection type for use with
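As a sketch of how a ServiceAccountTokenProjection is typically used, the example below nests it in a VolumeProjection; the "vault" audience, one-hour lifetime, and "vault-token" path are illustrative assumptions rather than values from this repository.

package main

import (
	"encoding/json"
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// A projected service account token aimed at a hypothetical "vault" audience,
	// valid for one hour and written to "vault-token" under the volume's mount point.
	proj := applycorev1.VolumeProjection().
		WithServiceAccountToken(applycorev1.ServiceAccountTokenProjection().
			WithAudience("vault").
			WithExpirationSeconds(3600).
			WithPath("vault-token"))

	// This projection would normally be listed under ProjectedVolumeSource().WithSources(proj).
	out, _ := json.MarshalIndent(proj, "", "  ")
	fmt.Println(string(out))
}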
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
index 4d5774d8..44353149 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceport.go
@@ -25,13 +25,55 @@ import (
// ServicePortApplyConfiguration represents a declarative configuration of the ServicePort type for use
// with apply.
+//
+// ServicePort contains information on service's port.
type ServicePortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Protocol *corev1.Protocol `json:"protocol,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
- Port *int32 `json:"port,omitempty"`
- TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
- NodePort *int32 `json:"nodePort,omitempty"`
+ // The name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. When considering
+ // the endpoints for a Service, this must match the 'name' field in the
+ // EndpointPort.
+ // Optional if only one ServicePort is defined on this service.
+ Name *string `json:"name,omitempty"`
+ // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ // Default is TCP.
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ // The application protocol for this port.
+ // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ // This field follows standard Kubernetes label syntax.
+ // Valid values are either:
+ //
+ // * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ // RFC-6335 and https://www.iana.org/assignments/service-names).
+ //
+ // * Kubernetes-defined prefixed names:
+ // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+ //
+ // * Other protocols should use implementation-defined prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ AppProtocol *string `json:"appProtocol,omitempty"`
+ // The port that will be exposed by this service.
+ Port *int32 `json:"port,omitempty"`
+ // Number or name of the port to access on the pods targeted by the service.
+ // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+ // If this is a string, it will be looked up as a named port in the
+ // target Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+ TargetPort *intstr.IntOrString `json:"targetPort,omitempty"`
+ // The port on each node on which this service is exposed when type is
+ // NodePort or LoadBalancer. Usually assigned by the system. If a value is
+ // specified, in-range, and not in use it will be used, otherwise the
+ // operation will fail. If not specified, a port will be allocated if this
+ // Service requires one. If this field is specified when creating a
+ // Service which does not need it, creation will fail. This field will be
+ // wiped when updating a Service to no longer need it (e.g. changing type
+ // from NodePort to ClusterIP).
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ NodePort *int32 `json:"nodePort,omitempty"`
}
// ServicePortApplyConfiguration constructs a declarative configuration of the ServicePort type for use with
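The targetPort documentation above allows either a port number or a named container port; both forms are sketched below with the generated ServicePort builder. The port names and numbers are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Numeric targetPort: traffic on service port 80 is forwarded to container port 8080.
	byNumber := applycorev1.ServicePort().
		WithName("http").
		WithProtocol(corev1.ProtocolTCP).
		WithPort(80).
		WithTargetPort(intstr.FromInt32(8080))

	// Named targetPort: resolved against the target Pod's container port named "metrics".
	byName := applycorev1.ServicePort().
		WithName("metrics").
		WithPort(9090).
		WithTargetPort(intstr.FromString("metrics"))

	for _, p := range []*applycorev1.ServicePortApplyConfiguration{byNumber, byName} {
		out, _ := json.Marshal(p)
		fmt.Println(string(out))
	}
}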
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
index 41367dce..c6a09d26 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
@@ -24,27 +24,201 @@ import (
// ServiceSpecApplyConfiguration represents a declarative configuration of the ServiceSpec type for use
// with apply.
+//
+// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpecApplyConfiguration struct {
- Ports []ServicePortApplyConfiguration `json:"ports,omitempty"`
- Selector map[string]string `json:"selector,omitempty"`
- ClusterIP *string `json:"clusterIP,omitempty"`
- ClusterIPs []string `json:"clusterIPs,omitempty"`
- Type *corev1.ServiceType `json:"type,omitempty"`
- ExternalIPs []string `json:"externalIPs,omitempty"`
- SessionAffinity *corev1.ServiceAffinity `json:"sessionAffinity,omitempty"`
- LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`
- LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
- ExternalName *string `json:"externalName,omitempty"`
- ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"`
- HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"`
- PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"`
- SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"`
- IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"`
- IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"`
- AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"`
- LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
- InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"`
- TrafficDistribution *string `json:"trafficDistribution,omitempty"`
+ // The list of ports that are exposed by this service.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ Ports []ServicePortApplyConfiguration `json:"ports,omitempty"`
+ // Route service traffic to pods with label keys and values matching this
+ // selector. If empty or not present, the service is assumed to have an
+ // external process managing its endpoints, which Kubernetes will not
+ // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+ // Ignored if type is ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ Selector map[string]string `json:"selector,omitempty"`
+ // clusterIP is the IP address of the service and is usually assigned
+ // randomly. If an address is specified manually, is in-range (as per
+ // system configuration), and is not in use, it will be allocated to the
+ // service; otherwise creation of the service will fail. This field may not
+ // be changed through updates unless the type field is also being changed
+ // to ExternalName (which requires this field to be blank) or the type
+ // field is being changed from ExternalName (in which case this field may
+	// optionally be specified, as described above). Valid values are "None",
+ // empty string (""), or a valid IP address. Setting this to "None" makes a
+ // "headless service" (no virtual IP), which is useful when direct endpoint
+ // connections are preferred and proxying is not required. Only applies to
+ // types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+ // when creating a Service of type ExternalName, creation will fail. This
+ // field will be wiped when updating a Service to type ExternalName.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ClusterIP *string `json:"clusterIP,omitempty"`
+ // ClusterIPs is a list of IP addresses assigned to this service, and are
+ // usually assigned randomly. If an address is specified manually, is
+ // in-range (as per system configuration), and is not in use, it will be
+ // allocated to the service; otherwise creation of the service will fail.
+ // This field may not be changed through updates unless the type field is
+ // also being changed to ExternalName (which requires this field to be
+ // empty) or the type field is being changed from ExternalName (in which
+	// case this field may optionally be specified, as described above). Valid
+ // values are "None", empty string (""), or a valid IP address. Setting
+ // this to "None" makes a "headless service" (no virtual IP), which is
+ // useful when direct endpoint connections are preferred and proxying is
+ // not required. Only applies to types ClusterIP, NodePort, and
+ // LoadBalancer. If this field is specified when creating a Service of type
+ // ExternalName, creation will fail. This field will be wiped when updating
+ // a Service to type ExternalName. If this field is not specified, it will
+ // be initialized from the clusterIP field. If this field is specified,
+ // clients must ensure that clusterIPs[0] and clusterIP have the same
+ // value.
+ //
+ // This field may hold a maximum of two entries (dual-stack IPs, in either order).
+ // These IPs must correspond to the values of the ipFamilies field. Both
+ // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ ClusterIPs []string `json:"clusterIPs,omitempty"`
+ // type determines how the Service is exposed. Defaults to ClusterIP. Valid
+ // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+ // "ClusterIP" allocates a cluster-internal IP address for load-balancing
+ // to endpoints. Endpoints are determined by the selector or if that is not
+ // specified, by manual construction of an Endpoints object or
+ // EndpointSlice objects. If clusterIP is "None", no virtual IP is
+ // allocated and the endpoints are published as a set of endpoints rather
+ // than a virtual IP.
+ // "NodePort" builds on ClusterIP and allocates a port on every node which
+ // routes to the same endpoints as the clusterIP.
+ // "LoadBalancer" builds on NodePort and creates an external load-balancer
+ // (if supported in the current cloud) which routes to the same endpoints
+ // as the clusterIP.
+ // "ExternalName" aliases this service to the specified externalName.
+ // Several other fields do not apply to ExternalName services.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ Type *corev1.ServiceType `json:"type,omitempty"`
+ // externalIPs is a list of IP addresses for which nodes in the cluster
+ // will also accept traffic for this service. These IPs are not managed by
+ // Kubernetes. The user is responsible for ensuring that traffic arrives
+ // at a node with this IP. A common example is external load-balancers
+ // that are not part of the Kubernetes system.
+ ExternalIPs []string `json:"externalIPs,omitempty"`
+ // Supports "ClientIP" and "None". Used to maintain session affinity.
+ // Enable client IP based session affinity.
+ // Must be ClientIP or None.
+ // Defaults to None.
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ SessionAffinity *corev1.ServiceAffinity `json:"sessionAffinity,omitempty"`
+ // Only applies to Service Type: LoadBalancer.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // Deprecated: This field was under-specified and its meaning varies across implementations.
+ // Using it is non-portable and it may not support dual-stack.
+ // Users are encouraged to use implementation-specific annotations when available.
+ LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`
+	// If specified and supported by the platform, traffic through the cloud-provider
+	// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+	// cloud-provider does not support the feature.
+ // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+ LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
+ // externalName is the external reference that discovery mechanisms will
+ // return as an alias for this service (e.g. a DNS CNAME record). No
+ // proxying will be involved. Must be a lowercase RFC-1123 hostname
+ // (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+ ExternalName *string `json:"externalName,omitempty"`
+ // externalTrafficPolicy describes how nodes distribute service traffic they
+ // receive on one of the Service's "externally-facing" addresses (NodePorts,
+ // ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+ // the service in a way that assumes that external load balancers will take care
+ // of balancing the service traffic between nodes, and so each node will deliver
+ // traffic only to the node-local endpoints of the service, without masquerading
+ // the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+ // be dropped.) The default value, "Cluster", uses the standard behavior of
+ // routing to all endpoints evenly (possibly modified by topology and other
+ // features). Note that traffic sent to an External IP or LoadBalancer IP from
+ // within the cluster will always get "Cluster" semantics, but clients sending to
+ // a NodePort from within the cluster may need to take traffic policy into account
+ // when picking a node.
+ ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty"`
+ // healthCheckNodePort specifies the healthcheck nodePort for the service.
+ // This only applies when type is set to LoadBalancer and
+ // externalTrafficPolicy is set to Local. If a value is specified, is
+ // in-range, and is not in use, it will be used. If not specified, a value
+ // will be automatically allocated. External systems (e.g. load-balancers)
+ // can use this port to determine if a given node holds endpoints for this
+ // service or not. If this field is specified when creating a Service
+ // which does not need it, creation will fail. This field will be wiped
+ // when updating a Service to no longer need it (e.g. changing type).
+ // This field cannot be updated once set.
+ HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"`
+ // publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+ // Service should disregard any indications of ready/not-ready.
+ // The primary use case for setting this field is for a StatefulSet's Headless Service to
+ // propagate SRV DNS records for its Pods for the purpose of peer discovery.
+ // The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+ // Services interpret this to mean that all endpoints are considered "ready" even if the
+ // Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+ // through the Endpoints or EndpointSlice resources can safely assume this behavior.
+ PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"`
+ // sessionAffinityConfig contains the configurations of session affinity.
+ SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"`
+ // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+ // service. This field is usually assigned automatically based on cluster
+ // configuration and the ipFamilyPolicy field. If this field is specified
+ // manually, the requested family is available in the cluster,
+ // and ipFamilyPolicy allows it, it will be used; otherwise creation of
+ // the service will fail. This field is conditionally mutable: it allows
+ // for adding or removing a secondary IP family, but it does not allow
+ // changing the primary IP family of the Service. Valid values are "IPv4"
+ // and "IPv6". This field only applies to Services of types ClusterIP,
+ // NodePort, and LoadBalancer, and does apply to "headless" services.
+ // This field will be wiped when updating a Service to type ExternalName.
+ //
+ // This field may hold a maximum of two entries (dual-stack families, in
+ // either order). These families must correspond to the values of the
+ // clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+ // governed by the ipFamilyPolicy field.
+ IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"`
+ // IPFamilyPolicy represents the dual-stack-ness requested or required by
+ // this Service. If there is no value provided, then this field will be set
+ // to SingleStack. Services can be "SingleStack" (a single IP family),
+ // "PreferDualStack" (two IP families on dual-stack configured clusters or
+ // a single IP family on single-stack clusters), or "RequireDualStack"
+ // (two IP families on dual-stack configured clusters, otherwise fail). The
+ // ipFamilies and clusterIPs fields depend on the value of this field. This
+ // field will be wiped when updating a service to type ExternalName.
+ IPFamilyPolicy *corev1.IPFamilyPolicy `json:"ipFamilyPolicy,omitempty"`
+ // allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+ // allocated for services with type LoadBalancer. Default is "true". It
+ // may be set to "false" if the cluster load-balancer does not rely on
+ // NodePorts. If the caller requests specific NodePorts (by specifying a
+ // value), those requests will be respected, regardless of this field.
+ // This field may only be set for services with type LoadBalancer and will
+ // be cleared if the type is changed to any other type.
+ AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"`
+ // loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+ // If specified, the value of this field must be a label-style identifier, with an optional prefix,
+ // e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+ // This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+ // balancer implementation is used, today this is typically done through the cloud provider integration,
+ // but should apply for any default implementation. If set, it is assumed that a load balancer
+ // implementation is watching for Services with a matching class. Any default load balancer
+ // implementation (e.g. cloud providers) should ignore Services that set this field.
+ // This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+ // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+ LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
+ // InternalTrafficPolicy describes how nodes distribute service traffic they
+ // receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+ // only want to talk to endpoints of the service on the same node as the pod,
+ // dropping the traffic if there are no local endpoints. The default value,
+ // "Cluster", uses the standard behavior of routing to all endpoints evenly
+ // (possibly modified by topology and other features).
+ InternalTrafficPolicy *corev1.ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty"`
+ // TrafficDistribution offers a way to express preferences for how traffic
+ // is distributed to Service endpoints. Implementations can use this field
+ // as a hint, but are not required to guarantee strict adherence. If the
+ // field is not set, the implementation will apply its default routing
+ // strategy. If set to "PreferClose", implementations should prioritize
+ // endpoints that are in the same zone.
+ TrafficDistribution *string `json:"trafficDistribution,omitempty"`
}
// ServiceSpecApplyConfiguration constructs a declarative configuration of the ServiceSpec type for use with
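Putting the ServiceSpec fields above together, the following sketch declares a ClusterIP Service and applies it server-side with a typed clientset. The "web" name, the app=web selector, the ports, and the "skyhook-operator" field manager are assumptions made for illustration.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// A ClusterIP service routing port 80 to container port 8080 on pods labeled app=web.
	svc := applycorev1.Service("web", "default").
		WithSpec(applycorev1.ServiceSpec().
			WithType(corev1.ServiceTypeClusterIP).
			WithSelector(map[string]string{"app": "web"}).
			WithPorts(applycorev1.ServicePort().
				WithName("http").
				WithPort(80).
				WithTargetPort(intstr.FromInt32(8080))))

	// Server-side apply: the declared fields become owned by this field manager.
	if _, err := cs.CoreV1().Services("default").Apply(context.Background(), svc,
		metav1.ApplyOptions{FieldManager: "skyhook-operator", Force: true}); err != nil {
		panic(err)
	}
}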
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
index 11c3f8a8..6b0d450d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicestatus.go
@@ -24,9 +24,14 @@ import (
// ServiceStatusApplyConfiguration represents a declarative configuration of the ServiceStatus type for use
// with apply.
+//
+// ServiceStatus represents the current status of a service.
type ServiceStatusApplyConfiguration struct {
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
LoadBalancer *LoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
- Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Current service state
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ServiceStatusApplyConfiguration constructs a declarative configuration of the ServiceStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
index 13b045ff..18b7410f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sessionaffinityconfig.go
@@ -20,7 +20,10 @@ package v1
// SessionAffinityConfigApplyConfiguration represents a declarative configuration of the SessionAffinityConfig type for use
// with apply.
+//
+// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfigApplyConfiguration struct {
+ // clientIP contains the configurations of Client IP based session affinity.
ClientIP *ClientIPConfigApplyConfiguration `json:"clientIP,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
index b4115609..a74fec68 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sleepaction.go
@@ -20,7 +20,10 @@ package v1
// SleepActionApplyConfiguration represents a declarative configuration of the SleepAction type for use
// with apply.
+//
+// SleepAction describes a "sleep" action.
type SleepActionApplyConfiguration struct {
+ // Seconds is the number of seconds to sleep.
Seconds *int64 `json:"seconds,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
index 7381a498..8aa7ea7a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageospersistentvolumesource.go
@@ -20,12 +20,29 @@ package v1
// StorageOSPersistentVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSPersistentVolumeSource type for use
// with apply.
+//
+// Represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSourceApplyConfiguration struct {
- VolumeName *string `json:"volumeName,omitempty"`
- VolumeNamespace *string `json:"volumeNamespace,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // volumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName *string `json:"volumeName,omitempty"`
+ // volumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ VolumeNamespace *string `json:"volumeNamespace,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
+ SecretRef *ObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
}
// StorageOSPersistentVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSPersistentVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
index 81d9373c..2419121e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/storageosvolumesource.go
@@ -20,12 +20,29 @@ package v1
// StorageOSVolumeSourceApplyConfiguration represents a declarative configuration of the StorageOSVolumeSource type for use
// with apply.
+//
+// Represents a StorageOS persistent volume resource.
type StorageOSVolumeSourceApplyConfiguration struct {
- VolumeName *string `json:"volumeName,omitempty"`
- VolumeNamespace *string `json:"volumeNamespace,omitempty"`
- FSType *string `json:"fsType,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
- SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
+ // volumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName *string `json:"volumeName,omitempty"`
+ // volumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ VolumeNamespace *string `json:"volumeNamespace,omitempty"`
+ // fsType is the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType *string `json:"fsType,omitempty"`
+ // readOnly defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // secretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
+ SecretRef *LocalObjectReferenceApplyConfiguration `json:"secretRef,omitempty"`
}
// StorageOSVolumeSourceApplyConfiguration constructs a declarative configuration of the StorageOSVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
index 7719eb7d..5bb09a3a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/sysctl.go
@@ -20,8 +20,12 @@ package v1
// SysctlApplyConfiguration represents a declarative configuration of the Sysctl type for use
// with apply.
+//
+// Sysctl defines a kernel parameter to be set
type SysctlApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name of a property to set
+ Name *string `json:"name,omitempty"`
+ // Value of a property to set
Value *string `json:"value,omitempty"`
}
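A short sketch of setting a Sysctl through the generated builders; net.core.somaxconn is used purely as an illustration and would normally need to be allowed through the kubelet's allowed-unsafe-sysctls list.

package main

import (
	"encoding/json"
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Declare a kernel parameter on the pod's security context; the name and value are illustrative.
	sc := applycorev1.PodSecurityContext().
		WithSysctls(applycorev1.Sysctl().
			WithName("net.core.somaxconn").
			WithValue("1024"))

	out, _ := json.MarshalIndent(sc, "", "  ")
	fmt.Println(string(out))
}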
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
index 4b9e4305..6c487980 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/taint.go
@@ -25,11 +25,20 @@ import (
// TaintApplyConfiguration represents a declarative configuration of the Taint type for use
// with apply.
+//
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
type TaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *corev1.TaintEffect `json:"effect,omitempty"`
- TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
+ // Required. The taint key to be applied to a node.
+ Key *string `json:"key,omitempty"`
+ // The taint value corresponding to the taint key.
+ Value *string `json:"value,omitempty"`
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ Effect *corev1.TaintEffect `json:"effect,omitempty"`
+ // TimeAdded represents the time at which the taint was added.
+ TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
}
// TaintApplyConfiguration constructs a declarative configuration of the Taint type for use with
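To illustrate the Taint fields above, here is a sketch that declares a NoSchedule taint on a node apply configuration; the node name and taint key are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Declare a NoSchedule taint on a node named "worker-0" (hypothetical).
	node := applycorev1.Node("worker-0").
		WithSpec(applycorev1.NodeSpec().
			WithTaints(applycorev1.Taint().
				WithKey("example.com/maintenance").
				WithValue("true").
				WithEffect(corev1.TaintEffectNoSchedule)))

	// Applying this via clientset.CoreV1().Nodes().Apply(...) would make that field manager
	// the owner of the declared taint.
	out, _ := json.MarshalIndent(node, "", "  ")
	fmt.Println(string(out))
}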
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
index cba1a7d0..ede78a3f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/tcpsocketaction.go
@@ -24,9 +24,15 @@ import (
// TCPSocketActionApplyConfiguration represents a declarative configuration of the TCPSocketAction type for use
// with apply.
+//
+// TCPSocketAction describes an action based on opening a socket
type TCPSocketActionApplyConfiguration struct {
+ // Number or name of the port to access on the container.
+ // Number must be in the range 1 to 65535.
+ // Name must be an IANA_SVC_NAME.
Port *intstr.IntOrString `json:"port,omitempty"`
- Host *string `json:"host,omitempty"`
+ // Optional: Host name to connect to, defaults to the pod IP.
+ Host *string `json:"host,omitempty"`
}
// TCPSocketActionApplyConfiguration constructs a declarative configuration of the TCPSocketAction type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
index a0a0aac0..1870cbf1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/toleration.go
@@ -24,12 +24,30 @@ import (
// TolerationApplyConfiguration represents a declarative configuration of the Toleration type for use
// with apply.
+//
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
type TolerationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *corev1.TolerationOperator `json:"operator,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *corev1.TaintEffect `json:"effect,omitempty"`
- TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ Key *string `json:"key,omitempty"`
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ // Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).
+ Operator *corev1.TolerationOperator `json:"operator,omitempty"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ Value *string `json:"value,omitempty"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ Effect *corev1.TaintEffect `json:"effect,omitempty"`
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
}
// TolerationApplyConfiguration constructs a declarative configuration of the Toleration type for use with
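A sketch of the corresponding Toleration builders, showing an Equal match against a hypothetical GPU taint and an Exists match with a bounded tolerationSeconds; the keys and values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Equal operator: tolerate a hypothetical "nvidia.com/gpu=present" NoSchedule taint.
	equal := applycorev1.Toleration().
		WithKey("nvidia.com/gpu").
		WithOperator(corev1.TolerationOpEqual).
		WithValue("present").
		WithEffect(corev1.TaintEffectNoSchedule)

	// Exists operator: tolerate any NoExecute taint, but only for 300 seconds before eviction.
	exists := applycorev1.Toleration().
		WithOperator(corev1.TolerationOpExists).
		WithEffect(corev1.TaintEffectNoExecute).
		WithTolerationSeconds(300)

	// These would normally be attached via PodSpec().WithTolerations(equal, exists).
	for _, t := range []*applycorev1.TolerationApplyConfiguration{equal, exists} {
		out, _ := json.Marshal(t)
		fmt.Println(string(out))
	}
}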
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
index 674ddec9..4f1a875f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorlabelrequirement.go
@@ -20,8 +20,14 @@ package v1
// TopologySelectorLabelRequirementApplyConfiguration represents a declarative configuration of the TopologySelectorLabelRequirement type for use
// with apply.
+//
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
type TopologySelectorLabelRequirementApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // The label key that the selector applies to.
+ Key *string `json:"key,omitempty"`
+ // An array of string values. One value must match the label to be selected.
+ // Each entry in Values is ORed.
Values []string `json:"values,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
index 7812ae52..2aaddefd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyselectorterm.go
@@ -20,7 +20,14 @@ package v1
// TopologySelectorTermApplyConfiguration represents a declarative configuration of the TopologySelectorTerm type for use
// with apply.
+//
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// The requirements within a term are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
type TopologySelectorTermApplyConfiguration struct {
+ // A list of topology selector requirements by labels.
MatchLabelExpressions []TopologySelectorLabelRequirementApplyConfiguration `json:"matchLabelExpressions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
index ab814e8e..6bd28b11 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/topologyspreadconstraint.go
@@ -25,15 +25,107 @@ import (
// TopologySpreadConstraintApplyConfiguration represents a declarative configuration of the TopologySpreadConstraint type for use
// with apply.
+//
+// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraintApplyConfiguration struct {
- MaxSkew *int32 `json:"maxSkew,omitempty"`
- TopologyKey *string `json:"topologyKey,omitempty"`
- WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
- LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
- MinDomains *int32 `json:"minDomains,omitempty"`
- NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"`
- NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"`
- MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
+ // MaxSkew describes the degree to which pods may be unevenly distributed.
+ // When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ // between the number of matching pods in the target topology and the global minimum.
+ // The global minimum is the minimum number of matching pods in an eligible domain
+ // or zero if the number of eligible domains is less than MinDomains.
+ // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ // labelSelector spread as 2/2/1:
+ // In this case, the global minimum is 1.
+ // | zone1 | zone2 | zone3 |
+ // | P P | P P | P |
+ // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ // scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ // violate MaxSkew(1).
+ // - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ // When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ // to topologies that satisfy it.
+ // It's a required field. Default value is 1 and 0 is not allowed.
+ MaxSkew *int32 `json:"maxSkew,omitempty"`
+ // TopologyKey is the key of node labels. Nodes that have a label with this key
+ // and identical values are considered to be in the same topology.
+	// We consider each <key, value> as a "bucket", and try to put a balanced
+	// number of pods into each bucket.
+ // We define a domain as a particular instance of a topology.
+ // Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ // nodeAffinityPolicy and nodeTaintsPolicy.
+ // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ // It's a required field.
+ TopologyKey *string `json:"topologyKey,omitempty"`
+ // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ // the spread constraint.
+ // - DoNotSchedule (default) tells the scheduler not to schedule it.
+ // - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ // but giving higher precedence to topologies that would help reduce the
+ // skew.
+ // A constraint is considered "Unsatisfiable" for an incoming pod
+ // if and only if every possible node assignment for that pod would violate
+ // "MaxSkew" on some topology.
+ // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ // labelSelector spread as 3/1/1:
+ // | zone1 | zone2 | zone3 |
+ // | P P P | P | P |
+ // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ // won't make it *more* imbalanced.
+ // It's a required field.
+ WhenUnsatisfiable *corev1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
+ // LabelSelector is used to find matching pods.
+ // Pods that match this label selector are counted to determine the number of pods
+ // in their corresponding topology domain.
+ LabelSelector *metav1.LabelSelectorApplyConfiguration `json:"labelSelector,omitempty"`
+ // MinDomains indicates a minimum number of eligible domains.
+ // When the number of eligible domains with matching topology keys is less than minDomains,
+ // Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+	// And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+ // this value has no effect on scheduling.
+ // As a result, when the number of eligible domains is less than minDomains,
+ // scheduler won't schedule more than maxSkew Pods to those domains.
+ // If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ // Valid values are integers greater than 0.
+ // When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+ //
+ // For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ // labelSelector spread as 2/2/2:
+ // | zone1 | zone2 | zone3 |
+ // | P P | P P | P P |
+ // The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ // In this situation, new pod with the same labelSelector cannot be scheduled,
+ // because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ // it will violate MaxSkew.
+ MinDomains *int32 `json:"minDomains,omitempty"`
+ // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ // when calculating pod topology spread skew. Options are:
+ // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+ //
+ // If this value is nil, the behavior is equivalent to the Honor policy.
+ NodeAffinityPolicy *corev1.NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty"`
+ // NodeTaintsPolicy indicates how we will treat node taints when calculating
+ // pod topology spread skew. Options are:
+ // - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ // has a toleration, are included.
+ // - Ignore: node taints are ignored. All nodes are included.
+ //
+ // If this value is nil, the behavior is equivalent to the Ignore policy.
+ NodeTaintsPolicy *corev1.NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty"`
+ // MatchLabelKeys is a set of pod label keys to select the pods over which
+ // spreading will be calculated. The keys are used to lookup values from the
+ // incoming pod labels, those key-value labels are ANDed with labelSelector
+ // to select the group of existing pods over which spreading will be calculated
+ // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ // MatchLabelKeys cannot be set when LabelSelector isn't set.
+ // Keys that don't exist in the incoming pod labels will
+ // be ignored. A null or empty list means only match against labelSelector.
+ //
+ // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ MatchLabelKeys []string `json:"matchLabelKeys,omitempty"`
}
// TopologySpreadConstraintApplyConfiguration constructs a declarative configuration of the TopologySpreadConstraint type for use with
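
Not part of the vendored diff above, but as orientation for how these fields are consumed: a minimal sketch that builds one spread constraint with the generated With* helpers from k8s.io/client-go/applyconfigurations. The topology key is the standard zone label; the app label value is hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
	applymetav1 "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	// Spread matching pods across zones, tolerating a skew of at most 1,
	// and refuse to schedule rather than make the imbalance worse.
	tsc := applycorev1.TopologySpreadConstraint().
		WithMaxSkew(1).
		WithTopologyKey("topology.kubernetes.io/zone").
		WithWhenUnsatisfiable(corev1.DoNotSchedule).
		WithLabelSelector(applymetav1.LabelSelector().
			WithMatchLabels(map[string]string{"app": "skyhook-agent"})) // hypothetical label

	fmt.Printf("key=%s whenUnsatisfiable=%s\n", *tsc.TopologyKey, *tsc.WhenUnsatisfiable)
}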
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
index 1e63b798..5864532a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedlocalobjectreference.go
@@ -20,10 +20,32 @@ package v1
// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use
// with apply.
+//
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+// 1. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
+// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+// Those cannot be well described when embedded.
+// 2. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+// 3. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
+// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple
+// and the version of the actual struct is irrelevant.
+// 4. We cannot easily change it. Because this type is embedded in many locations, updates to this type
+// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
type TypedLocalObjectReferenceApplyConfiguration struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind is the type of resource being referenced
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced
+ Name *string `json:"name,omitempty"`
}
// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
index f07de890..f2eab829 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/typedobjectreference.go
@@ -20,10 +20,20 @@ package v1
// TypedObjectReferenceApplyConfiguration represents a declarative configuration of the TypedObjectReference type for use
// with apply.
+//
+// TypedObjectReference contains enough information to let you locate the typed referenced object
type TypedObjectReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Kind is the type of resource being referenced
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced
+ Name *string `json:"name,omitempty"`
+ // Namespace is the namespace of resource being referenced
+ // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
Namespace *string `json:"namespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
index e47cd031..2e4c0e6a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volume.go
@@ -20,8 +20,16 @@ package v1
// VolumeApplyConfiguration represents a declarative configuration of the Volume type for use
// with apply.
+//
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type VolumeApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name of the volume.
+ // Must be a DNS_LABEL and unique within the pod.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ Name *string `json:"name,omitempty"`
+ // volumeSource represents the location and type of the mounted volume.
+ // If not specified, the Volume is implied to be an EmptyDir.
+ // This implied behavior is deprecated and will be removed in a future version.
VolumeSourceApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
index 0bc52aad..ce4f766c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumedevice.go
@@ -20,8 +20,12 @@ package v1
// VolumeDeviceApplyConfiguration represents a declarative configuration of the VolumeDevice type for use
// with apply.
+//
+// volumeDevice describes a mapping of a raw block device within a container.
type VolumeDeviceApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name must match the name of a persistentVolumeClaim in the pod
+ Name *string `json:"name,omitempty"`
+ // devicePath is the path inside of the container that the device will be mapped to.
DevicePath *string `json:"devicePath,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
index ccd426a0..83b71eb6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemount.go
@@ -24,14 +24,49 @@ import (
// VolumeMountApplyConfiguration represents a declarative configuration of the VolumeMount type for use
// with apply.
+//
+// VolumeMount describes a mounting of a Volume within a container.
type VolumeMountApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // This must match the Name of a Volume.
+ Name *string `json:"name,omitempty"`
+ // Mounted read-only if true, read-write otherwise (false or unspecified).
+ // Defaults to false.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // RecursiveReadOnly specifies whether read-only mounts should be handled
+ // recursively.
+ //
+ // If ReadOnly is false, this field has no meaning and must be unspecified.
+ //
+ // If ReadOnly is true, and this field is set to Disabled, the mount is not made
+ // recursively read-only. If this field is set to IfPossible, the mount is made
+ // recursively read-only, if it is supported by the container runtime. If this
+ // field is set to Enabled, the mount is made recursively read-only if it is
+ // supported by the container runtime, otherwise the pod will not be started and
+ // an error will be generated to indicate the reason.
+ //
+ // If this field is set to IfPossible or Enabled, MountPropagation must be set to
+ // None (or be unspecified, which defaults to None).
+ //
+ // If this field is not specified, it is treated as an equivalent of Disabled.
RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
- MountPath *string `json:"mountPath,omitempty"`
- SubPath *string `json:"subPath,omitempty"`
- MountPropagation *corev1.MountPropagationMode `json:"mountPropagation,omitempty"`
- SubPathExpr *string `json:"subPathExpr,omitempty"`
+ // Path within the container at which the volume should be mounted. Must
+ // not contain ':'.
+ MountPath *string `json:"mountPath,omitempty"`
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
+ SubPath *string `json:"subPath,omitempty"`
+ // mountPropagation determines how mounts are propagated from the host
+ // to container and the other way around.
+ // When not set, MountPropagationNone is used.
+ // This field is beta in 1.10.
+ // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+ // (which defaults to None).
+ MountPropagation *corev1.MountPropagationMode `json:"mountPropagation,omitempty"`
+ // Expanded path within the volume from which the container's volume should be mounted.
+ // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ // Defaults to "" (volume's root).
+ // SubPathExpr and SubPath are mutually exclusive.
+ SubPathExpr *string `json:"subPathExpr,omitempty"`
}
// VolumeMountApplyConfiguration constructs a declarative configuration of the VolumeMount type for use with
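
A short sketch (not part of the vendored code) of the RecursiveReadOnly behavior described above: the mount is read-only and asks for recursive read-only handling where the runtime supports it, and MountPropagation is left unset so it defaults to None, as IfPossible requires. The volume name and path are hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	vm := applycorev1.VolumeMount().
		WithName("skyhook-config").    // hypothetical volume name
		WithMountPath("/etc/skyhook"). // hypothetical path
		WithReadOnly(true).
		WithRecursiveReadOnly(corev1.RecursiveReadOnlyIfPossible)

	fmt.Printf("%s -> %s (readOnly=%t)\n", *vm.Name, *vm.MountPath, *vm.ReadOnly)
}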
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
index f55c4072..ad4965a1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go
@@ -24,10 +24,18 @@ import (
// VolumeMountStatusApplyConfiguration represents a declarative configuration of the VolumeMountStatus type for use
// with apply.
+//
+// VolumeMountStatus shows status of volume mounts.
type VolumeMountStatusApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- MountPath *string `json:"mountPath,omitempty"`
- ReadOnly *bool `json:"readOnly,omitempty"`
+ // Name corresponds to the name of the original VolumeMount.
+ Name *string `json:"name,omitempty"`
+ // MountPath corresponds to the original VolumeMount.
+ MountPath *string `json:"mountPath,omitempty"`
+ // ReadOnly corresponds to the original VolumeMount.
+ ReadOnly *bool `json:"readOnly,omitempty"`
+ // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
+ // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
+ // depending on the mount result.
RecursiveReadOnly *corev1.RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
index 9198c25d..3719b829 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumenodeaffinity.go
@@ -20,7 +20,10 @@ package v1
// VolumeNodeAffinityApplyConfiguration represents a declarative configuration of the VolumeNodeAffinity type for use
// with apply.
+//
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinityApplyConfiguration struct {
+ // required specifies hard node constraints that must be met.
Required *NodeSelectorApplyConfiguration `json:"required,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
index 28d9e567..b6f493d0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeprojection.go
@@ -20,13 +20,67 @@ package v1
// VolumeProjectionApplyConfiguration represents a declarative configuration of the VolumeProjection type for use
// with apply.
+//
+// Projection that may be projected along with other supported volume types.
+// Exactly one of these fields must be set.
type VolumeProjectionApplyConfiguration struct {
- Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"`
- DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"`
- ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"`
+ // secret information about the secret data to project
+ Secret *SecretProjectionApplyConfiguration `json:"secret,omitempty"`
+ // downwardAPI information about the downwardAPI data to project
+ DownwardAPI *DownwardAPIProjectionApplyConfiguration `json:"downwardAPI,omitempty"`
+ // configMap information about the configMap data to project
+ ConfigMap *ConfigMapProjectionApplyConfiguration `json:"configMap,omitempty"`
+ // serviceAccountToken is information about the serviceAccountToken data to project
ServiceAccountToken *ServiceAccountTokenProjectionApplyConfiguration `json:"serviceAccountToken,omitempty"`
- ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"`
- PodCertificate *PodCertificateProjectionApplyConfiguration `json:"podCertificate,omitempty"`
+ // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ // of ClusterTrustBundle objects in an auto-updating file.
+ //
+ // Alpha, gated by the ClusterTrustBundleProjection feature gate.
+ //
+ // ClusterTrustBundle objects can either be selected by name, or by the
+ // combination of signer name and a label selector.
+ //
+ // Kubelet performs aggressive normalization of the PEM contents written
+ // into the pod filesystem. Esoteric PEM features such as inter-block
+ // comments and block headers are stripped. Certificates are deduplicated.
+ // The ordering of certificates within the file is arbitrary, and Kubelet
+ // may change the order over time.
+ ClusterTrustBundle *ClusterTrustBundleProjectionApplyConfiguration `json:"clusterTrustBundle,omitempty"`
+ // Projects an auto-rotating credential bundle (private key and certificate
+ // chain) that the pod can use either as a TLS client or server.
+ //
+ // Kubelet generates a private key and uses it to send a
+ // PodCertificateRequest to the named signer. Once the signer approves the
+ // request and issues a certificate chain, Kubelet writes the key and
+ // certificate chain to the pod filesystem. The pod does not start until
+ // certificates have been issued for each podCertificate projected volume
+ // source in its spec.
+ //
+ // Kubelet will begin trying to rotate the certificate at the time indicated
+ // by the signer using the PodCertificateRequest.Status.BeginRefreshAt
+ // timestamp.
+ //
+ // Kubelet can write a single file, indicated by the credentialBundlePath
+ // field, or separate files, indicated by the keyPath and
+ // certificateChainPath fields.
+ //
+ // The credential bundle is a single file in PEM format. The first PEM
+ // entry is the private key (in PKCS#8 format), and the remaining PEM
+ // entries are the certificate chain issued by the signer (typically,
+ // signers will return their certificate chain in leaf-to-root order).
+ //
+ // Prefer using the credential bundle format, since your application code
+ // can read it atomically. If you use keyPath and certificateChainPath,
+ // your application must make two separate file reads. If these coincide
+ // with a certificate rotation, it is possible that the private key and leaf
+ // certificate you read may not correspond to each other. Your application
+ // will need to check for this condition, and re-read until they are
+ // consistent.
+ //
+ // The named signer chooses the format of the certificate it issues; consult the signer implementation's documentation to learn how to
+ // issues; consult the signer implementation's documentation to learn how to
+ // use the certificates it issues.
+ PodCertificate *PodCertificateProjectionApplyConfiguration `json:"podCertificate,omitempty"`
}
// VolumeProjectionApplyConfiguration constructs a declarative configuration of the VolumeProjection type for use with
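
The podCertificate comments above describe the credential bundle layout: the first PEM block is a PKCS#8 private key and the remaining blocks are the certificate chain, leaf first, readable in a single atomic file read. A minimal consumer sketch using only the Go standard library; the mount path is hypothetical and would come from the pod's volumeMount and credentialBundlePath.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// readCredentialBundle reads the bundle once, takes the first PEM block as the
// PKCS#8 private key, and parses the remaining blocks as the certificate chain.
func readCredentialBundle(path string) (key any, chain []*x509.Certificate, err error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, nil, err
	}
	block, rest := pem.Decode(data)
	if block == nil {
		return nil, nil, fmt.Errorf("no PEM data in %s", path)
	}
	key, err = x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, nil, fmt.Errorf("parsing private key: %w", err)
	}
	for {
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, nil, fmt.Errorf("parsing certificate: %w", err)
		}
		chain = append(chain, cert)
	}
	return key, chain, nil
}

func main() {
	// Hypothetical mount path for the projected volume.
	key, chain, err := readCredentialBundle("/var/run/pod-certificate/credentialbundle.pem")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("key type %T, %d certificates in chain\n", key, len(chain))
}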
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
index 5c83ae6d..64f52bed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumeresourcerequirements.go
@@ -24,8 +24,16 @@ import (
// VolumeResourceRequirementsApplyConfiguration represents a declarative configuration of the VolumeResourceRequirements type for use
// with apply.
+//
+// VolumeResourceRequirements describes the storage resource requirements for a volume.
type VolumeResourceRequirementsApplyConfiguration struct {
- Limits *corev1.ResourceList `json:"limits,omitempty"`
+ // Limits describes the maximum amount of compute resources allowed.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ Limits *corev1.ResourceList `json:"limits,omitempty"`
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
Requests *corev1.ResourceList `json:"requests,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
index aeead953..5d9a6b0f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/volumesource.go
@@ -20,37 +20,153 @@ package v1
// VolumeSourceApplyConfiguration represents a declarative configuration of the VolumeSource type for use
// with apply.
+//
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
type VolumeSourceApplyConfiguration struct {
- HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"`
- EmptyDir *EmptyDirVolumeSourceApplyConfiguration `json:"emptyDir,omitempty"`
- GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"`
- AWSElasticBlockStore *AWSElasticBlockStoreVolumeSourceApplyConfiguration `json:"awsElasticBlockStore,omitempty"`
- GitRepo *GitRepoVolumeSourceApplyConfiguration `json:"gitRepo,omitempty"`
- Secret *SecretVolumeSourceApplyConfiguration `json:"secret,omitempty"`
- NFS *NFSVolumeSourceApplyConfiguration `json:"nfs,omitempty"`
- ISCSI *ISCSIVolumeSourceApplyConfiguration `json:"iscsi,omitempty"`
- Glusterfs *GlusterfsVolumeSourceApplyConfiguration `json:"glusterfs,omitempty"`
+ // hostPath represents a pre-existing file or directory on the host
+ // machine that is directly exposed to the container. This is generally
+ // used for system agents or other privileged things that are allowed
+ // to see the host machine. Most containers will NOT need this.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ // ---
+ // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ // mount host directories as read/write.
+ HostPath *HostPathVolumeSourceApplyConfiguration `json:"hostPath,omitempty"`
+ // emptyDir represents a temporary directory that shares a pod's lifetime.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ EmptyDir *EmptyDirVolumeSourceApplyConfiguration `json:"emptyDir,omitempty"`
+ // gcePersistentDisk represents a GCE Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+ // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ GCEPersistentDisk *GCEPersistentDiskVolumeSourceApplyConfiguration `json:"gcePersistentDisk,omitempty"`
+ // awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+ // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ AWSElasticBlockStore *AWSElasticBlockStoreVolumeSourceApplyConfiguration `json:"awsElasticBlockStore,omitempty"`
+ // gitRepo represents a git repository at a particular revision.
+ // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+ // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ // into the Pod's container.
+ GitRepo *GitRepoVolumeSourceApplyConfiguration `json:"gitRepo,omitempty"`
+ // secret represents a secret that should populate this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ Secret *SecretVolumeSourceApplyConfiguration `json:"secret,omitempty"`
+ // nfs represents an NFS mount on the host that shares a pod's lifetime
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ NFS *NFSVolumeSourceApplyConfiguration `json:"nfs,omitempty"`
+ // iscsi represents an ISCSI Disk resource that is attached to a
+ // kubelet's host machine and then exposed to the pod.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi
+ ISCSI *ISCSIVolumeSourceApplyConfiguration `json:"iscsi,omitempty"`
+ // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+ Glusterfs *GlusterfsVolumeSourceApplyConfiguration `json:"glusterfs,omitempty"`
+ // persistentVolumeClaimVolumeSource represents a reference to a
+ // PersistentVolumeClaim in the same namespace.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
PersistentVolumeClaim *PersistentVolumeClaimVolumeSourceApplyConfiguration `json:"persistentVolumeClaim,omitempty"`
- RBD *RBDVolumeSourceApplyConfiguration `json:"rbd,omitempty"`
- FlexVolume *FlexVolumeSourceApplyConfiguration `json:"flexVolume,omitempty"`
- Cinder *CinderVolumeSourceApplyConfiguration `json:"cinder,omitempty"`
- CephFS *CephFSVolumeSourceApplyConfiguration `json:"cephfs,omitempty"`
- Flocker *FlockerVolumeSourceApplyConfiguration `json:"flocker,omitempty"`
- DownwardAPI *DownwardAPIVolumeSourceApplyConfiguration `json:"downwardAPI,omitempty"`
- FC *FCVolumeSourceApplyConfiguration `json:"fc,omitempty"`
- AzureFile *AzureFileVolumeSourceApplyConfiguration `json:"azureFile,omitempty"`
- ConfigMap *ConfigMapVolumeSourceApplyConfiguration `json:"configMap,omitempty"`
- VsphereVolume *VsphereVirtualDiskVolumeSourceApplyConfiguration `json:"vsphereVolume,omitempty"`
- Quobyte *QuobyteVolumeSourceApplyConfiguration `json:"quobyte,omitempty"`
- AzureDisk *AzureDiskVolumeSourceApplyConfiguration `json:"azureDisk,omitempty"`
- PhotonPersistentDisk *PhotonPersistentDiskVolumeSourceApplyConfiguration `json:"photonPersistentDisk,omitempty"`
- Projected *ProjectedVolumeSourceApplyConfiguration `json:"projected,omitempty"`
- PortworxVolume *PortworxVolumeSourceApplyConfiguration `json:"portworxVolume,omitempty"`
- ScaleIO *ScaleIOVolumeSourceApplyConfiguration `json:"scaleIO,omitempty"`
- StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"`
- CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"`
- Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"`
- Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"`
+ // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+ RBD *RBDVolumeSourceApplyConfiguration `json:"rbd,omitempty"`
+ // flexVolume represents a generic volume resource that is
+ // provisioned/attached using an exec based plugin.
+ // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+ FlexVolume *FlexVolumeSourceApplyConfiguration `json:"flexVolume,omitempty"`
+ // cinder represents a cinder volume attached and mounted on kubelets host machine.
+ // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+ // are redirected to the cinder.csi.openstack.org CSI driver.
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ Cinder *CinderVolumeSourceApplyConfiguration `json:"cinder,omitempty"`
+ // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+ // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+ CephFS *CephFSVolumeSourceApplyConfiguration `json:"cephfs,omitempty"`
+ // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+ // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+ Flocker *FlockerVolumeSourceApplyConfiguration `json:"flocker,omitempty"`
+ // downwardAPI represents downward API about the pod that should populate this volume
+ DownwardAPI *DownwardAPIVolumeSourceApplyConfiguration `json:"downwardAPI,omitempty"`
+ // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+ FC *FCVolumeSourceApplyConfiguration `json:"fc,omitempty"`
+ // azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+ // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+ // are redirected to the file.csi.azure.com CSI driver.
+ AzureFile *AzureFileVolumeSourceApplyConfiguration `json:"azureFile,omitempty"`
+ // configMap represents a configMap that should populate this volume
+ ConfigMap *ConfigMapVolumeSourceApplyConfiguration `json:"configMap,omitempty"`
+ // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+ // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+ // are redirected to the csi.vsphere.vmware.com CSI driver.
+ VsphereVolume *VsphereVirtualDiskVolumeSourceApplyConfiguration `json:"vsphereVolume,omitempty"`
+ // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+ // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+ Quobyte *QuobyteVolumeSourceApplyConfiguration `json:"quobyte,omitempty"`
+ // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+ // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+ // are redirected to the disk.csi.azure.com CSI driver.
+ AzureDisk *AzureDiskVolumeSourceApplyConfiguration `json:"azureDisk,omitempty"`
+ // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+ // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+ PhotonPersistentDisk *PhotonPersistentDiskVolumeSourceApplyConfiguration `json:"photonPersistentDisk,omitempty"`
+ // projected items for all-in-one resources: secrets, configmaps, and downward API
+ Projected *ProjectedVolumeSourceApplyConfiguration `json:"projected,omitempty"`
+ // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+ // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+ // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+ // is on.
+ PortworxVolume *PortworxVolumeSourceApplyConfiguration `json:"portworxVolume,omitempty"`
+ // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+ ScaleIO *ScaleIOVolumeSourceApplyConfiguration `json:"scaleIO,omitempty"`
+ // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+ // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+ StorageOS *StorageOSVolumeSourceApplyConfiguration `json:"storageos,omitempty"`
+ // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
+ CSI *CSIVolumeSourceApplyConfiguration `json:"csi,omitempty"`
+ // ephemeral represents a volume that is handled by a cluster storage driver.
+ // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ // and deleted when the pod is removed.
+ //
+ // Use this if:
+ // a) the volume is only needed while the pod runs,
+ // b) features of normal volumes like restoring from snapshot or capacity
+ // tracking are needed,
+ // c) the storage driver is specified through a storage class, and
+ // d) the storage driver supports dynamic volume provisioning through
+ // a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ // information on the connection between this volume type
+ // and PersistentVolumeClaim).
+ //
+ // Use PersistentVolumeClaim or one of the vendor-specific
+ // APIs for volumes that persist for longer than the lifecycle
+ // of an individual pod.
+ //
+ // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ // be used that way - see the documentation of the driver for
+ // more information.
+ //
+ // A pod can use both types of ephemeral volumes and
+ // persistent volumes at the same time.
+ Ephemeral *EphemeralVolumeSourceApplyConfiguration `json:"ephemeral,omitempty"`
+ // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+ // The volume is resolved at pod startup depending on which PullPolicy value is provided:
+ //
+ // - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+ // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+ // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+ //
+ // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+ // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+ // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+ // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+ // The volume will be mounted read-only (ro) and non-executable files (noexec).
+ // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33.
+ // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+ Image *ImageVolumeSourceApplyConfiguration `json:"image,omitempty"`
}
// VolumeSourceApplyConfiguration constructs a declarative configuration of the VolumeSource type for use with
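
As the comments above note, only one member of the volume source may be set. A minimal sketch (outside the vendored code, names hypothetical) that builds a Volume backed by an in-memory emptyDir with the generated builders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Exactly one source member is set; here an in-memory emptyDir.
	vol := applycorev1.Volume().
		WithName("scratch"). // hypothetical volume name
		WithEmptyDir(applycorev1.EmptyDirVolumeSource().
			WithMedium(corev1.StorageMediumMemory))

	fmt.Printf("volume %s uses medium %q\n", *vol.Name, *vol.EmptyDir.Medium)
}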
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
index ea8fd8d6..f32a4211 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/vspherevirtualdiskvolumesource.go
@@ -20,11 +20,19 @@ package v1
// VsphereVirtualDiskVolumeSourceApplyConfiguration represents a declarative configuration of the VsphereVirtualDiskVolumeSource type for use
// with apply.
+//
+// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSourceApplyConfiguration struct {
- VolumePath *string `json:"volumePath,omitempty"`
- FSType *string `json:"fsType,omitempty"`
+ // volumePath is the path that identifies vSphere volume vmdk
+ VolumePath *string `json:"volumePath,omitempty"`
+ // fsType is filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType *string `json:"fsType,omitempty"`
+ // storagePolicyName is the storage Policy Based Management (SPBM) profile name.
StoragePolicyName *string `json:"storagePolicyName,omitempty"`
- StoragePolicyID *string `json:"storagePolicyID,omitempty"`
+ // storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ StoragePolicyID *string `json:"storagePolicyID,omitempty"`
}
// VsphereVirtualDiskVolumeSourceApplyConfiguration constructs a declarative configuration of the VsphereVirtualDiskVolumeSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
index c49ef93e..dea0cbb7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/weightedpodaffinityterm.go
@@ -20,8 +20,13 @@ package v1
// WeightedPodAffinityTermApplyConfiguration represents a declarative configuration of the WeightedPodAffinityTerm type for use
// with apply.
+//
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
type WeightedPodAffinityTermApplyConfiguration struct {
- Weight *int32 `json:"weight,omitempty"`
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight *int32 `json:"weight,omitempty"`
+ // Required. A pod affinity term, associated with the corresponding weight.
PodAffinityTerm *PodAffinityTermApplyConfiguration `json:"podAffinityTerm,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
index bb37a500..8ad2ce99 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
@@ -20,11 +20,25 @@ package v1
// WindowsSecurityContextOptionsApplyConfiguration represents a declarative configuration of the WindowsSecurityContextOptions type for use
// with apply.
+//
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptionsApplyConfiguration struct {
+ // GMSACredentialSpecName is the name of the GMSA credential spec to use.
GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"`
- GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty"`
- RunAsUserName *string `json:"runAsUserName,omitempty"`
- HostProcess *bool `json:"hostProcess,omitempty"`
+ // GMSACredentialSpec is where the GMSA admission webhook
+ // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ // GMSA credential spec named by the GMSACredentialSpecName field.
+ GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty"`
+ // The UserName in Windows to run the entrypoint of the container process.
+ // Defaults to the user specified in image metadata if unspecified.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ RunAsUserName *string `json:"runAsUserName,omitempty"`
+ // HostProcess determines if a container should be run as a 'Host Process' container.
+ // All of a Pod's containers must have the same effective HostProcess value
+ // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ // In addition, if HostProcess is true then HostNetwork must also be set to true.
+ HostProcess *bool `json:"hostProcess,omitempty"`
}
// WindowsSecurityContextOptionsApplyConfiguration constructs a declarative configuration of the WindowsSecurityContextOptions type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/workloadreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/workloadreference.go
new file mode 100644
index 00000000..758c4517
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/core/v1/workloadreference.go
@@ -0,0 +1,74 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// WorkloadReferenceApplyConfiguration represents a declarative configuration of the WorkloadReference type for use
+// with apply.
+//
+// WorkloadReference identifies the Workload object and PodGroup membership
+// that a Pod belongs to. The scheduler uses this information to apply
+// workload-aware scheduling semantics.
+type WorkloadReferenceApplyConfiguration struct {
+ // Name defines the name of the Workload object this Pod belongs to.
+ // Workload must be in the same namespace as the Pod.
+ // If it doesn't match any existing Workload, the Pod will remain unschedulable
+ // until a Workload object is created and observed by the kube-scheduler.
+ // It must be a DNS subdomain.
+ Name *string `json:"name,omitempty"`
+ // PodGroup is the name of the PodGroup within the Workload that this Pod
+ // belongs to. If it doesn't match any existing PodGroup within the Workload,
+ // the Pod will remain unschedulable until the Workload object is recreated
+ // and observed by the kube-scheduler. It must be a DNS label.
+ PodGroup *string `json:"podGroup,omitempty"`
+ // PodGroupReplicaKey specifies the replica key of the PodGroup to which this
+ // Pod belongs. It is used to distinguish pods belonging to different replicas
+ // of the same pod group. The pod group policy is applied separately to each replica.
+ // When set, it must be a DNS label.
+ PodGroupReplicaKey *string `json:"podGroupReplicaKey,omitempty"`
+}
+
+// WorkloadReferenceApplyConfiguration constructs a declarative configuration of the WorkloadReference type for use with
+// apply.
+func WorkloadReference() *WorkloadReferenceApplyConfiguration {
+ return &WorkloadReferenceApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *WorkloadReferenceApplyConfiguration) WithName(value string) *WorkloadReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithPodGroup sets the PodGroup field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PodGroup field is set to the value of the last call.
+func (b *WorkloadReferenceApplyConfiguration) WithPodGroup(value string) *WorkloadReferenceApplyConfiguration {
+ b.PodGroup = &value
+ return b
+}
+
+// WithPodGroupReplicaKey sets the PodGroupReplicaKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PodGroupReplicaKey field is set to the value of the last call.
+func (b *WorkloadReferenceApplyConfiguration) WithPodGroupReplicaKey(value string) *WorkloadReferenceApplyConfiguration {
+ b.PodGroupReplicaKey = &value
+ return b
+}
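
A tiny usage sketch for the newly vendored builders above (names made up; per the field comments, Name must be a DNS subdomain and PodGroup/PodGroupReplicaKey must be DNS labels):

package main

import (
	"fmt"

	applycorev1 "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	ref := applycorev1.WorkloadReference().
		WithName("training-job").    // hypothetical Workload name (DNS subdomain)
		WithPodGroup("workers").     // hypothetical PodGroup name (DNS label)
		WithPodGroupReplicaKey("r0") // hypothetical replica key (DNS label)

	fmt.Printf("%s/%s replica %s\n", *ref.Name, *ref.PodGroup, *ref.PodGroupReplicaKey)
}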
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
index df45a6fb..950dc52e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpoint.go
@@ -24,15 +24,42 @@ import (
// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use
// with apply.
+//
+// Endpoint represents a single logical "backend" implementing a service.
type EndpointApplyConfiguration struct {
- Addresses []string `json:"addresses,omitempty"`
- Conditions *EndpointConditionsApplyConfiguration `json:"conditions,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- TargetRef *corev1.ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
- DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- Zone *string `json:"zone,omitempty"`
- Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"`
+ // addresses of this endpoint. For EndpointSlices of addressType "IPv4" or "IPv6",
+ // the values are IP addresses in canonical form. The syntax and semantics of
+ // other addressType values are not defined. This must contain at least one
+ // address but no more than 100. EndpointSlices generated by the EndpointSlice
+ // controller will always have exactly 1 address. No semantics are defined for
+ // additional addresses beyond the first, and kube-proxy does not look at them.
+ Addresses []string `json:"addresses,omitempty"`
+ // conditions contains information about the current status of the endpoint.
+ Conditions *EndpointConditionsApplyConfiguration `json:"conditions,omitempty"`
+ // hostname of this endpoint. This field may be used by consumers of
+ // endpoints to distinguish endpoints from each other (e.g. in DNS names).
+ // Multiple endpoints which use the same hostname should be considered
+ // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS
+ // Label (RFC 1123) validation.
+ Hostname *string `json:"hostname,omitempty"`
+ // targetRef is a reference to a Kubernetes object that represents this
+ // endpoint.
+ TargetRef *corev1.ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
+ // deprecatedTopology contains topology information part of the v1beta1
+ // API. This field is deprecated, and will be removed when the v1beta1
+ // API is removed (no sooner than kubernetes v1.24). While this field can
+ // hold values, it is not writable through the v1 API, and any attempts to
+ // write to it will be silently ignored. Topology information can be found
+ // in the zone and nodeName fields instead.
+ DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty"`
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ NodeName *string `json:"nodeName,omitempty"`
+ // zone is the name of the Zone this endpoint exists in.
+ Zone *string `json:"zone,omitempty"`
+ // hints contains information associated with how an endpoint should be
+ // consumed.
+ Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"`
}
// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
index 20f0b971..abd46414 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointconditions.go
@@ -20,9 +20,24 @@ package v1
// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use
// with apply.
+//
+// EndpointConditions represents the current condition of an endpoint.
type EndpointConditionsApplyConfiguration struct {
- Ready *bool `json:"ready,omitempty"`
- Serving *bool `json:"serving,omitempty"`
+ // ready indicates that this endpoint is ready to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // should be interpreted as "true". In general, an endpoint should be
+ // marked ready if it is serving and not terminating, though this can
+ // be overridden in some cases, such as when the associated Service has
+ // set the publishNotReadyAddresses flag.
+ Ready *bool `json:"ready,omitempty"`
+ // serving indicates that this endpoint is able to receive traffic,
+ // according to whatever system is managing the endpoint. For endpoints
+ // backed by pods, the EndpointSlice controller will mark the endpoint
+ // as serving if the pod's Ready condition is True. A nil value should be
+ // interpreted as "true".
+ Serving *bool `json:"serving,omitempty"`
+ // terminating indicates that this endpoint is terminating. A nil value
+ // should be interpreted as "false".
Terminating *bool `json:"terminating,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
index 7afda39b..7b9e5be7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointhints.go
@@ -20,8 +20,14 @@ package v1
// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use
// with apply.
+//
+// EndpointHints provides hints describing how an endpoint should be consumed.
type EndpointHintsApplyConfiguration struct {
+ // forZones indicates the zone(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"`
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
ForNodes []ForNodeApplyConfiguration `json:"forNodes,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
index b55c868c..7a55f60b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointport.go
@@ -24,11 +24,42 @@ import (
// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
// with apply.
+//
+// EndpointPort represents a Port used by an EndpointSlice
type EndpointPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Protocol *corev1.Protocol `json:"protocol,omitempty"`
- Port *int32 `json:"port,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ Name *string `json:"name,omitempty"`
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ // port represents the port number of the endpoint.
+ // If the EndpointSlice is derived from a Kubernetes service, this must be set
+ // to the service's target port. EndpointSlices used for other purposes may have
+ // a nil port.
+ Port *int32 `json:"port,omitempty"`
+ // The application protocol for this port.
+ // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+ // This field follows standard Kubernetes label syntax.
+ // Valid values are either:
+ //
+ // * Un-prefixed protocol names - reserved for IANA standard service names (as per
+ // RFC-6335 and https://www.iana.org/assignments/service-names).
+ //
+ // * Kubernetes-defined prefixed names:
+ // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+ // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+ // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+ //
+ // * Other protocols should use implementation-defined prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ AppProtocol *string `json:"appProtocol,omitempty"`
}
// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
index d976ca82..cba77dd5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
@@ -29,12 +29,38 @@ import (
// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use
// with apply.
+//
+// EndpointSlice represents a set of service endpoints. Most EndpointSlices are created by
+// the EndpointSlice controller to represent the Pods selected by Service objects. For a
+// given service there may be multiple EndpointSlice objects which must be joined to
+// produce the full set of endpoints; you can find all of the slices for a given service
+// by listing EndpointSlices in the service's namespace whose `kubernetes.io/service-name`
+// label contains the service's name.
type EndpointSliceApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- AddressType *discoveryv1.AddressType `json:"addressType,omitempty"`
- Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
- Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name. (Deprecated)
+ // The EndpointSlice controller only generates, and kube-proxy only processes,
+ // slices of addressType "IPv4" and "IPv6". No semantics are defined for
+ // the "FQDN" type.
+ AddressType *discoveryv1.AddressType `json:"addressType,omitempty"`
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. Each slice may include a
+ // maximum of 100 ports.
+ // Services always have at least 1 port, so EndpointSlices generated by the
+ // EndpointSlice controller will likewise always have at least 1 port.
+ // EndpointSlices used for other purposes may have an empty ports list.
+ Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
}
// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with
@@ -48,29 +74,14 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
return b
}
-// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
-// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
-// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEndpointSliceFrom extracts the applied configuration owned by fieldManager from
+// endpointSlice for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
-// ExtractEndpointSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEndpointSliceFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
- return extractEndpointSlice(endpointSlice, fieldManager, "")
-}
-
-// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEndpointSliceStatus(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
- return extractEndpointSlice(endpointSlice, fieldManager, "status")
-}
-
-func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
+func ExtractEndpointSliceFrom(endpointSlice *discoveryv1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
b := &EndpointSliceApplyConfiguration{}
err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1.EndpointSlice"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +94,21 @@ func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager
b.WithAPIVersion("discovery.k8s.io/v1")
return b, nil
}
+
+// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
+// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
+// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// endpointSlice must be an unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
+// ExtractEndpointSlice provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return ExtractEndpointSliceFrom(endpointSlice, fieldManager, "")
+}
+
func (b EndpointSliceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
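The refactor above folds the former private extractEndpointSlice helper into the exported ExtractEndpointSliceFrom, keeping ExtractEndpointSlice as a thin wrapper. A minimal sketch of the extract/modify-in-place/apply workflow the doc comments describe, assuming an existing typed clientset; the "skyhook-operator" field manager and the label are placeholder values, not part of the vendored change:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	discoveryapply "k8s.io/client-go/applyconfigurations/discovery/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelEndpointSlice reads the live object, extracts only the fields this
// field manager already owns, modifies them in place, and server-side applies
// the result back.
func relabelEndpointSlice(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	live, err := cs.DiscoveryV1().EndpointSlices(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Empty subresource selects the main resource; "status" would select the
	// status subresource, mirroring the removed Extract...Status helpers.
	cfg, err := discoveryapply.ExtractEndpointSliceFrom(live, "skyhook-operator", "")
	if err != nil {
		return err
	}

	cfg.WithLabels(map[string]string{"example.com/managed": "true"}) // placeholder label

	_, err = cs.DiscoveryV1().EndpointSlices(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "skyhook-operator", Force: true})
	return err
}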
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go
index 3b2304d3..5818f70b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/fornode.go
@@ -20,7 +20,10 @@ package v1
// ForNodeApplyConfiguration represents a declarative configuration of the ForNode type for use
// with apply.
+//
+// ForNode provides information about which nodes should consume this endpoint.
type ForNodeApplyConfiguration struct {
+ // name represents the name of the node.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
index 505d11ae..1d06e1ad 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/forzone.go
@@ -20,7 +20,10 @@ package v1
// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use
// with apply.
+//
+// ForZone provides information about which zones should consume this endpoint.
type ForZoneApplyConfiguration struct {
+ // name represents the name of the zone.
Name *string `json:"name,omitempty"`
}
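The ForNode and ForZone names documented above feed the hints block on an endpoint. A short builder sketch, assuming the sibling Endpoint and EndpointHints apply configurations from the same package; the address, zone, and node values are placeholders:

package example

import (
	discoveryapply "k8s.io/client-go/applyconfigurations/discovery/v1"
)

// hintedEndpoint builds an Endpoint apply configuration whose hints steer
// consumption toward a single zone and node for topology aware routing.
func hintedEndpoint(address, zone, node string) *discoveryapply.EndpointApplyConfiguration {
	return discoveryapply.Endpoint().
		WithAddresses(address).
		WithHints(discoveryapply.EndpointHints().
			WithForZones(discoveryapply.ForZone().WithName(zone)).
			WithForNodes(discoveryapply.ForNode().WithName(node)))
}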
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
index 5d87dae7..4d7b3997 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpoint.go
@@ -24,14 +24,47 @@ import (
// EndpointApplyConfiguration represents a declarative configuration of the Endpoint type for use
// with apply.
+//
+// Endpoint represents a single logical "backend" implementing a service.
type EndpointApplyConfiguration struct {
- Addresses []string `json:"addresses,omitempty"`
+ // addresses of this endpoint. The contents of this field are interpreted
+ // according to the corresponding EndpointSlice addressType field. Consumers
+ // must handle different types of addresses in the context of their own
+ // capabilities. This must contain at least one address but no more than
+ // 100. These are all assumed to be fungible and clients may choose to only
+ // use the first element. Refer to: https://issue.k8s.io/106267
+ Addresses []string `json:"addresses,omitempty"`
+ // conditions contains information about the current status of the endpoint.
Conditions *EndpointConditionsApplyConfiguration `json:"conditions,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- TargetRef *v1.ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
- Topology map[string]string `json:"topology,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"`
+ // hostname of this endpoint. This field may be used by consumers of
+ // endpoints to distinguish endpoints from each other (e.g. in DNS names).
+ // Multiple endpoints which use the same hostname should be considered
+ // fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS
+ // Label (RFC 1123) validation.
+ Hostname *string `json:"hostname,omitempty"`
+ // targetRef is a reference to a Kubernetes object that represents this
+ // endpoint.
+ TargetRef *v1.ObjectReferenceApplyConfiguration `json:"targetRef,omitempty"`
+ // topology contains arbitrary topology information associated with the
+ // endpoint. These key/value pairs must conform with the label format.
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ // Topology may include a maximum of 16 key/value pairs. This includes, but
+ // is not limited to the following well known keys:
+ // * kubernetes.io/hostname: the value indicates the hostname of the node
+ // where the endpoint is located. This should match the corresponding
+ // node label.
+ // * topology.kubernetes.io/zone: the value indicates the zone where the
+ // endpoint is located. This should match the corresponding node label.
+ // * topology.kubernetes.io/region: the value indicates the region where the
+ // endpoint is located. This should match the corresponding node label.
+ // This field is deprecated and will be removed in future api versions.
+ Topology map[string]string `json:"topology,omitempty"`
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ NodeName *string `json:"nodeName,omitempty"`
+ // hints contains information associated with how an endpoint should be
+ // consumed.
+ Hints *EndpointHintsApplyConfiguration `json:"hints,omitempty"`
}
// EndpointApplyConfiguration constructs a declarative configuration of the Endpoint type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
index 13f5fa55..e94b9887 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointconditions.go
@@ -20,9 +20,23 @@ package v1beta1
// EndpointConditionsApplyConfiguration represents a declarative configuration of the EndpointConditions type for use
// with apply.
+//
+// EndpointConditions represents the current condition of an endpoint.
type EndpointConditionsApplyConfiguration struct {
- Ready *bool `json:"ready,omitempty"`
- Serving *bool `json:"serving,omitempty"`
+ // ready indicates that this endpoint is prepared to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // indicates an unknown state. In most cases consumers should interpret this
+ // unknown state as ready. For compatibility reasons, ready should never be
+ // "true" for terminating endpoints.
+ Ready *bool `json:"ready,omitempty"`
+ // serving is identical to ready except that it is set regardless of the
+ // terminating state of endpoints. This condition should be set to true for
+ // a ready endpoint that is terminating. If nil, consumers should defer to
+ // the ready condition.
+ Serving *bool `json:"serving,omitempty"`
+ // terminating indicates that this endpoint is terminating. A nil value
+ // indicates an unknown state. Consumers should interpret this unknown state
+ // to mean that the endpoint is not terminating.
Terminating *bool `json:"terminating,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
index 9637f994..0e36451a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointhints.go
@@ -20,8 +20,14 @@ package v1beta1
// EndpointHintsApplyConfiguration represents a declarative configuration of the EndpointHints type for use
// with apply.
+//
+// EndpointHints provides hints describing how an endpoint should be consumed.
type EndpointHintsApplyConfiguration struct {
+ // forZones indicates the zone(s) this endpoint should be consumed by to
+ // enable topology aware routing. May contain a maximum of 8 entries.
ForZones []ForZoneApplyConfiguration `json:"forZones,omitempty"`
+ // forNodes indicates the node(s) this endpoint should be consumed by when
+ // using topology aware routing. May contain a maximum of 8 entries.
ForNodes []ForNodeApplyConfiguration `json:"forNodes,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
index 07cfc684..22429529 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointport.go
@@ -24,11 +24,32 @@ import (
// EndpointPortApplyConfiguration represents a declarative configuration of the EndpointPort type for use
// with apply.
+//
+// EndpointPort represents a Port used by an EndpointSlice
type EndpointPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- Port *int32 `json:"port,omitempty"`
- AppProtocol *string `json:"appProtocol,omitempty"`
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ Name *string `json:"name,omitempty"`
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ Protocol *v1.Protocol `json:"protocol,omitempty"`
+ // port represents the port number of the endpoint.
+ // If this is not specified, ports are not restricted and must be
+ // interpreted in the context of the specific consumer.
+ Port *int32 `json:"port,omitempty"`
+ // appProtocol represents the application protocol for this port.
+ // This field follows standard Kubernetes label syntax.
+ // Un-prefixed names are reserved for IANA standard service names (as per
+ // RFC-6335 and https://www.iana.org/assignments/service-names).
+ // Non-standard protocols should use prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ AppProtocol *string `json:"appProtocol,omitempty"`
}
// EndpointPortApplyConfiguration constructs a declarative configuration of the EndpointPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
index 437cef59..4d40de54 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
@@ -29,12 +29,31 @@ import (
// EndpointSliceApplyConfiguration represents a declarative configuration of the EndpointSlice type for use
// with apply.
+//
+// EndpointSlice represents a subset of the endpoints that implement a service.
+// For a given service there may be multiple EndpointSlice objects, selected by
+// labels, which must be joined to produce the full set of endpoints.
type EndpointSliceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- AddressType *discoveryv1beta1.AddressType `json:"addressType,omitempty"`
- Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
- Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name.
+ AddressType *discoveryv1beta1.AddressType `json:"addressType,omitempty"`
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ Endpoints []EndpointApplyConfiguration `json:"endpoints,omitempty"`
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. When ports is empty, it
+ // indicates that there are no defined ports. When a port is defined with a
+ // nil port value, it indicates "all ports". Each slice may include a
+ // maximum of 100 ports.
+ Ports []EndpointPortApplyConfiguration `json:"ports,omitempty"`
}
// EndpointSlice constructs a declarative configuration of the EndpointSlice type for use with
@@ -48,29 +67,14 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
return b
}
-// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
-// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
-// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEndpointSliceFrom extracts the applied configuration owned by fieldManager from
+// endpointSlice for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
-// ExtractEndpointSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEndpointSliceFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
- return extractEndpointSlice(endpointSlice, fieldManager, "")
-}
-
-// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEndpointSliceStatus(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
- return extractEndpointSlice(endpointSlice, fieldManager, "status")
-}
-
-func extractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
+func ExtractEndpointSliceFrom(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
b := &EndpointSliceApplyConfiguration{}
err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +87,21 @@ func extractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldMa
b.WithAPIVersion("discovery.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
+// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
+// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// endpointSlice must be an unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
+// ExtractEndpointSlice provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEndpointSlice(endpointSlice *discoveryv1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return ExtractEndpointSliceFrom(endpointSlice, fieldManager, "")
+}
+
func (b EndpointSliceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
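The field comments added above (one address type per slice, at most 1000 endpoints, and at most 100 uniquely named ports) map directly onto the generated builders. A construction sketch using the v1beta1 builders from this file purely for illustration; the name, address, and port values are placeholders, and the discovery/v1 builders follow the same shape:

package example

import (
	corev1 "k8s.io/api/core/v1"
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	applydiscoveryv1beta1 "k8s.io/client-go/applyconfigurations/discovery/v1beta1"
)

// exampleSlice assembles an EndpointSlice apply configuration: a single
// address type, one ready endpoint, and one named port.
func exampleSlice() *applydiscoveryv1beta1.EndpointSliceApplyConfiguration {
	return applydiscoveryv1beta1.EndpointSlice("example-abc12", "default").
		WithAddressType(discoveryv1beta1.AddressTypeIPv4).
		WithEndpoints(applydiscoveryv1beta1.Endpoint().
			WithAddresses("10.0.0.12").
			WithConditions(applydiscoveryv1beta1.EndpointConditions().WithReady(true))).
		WithPorts(applydiscoveryv1beta1.EndpointPort().
			WithName("http").
			WithPort(8080).
			WithProtocol(corev1.ProtocolTCP))
}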
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go
index 79aff881..bb976f2f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/fornode.go
@@ -20,7 +20,10 @@ package v1beta1
// ForNodeApplyConfiguration represents a declarative configuration of the ForNode type for use
// with apply.
+//
+// ForNode provides information about which nodes should consume this endpoint.
type ForNodeApplyConfiguration struct {
+ // name represents the name of the node.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
index 4af09cc4..6308861c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/forzone.go
@@ -20,7 +20,10 @@ package v1beta1
// ForZoneApplyConfiguration represents a declarative configuration of the ForZone type for use
// with apply.
+//
+// ForZone provides information about which zones should consume this endpoint.
type ForZoneApplyConfiguration struct {
+ // name represents the name of the zone.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
index 391dfc96..122cccad 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
@@ -30,23 +30,57 @@ import (
// EventApplyConfiguration represents a declarative configuration of the Event type for use
// with apply.
+//
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+// Events have a limited retention time and triggers and messages may evolve
+// with time. Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason. Events should be
+// treated as informative, best-effort, supplemental data.
type EventApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
- Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
- ReportingController *string `json:"reportingController,omitempty"`
- ReportingInstance *string `json:"reportingInstance,omitempty"`
- Action *string `json:"action,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
- Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
- Note *string `json:"note,omitempty"`
- Type *string `json:"type,omitempty"`
- DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
- DeprecatedFirstTimestamp *apismetav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
- DeprecatedLastTimestamp *apismetav1.Time `json:"deprecatedLastTimestamp,omitempty"`
- DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
+ // eventTime is the time when this Event was first observed. It is required.
+ EventTime *apismetav1.MicroTime `json:"eventTime,omitempty"`
+ // series is data about the Event series this event represents or nil if it's a singleton Event.
+ Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
+ // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+ // This field cannot be empty for new Events.
+ ReportingController *string `json:"reportingController,omitempty"`
+ // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`.
+ // This field cannot be empty for new Events and it can have at most 128 characters.
+ ReportingInstance *string `json:"reportingInstance,omitempty"`
+// action is what action was taken/failed regarding the regarding object. It is machine-readable.
+ // This field cannot be empty for new Events and it can have at most 128 characters.
+ Action *string `json:"action,omitempty"`
+ // reason is why the action was taken. It is human-readable.
+ // This field cannot be empty for new Events and it can have at most 128 characters.
+ Reason *string `json:"reason,omitempty"`
+ // regarding contains the object this Event is about. In most cases it's an Object reporting controller
+ // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+ // it acts on some changes in a ReplicaSet object.
+ Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
+ // related is the optional secondary object for more complex actions. E.g. when regarding object triggers
+ // a creation or deletion of related object.
+ Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
+ // note is a human-readable description of the status of this operation.
+ // Maximal length of the note is 1kB, but libraries should be prepared to
+ // handle values up to 64kB.
+ Note *string `json:"note,omitempty"`
+ // type is the type of this event (Normal, Warning), new types could be added in the future.
+ // It is machine-readable.
+ // This field cannot be empty for new Events.
+ Type *string `json:"type,omitempty"`
+ // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
+ // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedFirstTimestamp *apismetav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
+ // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedLastTimestamp *apismetav1.Time `json:"deprecatedLastTimestamp,omitempty"`
+ // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
}
// Event constructs a declarative configuration of the Event type for use with
@@ -60,29 +94,14 @@ func Event(name, namespace string) *EventApplyConfiguration {
return b
}
-// ExtractEvent extracts the applied configuration owned by fieldManager from
-// event. If no managedFields are found in event for fieldManager, a
-// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEventFrom extracts the applied configuration owned by fieldManager from
+// event for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
-// ExtractEvent provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEventFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEvent(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "")
-}
-
-// ExtractEventStatus is the same as ExtractEvent except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEventStatus(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "status")
-}
-
-func extractEvent(event *eventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
+func ExtractEventFrom(event *eventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource)
if err != nil {
@@ -95,6 +114,21 @@ func extractEvent(event *eventsv1.Event, fieldManager string, subresource string
b.WithAPIVersion("events.k8s.io/v1")
return b, nil
}
+
+// ExtractEvent extracts the applied configuration owned by fieldManager from
+// event. If no managedFields are found in event for fieldManager, a
+// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// event must be an unmodified Event API object that was retrieved from the Kubernetes API.
+// ExtractEvent provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEvent(event *eventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return ExtractEventFrom(event, fieldManager, "")
+}
+
func (b EventApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
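The field comments spell out what a well-formed events.k8s.io/v1 Event needs (a required eventTime, a non-empty reportingController, and bounded action/reason/note strings). A sketch of building and server-side applying one, assuming an existing clientset; the controller, reason, and object names are placeholders:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	eventsv1apply "k8s.io/client-go/applyconfigurations/events/v1"
	"k8s.io/client-go/kubernetes"
)

// reportEvent emits a Normal event about a pod via server-side apply.
func reportEvent(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
	ev := eventsv1apply.Event("example-reconciled", ns).
		WithEventTime(metav1.NowMicro()).
		WithReportingController("example.com/demo-controller"). // placeholder controller name
		WithReportingInstance("demo-controller-0").
		WithAction("Reconcile").
		WithReason("Reconciled").
		WithType("Normal").
		WithNote("object is up to date").
		WithRegarding(corev1apply.ObjectReference().
			WithKind("Pod").
			WithNamespace(ns).
			WithName(podName))

	_, err := cs.EventsV1().Events(ns).Apply(ctx, ev,
		metav1.ApplyOptions{FieldManager: "demo-controller", Force: true})
	return err
}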
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
index c90954bc..ddacc01e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1/eventseries.go
@@ -24,8 +24,15 @@ import (
// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
// with apply.
+//
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time. How often to update the EventSeries is up to the event reporters.
+// The default event reporter in "k8s.io/client-go/tools/events/event_broadcaster.go" shows
+// how this struct is updated on heartbeats and can guide customized reporter implementations.
type EventSeriesApplyConfiguration struct {
- Count *int32 `json:"count,omitempty"`
+ // count is the number of occurrences in this series up to the last heartbeat time.
+ Count *int32 `json:"count,omitempty"`
+ // lastObservedTime is the time when last Event from the series was seen before last heartbeat.
LastObservedTime *metav1.MicroTime `json:"lastObservedTime,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
index c57af55b..cf6068f4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
@@ -30,23 +30,56 @@ import (
// EventApplyConfiguration represents a declarative configuration of the Event type for use
// with apply.
+//
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+// Events have a limited retention time and triggers and messages may evolve
+// with time. Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason. Events should be
+// treated as informative, best-effort, supplemental data.
type EventApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- EventTime *metav1.MicroTime `json:"eventTime,omitempty"`
- Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
- ReportingController *string `json:"reportingController,omitempty"`
- ReportingInstance *string `json:"reportingInstance,omitempty"`
- Action *string `json:"action,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
- Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
- Note *string `json:"note,omitempty"`
- Type *string `json:"type,omitempty"`
- DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
- DeprecatedFirstTimestamp *metav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
- DeprecatedLastTimestamp *metav1.Time `json:"deprecatedLastTimestamp,omitempty"`
- DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
+ // eventTime is the time when this Event was first observed. It is required.
+ EventTime *metav1.MicroTime `json:"eventTime,omitempty"`
+ // series is data about the Event series this event represents or nil if it's a singleton Event.
+ Series *EventSeriesApplyConfiguration `json:"series,omitempty"`
+ // reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+ // This field cannot be empty for new Events.
+ ReportingController *string `json:"reportingController,omitempty"`
+ // reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`.
+ // This field cannot be empty for new Events and it can have at most 128 characters.
+ ReportingInstance *string `json:"reportingInstance,omitempty"`
+// action is what action was taken/failed regarding the regarding object. It is machine-readable.
+ // This field can have at most 128 characters.
+ Action *string `json:"action,omitempty"`
+ // reason is why the action was taken. It is human-readable.
+ // This field can have at most 128 characters.
+ Reason *string `json:"reason,omitempty"`
+ // regarding contains the object this Event is about. In most cases it's an Object reporting controller
+ // implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+ // it acts on some changes in a ReplicaSet object.
+ Regarding *corev1.ObjectReferenceApplyConfiguration `json:"regarding,omitempty"`
+ // related is the optional secondary object for more complex actions. E.g. when regarding object triggers
+ // a creation or deletion of related object.
+ Related *corev1.ObjectReferenceApplyConfiguration `json:"related,omitempty"`
+ // note is a human-readable description of the status of this operation.
+ // Maximal length of the note is 1kB, but libraries should be prepared to
+ // handle values up to 64kB.
+ Note *string `json:"note,omitempty"`
+ // type is the type of this event (Normal, Warning), new types could be added in the future.
+ // It is machine-readable.
+ Type *string `json:"type,omitempty"`
+ // deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedSource *corev1.EventSourceApplyConfiguration `json:"deprecatedSource,omitempty"`
+ // deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedFirstTimestamp *metav1.Time `json:"deprecatedFirstTimestamp,omitempty"`
+ // deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedLastTimestamp *metav1.Time `json:"deprecatedLastTimestamp,omitempty"`
+ // deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.
+ DeprecatedCount *int32 `json:"deprecatedCount,omitempty"`
}
// Event constructs a declarative configuration of the Event type for use with
@@ -60,29 +93,14 @@ func Event(name, namespace string) *EventApplyConfiguration {
return b
}
-// ExtractEvent extracts the applied configuration owned by fieldManager from
-// event. If no managedFields are found in event for fieldManager, a
-// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEventFrom extracts the applied configuration owned by fieldManager from
+// event for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
-// ExtractEvent provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEventFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEvent(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "")
-}
-
-// ExtractEventStatus is the same as ExtractEvent except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEventStatus(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) {
- return extractEvent(event, fieldManager, "status")
-}
-
-func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
+func ExtractEventFrom(event *eventsv1beta1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1beta1.Event"), fieldManager, b, subresource)
if err != nil {
@@ -95,6 +113,21 @@ func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource s
b.WithAPIVersion("events.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractEvent extracts the applied configuration owned by fieldManager from
+// event. If no managedFields are found in event for fieldManager, a
+// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// event must be an unmodified Event API object that was retrieved from the Kubernetes API.
+// ExtractEvent provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEvent(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return ExtractEventFrom(event, fieldManager, "")
+}
+
func (b EventApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
index 75d936e8..fa1edd56 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/eventseries.go
@@ -24,8 +24,13 @@ import (
// EventSeriesApplyConfiguration represents a declarative configuration of the EventSeries type for use
// with apply.
+//
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time.
type EventSeriesApplyConfiguration struct {
- Count *int32 `json:"count,omitempty"`
+ // count is the number of occurrences in this series up to the last heartbeat time.
+ Count *int32 `json:"count,omitempty"`
+ // lastObservedTime is the time when last Event from the series was seen before last heartbeat.
LastObservedTime *v1.MicroTime `json:"lastObservedTime,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
index 081b00d0..452f9b1c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
@@ -29,11 +29,24 @@ import (
// DaemonSetApplyConfiguration represents a declarative configuration of the DaemonSet type for use
// with apply.
+//
+// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
+// more information.
+// DaemonSet represents the configuration of a daemon set.
type DaemonSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *DaemonSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *DaemonSetStatusApplyConfiguration `json:"status,omitempty"`
}
// DaemonSet constructs a declarative configuration of the DaemonSet type for use with
@@ -47,6 +60,27 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
return b
}
+// ExtractDaemonSetFrom extracts the applied configuration owned by fieldManager from
+// daemonSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// daemonSet must be an unmodified DaemonSet API object that was retrieved from the Kubernetes API.
+// ExtractDaemonSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDaemonSetFrom(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
+ b := &DaemonSetApplyConfiguration{}
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(daemonSet.Name)
+ b.WithNamespace(daemonSet.Namespace)
+
+ b.WithKind("DaemonSet")
+ b.WithAPIVersion("extensions/v1beta1")
+ return b, nil
+}
+
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "")
}
-// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDaemonSetStatus extracts the applied configuration owned by fieldManager from
+// daemonSet for the status subresource.
func ExtractDaemonSetStatus(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
- return extractDaemonSet(daemonSet, fieldManager, "status")
+ return ExtractDaemonSetFrom(daemonSet, fieldManager, "status")
}
-func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
- b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(daemonSet.Name)
- b.WithNamespace(daemonSet.Namespace)
-
- b.WithKind("DaemonSet")
- b.WithAPIVersion("extensions/v1beta1")
- return b, nil
-}
func (b DaemonSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
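With the private extractDaemonSet helper promoted to ExtractDaemonSetFrom, the status wrapper above is now just a fixed-subresource call. A small sketch showing the two equivalent ways to pull out the status fields a manager owns; the manager name is whatever field manager your controller applies with:

package example

import (
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	extapply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

// ownedDaemonSetStatus extracts only the status-subresource fields owned by
// the given field manager. The wrapper and the explicit-subresource call
// produce the same configuration.
func ownedDaemonSetStatus(ds *extensionsv1beta1.DaemonSet, manager string) (*extapply.DaemonSetApplyConfiguration, error) {
	// Equivalent: extapply.ExtractDaemonSetStatus(ds, manager)
	return extapply.ExtractDaemonSetFrom(ds, manager, "status")
}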
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
index 0312a309..971c6847 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetcondition.go
@@ -26,12 +26,20 @@ import (
// DaemonSetConditionApplyConfiguration represents a declarative configuration of the DaemonSetCondition type for use
// with apply.
+//
+// TODO: Add valid condition types of a DaemonSet.
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
type DaemonSetConditionApplyConfiguration struct {
- Type *extensionsv1beta1.DaemonSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of DaemonSet condition.
+ Type *extensionsv1beta1.DaemonSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DaemonSetConditionApplyConfiguration constructs a declarative configuration of the DaemonSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
index d6289691..eddbbe41 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetspec.go
@@ -25,13 +25,35 @@ import (
// DaemonSetSpecApplyConfiguration represents a declarative configuration of the DaemonSetSpec type for use
// with apply.
+//
+// DaemonSetSpec is the specification of a daemon set.
type DaemonSetSpecApplyConfiguration struct {
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- TemplateGeneration *int64 `json:"templateGeneration,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ UpdateStrategy *DaemonSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its container crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // DEPRECATED.
+ // A sequence number representing a specific generation of the template.
+ // Populated by the system. It can be set only during the creation.
+ TemplateGeneration *int64 `json:"templateGeneration,omitempty"`
+ // The number of old history to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
}
// DaemonSetSpecApplyConfiguration constructs a declarative configuration of the DaemonSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
index 373f9ef9..1803d3b7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetstatus.go
@@ -20,17 +20,42 @@ package v1beta1
// DaemonSetStatusApplyConfiguration represents a declarative configuration of the DaemonSetStatus type for use
// with apply.
+//
+// DaemonSetStatus represents the current status of a daemon set.
type DaemonSetStatusApplyConfiguration struct {
- CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
- NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
- DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
- NumberReady *int32 `json:"numberReady,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
- NumberAvailable *int32 `json:"numberAvailable,omitempty"`
- NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
- Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled *int32 `json:"currentNumberScheduled,omitempty"`
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled *int32 `json:"numberMisscheduled,omitempty"`
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled *int32 `json:"desiredNumberScheduled,omitempty"`
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ NumberReady *int32 `json:"numberReady,omitempty"`
+ // The most recent generation observed by the daemon set controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // The total number of nodes that are running updated daemon pod
+ UpdatedNumberScheduled *int32 `json:"updatedNumberScheduled,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ NumberAvailable *int32 `json:"numberAvailable,omitempty"`
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ NumberUnavailable *int32 `json:"numberUnavailable,omitempty"`
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // Represents the latest available observations of a DaemonSet's current state.
+ Conditions []DaemonSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// DaemonSetStatusApplyConfiguration constructs a declarative configuration of the DaemonSetStatus type for use with
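The status counters documented above are usually read together. A small sketch of one common rollout check built from those fields; a stricter controller would also compare observedGeneration against metadata.generation, which this sketch omits:

package example

import extensionsv1beta1 "k8s.io/api/extensions/v1beta1"

// daemonSetRolledOut reports whether every node that should run the daemon
// pod has an updated, available copy and none are misscheduled.
func daemonSetRolledOut(s extensionsv1beta1.DaemonSetStatus) bool {
	return s.DesiredNumberScheduled > 0 &&
		s.UpdatedNumberScheduled >= s.DesiredNumberScheduled &&
		s.NumberAvailable >= s.DesiredNumberScheduled &&
		s.NumberMisscheduled == 0
}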
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
index d3403605..4dec8afe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonsetupdatestrategy.go
@@ -24,9 +24,20 @@ import (
// DaemonSetUpdateStrategyApplyConfiguration represents a declarative configuration of the DaemonSetUpdateStrategy type for use
// with apply.
+//
+// DaemonSetUpdateStrategy indicates the strategy that the DaemonSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
type DaemonSetUpdateStrategyApplyConfiguration struct {
- Type *extensionsv1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
- RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
+ // Default is OnDelete.
+ Type *extensionsv1beta1.DaemonSetUpdateStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ RollingUpdate *RollingUpdateDaemonSetApplyConfiguration `json:"rollingUpdate,omitempty"`
}
// DaemonSetUpdateStrategyApplyConfiguration constructs a declarative configuration of the DaemonSetUpdateStrategy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
index d9351479..6d0e5f89 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
@@ -29,11 +29,18 @@ import (
// DeploymentApplyConfiguration represents a declarative configuration of the Deployment type for use
// with apply.
+//
+// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for
+// more information.
+// Deployment enables declarative updates for Pods and ReplicaSets.
type DeploymentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the Deployment.
+ Spec *DeploymentSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the Deployment.
+ Status *DeploymentStatusApplyConfiguration `json:"status,omitempty"`
}
// Deployment constructs a declarative configuration of the Deployment type for use with
@@ -47,6 +54,27 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
return b
}
+// ExtractDeploymentFrom extracts the applied configuration owned by fieldManager from
+// deployment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// deployment must be an unmodified Deployment API object that was retrieved from the Kubernetes API.
+// ExtractDeploymentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeploymentFrom(deployment *extensionsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
+ b := &DeploymentApplyConfiguration{}
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deployment.Name)
+ b.WithNamespace(deployment.Namespace)
+
+ b.WithKind("Deployment")
+ b.WithAPIVersion("extensions/v1beta1")
+ return b, nil
+}
+
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +85,22 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "")
+ return ExtractDeploymentFrom(deployment, fieldManager, "")
}
-// ExtractDeploymentStatus is the same as ExtractDeployment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractDeploymentStatus(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
- return extractDeployment(deployment, fieldManager, "status")
+// ExtractDeploymentScale extracts the applied configuration owned by fieldManager from
+// deployment for the scale subresource.
+func ExtractDeploymentScale(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return ExtractDeploymentFrom(deployment, fieldManager, "scale")
}
-func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
- b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(deployment.Name)
- b.WithNamespace(deployment.Namespace)
-
- b.WithKind("Deployment")
- b.WithAPIVersion("extensions/v1beta1")
- return b, nil
+// ExtractDeploymentStatus extracts the applied configuration owned by fieldManager from
+// deployment for the status subresource.
+func ExtractDeploymentStatus(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return ExtractDeploymentFrom(deployment, fieldManager, "status")
}
+
func (b DeploymentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
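
ExtractDeploymentFrom and the ExtractDeployment/ExtractDeploymentScale/ExtractDeploymentStatus wrappers above exist to support an extract, modify-in-place, apply loop with server-side apply. A hedged sketch of that loop (not part of the vendored code; it assumes the deprecated extensions/v1beta1 typed client is still available with Get/Apply, and the namespace, name, and field manager are placeholders):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extensionsv1beta1ac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// bumpReplicas re-applies only the fields owned by "example-manager", with an
// updated replica count; fields owned by other managers are left untouched.
func bumpReplicas(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	// Read the live object, then extract just the fields our manager owns.
	live, err := cs.ExtensionsV1beta1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := extensionsv1beta1ac.ExtractDeployment(live, "example-manager") // main resource, i.e. subresource ""
	if err != nil {
		return err
	}
	// Modify in place, then apply back with the same field manager.
	if ac.Spec == nil {
		ac.WithSpec(extensionsv1beta1ac.DeploymentSpec())
	}
	ac.Spec.WithReplicas(replicas)
	_, err = cs.ExtensionsV1beta1().Deployments(ns).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}
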
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
index 2b64508d..c423e756 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentcondition.go
@@ -26,13 +26,21 @@ import (
// DeploymentConditionApplyConfiguration represents a declarative configuration of the DeploymentCondition type for use
// with apply.
+//
+// DeploymentCondition describes the state of a deployment at a certain point.
type DeploymentConditionApplyConfiguration struct {
- Type *extensionsv1beta1.DeploymentConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of deployment condition.
+ Type *extensionsv1beta1.DeploymentConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // The last time this condition was updated.
+ LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// DeploymentConditionApplyConfiguration constructs a declarative configuration of the DeploymentCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
index 5531c756..12f3e2c7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentspec.go
@@ -25,16 +25,41 @@ import (
// DeploymentSpecApplyConfiguration represents a declarative configuration of the DeploymentSpec type for use
// with apply.
+//
+// DeploymentSpec is the specification of the desired behavior of the Deployment.
type DeploymentSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
- Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
- Paused *bool `json:"paused,omitempty"`
- RollbackTo *RollbackConfigApplyConfiguration `json:"rollbackTo,omitempty"`
- ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
+ // Number of desired pods. This is a pointer to distinguish between explicit
+ // zero and not specified. Defaults to 1.
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template describes the pods that will be created.
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // The deployment strategy to use to replace existing pods with new ones.
+ Strategy *DeploymentStrategyApplyConfiguration `json:"strategy,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // This is set to the max value of int32 (i.e. 2147483647) by default, which
+ // means "retaining all old ReplicaSets".
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ // Indicates that the deployment is paused and will not be processed by the
+ // deployment controller.
+ Paused *bool `json:"paused,omitempty"`
+ // DEPRECATED.
+ // The config this deployment is rolling back to. Will be cleared after rollback is done.
+ RollbackTo *RollbackConfigApplyConfiguration `json:"rollbackTo,omitempty"`
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. This is set to
+ // the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty"`
}
// DeploymentSpecApplyConfiguration constructs a declarative configuration of the DeploymentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
index 36b4fd42..e58b08ed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstatus.go
@@ -20,16 +20,34 @@ package v1beta1
// DeploymentStatusApplyConfiguration represents a declarative configuration of the DeploymentStatus type for use
// with apply.
+//
+// DeploymentStatus is the most recently observed status of the Deployment.
type DeploymentStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Replicas *int32 `json:"replicas,omitempty"`
- UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
- CollisionCount *int32 `json:"collisionCount,omitempty"`
+ // The generation observed by the deployment controller.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment (their labels match the selector).
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Total number of non-terminating pods targeted by this deployment that have the desired template spec.
+ UpdatedReplicas *int32 `json:"updatedReplicas,omitempty"`
+ // Total number of non-terminating pods targeted by this Deployment with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // Total number of available non-terminating pods (ready for at least minReadySeconds) targeted by this deployment.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ UnavailableReplicas *int32 `json:"unavailableReplicas,omitempty"`
+ // Total number of terminating pods targeted by this deployment. Terminating pods have a non-null
+ // .metadata.deletionTimestamp and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // Represents the latest available observations of a deployment's current state.
+ Conditions []DeploymentConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ CollisionCount *int32 `json:"collisionCount,omitempty"`
}
// DeploymentStatusApplyConfiguration constructs a declarative configuration of the DeploymentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
index b142b0de..4c368f33 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deploymentstrategy.go
@@ -24,8 +24,16 @@ import (
// DeploymentStrategyApplyConfiguration represents a declarative configuration of the DeploymentStrategy type for use
// with apply.
+//
+// DeploymentStrategy describes how to replace existing pods with new ones.
type DeploymentStrategyApplyConfiguration struct {
- Type *extensionsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ Type *extensionsv1beta1.DeploymentStrategyType `json:"type,omitempty"`
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ // ---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
RollingUpdate *RollingUpdateDeploymentApplyConfiguration `json:"rollingUpdate,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
index 32e0c8b1..1a84aebc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingresspath.go
@@ -24,10 +24,34 @@ import (
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
+//
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
type HTTPIngressPathApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- PathType *extensionsv1beta1.PathType `json:"pathType,omitempty"`
- Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
+ // Path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
+ // all paths from incoming requests are matched.
+ Path *string `json:"path,omitempty"`
+ // PathType determines the interpretation of the Path matching. PathType can
+ // be one of the following values:
+ // * Exact: Matches the URL path exactly.
+ // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+	//   done on a path element by element basis. A path element refers to the
+	//   list of labels in the path split by the '/' separator. A request is a
+	//   match for path p if every element of p is an element-wise prefix of the
+ // request path. Note that if the last element of the path is a substring
+ // of the last element in request path, it is not a match (e.g. /foo/bar
+ // matches /foo/bar/baz, but does not match /foo/barbaz).
+ // * ImplementationSpecific: Interpretation of the Path matching is up to
+ // the IngressClass. Implementations can treat this as a separate PathType
+ // or treat it identically to Prefix or Exact path types.
+ // Implementations are required to support all path types.
+ // Defaults to ImplementationSpecific.
+ PathType *extensionsv1beta1.PathType `json:"pathType,omitempty"`
+ // Backend defines the referenced service endpoint to which the traffic
+	// will be forwarded.
+ Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
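
The element-wise Prefix matching described above (e.g. /foo/bar matches /foo/bar/baz but not /foo/barbaz) is typically expressed with the generated builders. A small illustrative sketch (not from the vendored code; it assumes the PathType constants in k8s.io/api/extensions/v1beta1 and the generated With* setters, with placeholder service names):

package example

import (
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
	extensionsv1beta1ac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

// prefixPath matches /foo/bar and /foo/bar/baz, but not /foo/barbaz,
// per the element-wise Prefix rules documented above.
func prefixPath() *extensionsv1beta1ac.HTTPIngressPathApplyConfiguration {
	return extensionsv1beta1ac.HTTPIngressPath().
		WithPath("/foo/bar").
		WithPathType(extensionsv1beta1.PathTypePrefix).
		WithBackend(extensionsv1beta1ac.IngressBackend().
			WithServiceName("example-svc").
			WithServicePort(intstr.FromInt32(8080)))
}
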
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
index 12454522..8c8db0c3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/httpingressrulevalue.go
@@ -20,7 +20,14 @@ package v1beta1
// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
// with apply.
+//
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend, where
+// parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
type HTTPIngressRuleValueApplyConfiguration struct {
+ // A collection of paths that map requests to backends.
Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
index 6c792531..ba82ba9b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
@@ -29,11 +29,23 @@ import (
// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
// with apply.
+//
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.
type IngressApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
- Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec is the desired state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the current state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
}
// Ingress constructs a declarative configuration of the Ingress type for use with
@@ -47,6 +59,27 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
return b
}
+// ExtractIngressFrom extracts the applied configuration owned by fieldManager from
+// ingress for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// ingress must be an unmodified Ingress API object that was retrieved from the Kubernetes API.
+// ExtractIngressFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIngressFrom(ingress *extensionsv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
+ b := &IngressApplyConfiguration{}
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(ingress.Name)
+ b.WithNamespace(ingress.Namespace)
+
+ b.WithKind("Ingress")
+ b.WithAPIVersion("extensions/v1beta1")
+ return b, nil
+}
+
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +90,16 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "")
+ return ExtractIngressFrom(ingress, fieldManager, "")
}
-// ExtractIngressStatus is the same as ExtractIngress except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractIngressStatus extracts the applied configuration owned by fieldManager from
+// ingress for the status subresource.
func ExtractIngressStatus(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "status")
+ return ExtractIngressFrom(ingress, fieldManager, "status")
}
-func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
- b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(ingress.Name)
- b.WithNamespace(ingress.Namespace)
-
- b.WithKind("Ingress")
- b.WithAPIVersion("extensions/v1beta1")
- return b, nil
-}
func (b IngressApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
index 9d386f16..7c84833a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressbackend.go
@@ -25,10 +25,17 @@ import (
// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
// with apply.
+//
+// IngressBackend describes all endpoints for a given service and port.
type IngressBackendApplyConfiguration struct {
- ServiceName *string `json:"serviceName,omitempty"`
- ServicePort *intstr.IntOrString `json:"servicePort,omitempty"`
- Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
+ // Specifies the name of the referenced service.
+ ServiceName *string `json:"serviceName,omitempty"`
+ // Specifies the port of the referenced service.
+ ServicePort *intstr.IntOrString `json:"servicePort,omitempty"`
+ // Resource is an ObjectRef to another Kubernetes resource in the namespace
+ // of the Ingress object. If resource is specified, serviceName and servicePort
+ // must not be specified.
+ Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
}
// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
index 12dbc359..6b1d3cd8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalanceringress.go
@@ -20,10 +20,15 @@ package v1beta1
// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
// with apply.
+//
+// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngressApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
+ // IP is set for load-balancer ingress points that are IP based.
+ IP *string `json:"ip,omitempty"`
+ // Hostname is set for load-balancer ingress points that are DNS based.
+ Hostname *string `json:"hostname,omitempty"`
+ // Ports provides information about the ports exposed by this LoadBalancer.
+ Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
}
// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
index e896ab34..a71887ad 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressloadbalancerstatus.go
@@ -20,7 +20,10 @@ package v1beta1
// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
// with apply.
+//
+// LoadBalancerStatus represents the status of a load-balancer.
type IngressLoadBalancerStatusApplyConfiguration struct {
+ // Ingress is a list containing ingress points for the load-balancer.
Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
index 4ee3f016..019c29bc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressportstatus.go
@@ -24,10 +24,23 @@ import (
// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
// with apply.
+//
+// IngressPortStatus represents the error condition of a service port
type IngressPortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
+ // Port is the port number of the ingress port.
+ Port *int32 `json:"port,omitempty"`
+ // Protocol is the protocol of the ingress port.
+ // The supported values are: "TCP", "UDP", "SCTP"
Protocol *v1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ // Error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ Error *string `json:"error,omitempty"`
}
// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
index 809fada9..91ebcf89 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrule.go
@@ -20,8 +20,39 @@ package v1beta1
// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
// with apply.
+//
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
type IngressRuleApplyConfiguration struct {
- Host *string `json:"host,omitempty"`
+ // Host is the fully qualified domain name of a network host, as defined by RFC 3986.
+ // Note the following deviations from the "host" part of the
+ // URI as defined in RFC 3986:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+ // the IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the
+ // IngressRuleValue. If the host is unspecified, the Ingress routes all
+ // traffic based on the specified IngressRuleValue.
+ //
+ // Host can be "precise" which is a domain name without the terminating dot of
+ // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+ // prefixed with a single wildcard label (e.g. "*.foo.com").
+ // The wildcard character '*' must appear by itself as the first DNS label and
+ // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+ // Requests will be matched against the Host field in the following way:
+ // 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+ // 2. If Host is a wildcard, then the request matches this rule if the http host header
+	//    is equal to the suffix (removing the first label) of the wildcard rule.
+ Host *string `json:"host,omitempty"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to a http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
IngressRuleValueApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
index 4a641247..1ace1322 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressrulevalue.go
@@ -20,7 +20,18 @@ package v1beta1
// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
// with apply.
+//
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
type IngressRuleValueApplyConfiguration struct {
+ // http is a list of http selectors pointing to backends.
+ // A path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/'.
+ // A backend defines the referenced service endpoint to which the traffic
+	// will be forwarded.
HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
index 58fbde8b..1b01da93 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressspec.go
@@ -20,11 +20,34 @@ package v1beta1
// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
// with apply.
+//
+// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpecApplyConfiguration struct {
- IngressClassName *string `json:"ingressClassName,omitempty"`
- Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
- TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
- Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
+ // IngressClassName is the name of the IngressClass cluster resource. The
+ // associated IngressClass defines which controller will implement the
+ // resource. This replaces the deprecated `kubernetes.io/ingress.class`
+ // annotation. For backwards compatibility, when that annotation is set, it
+ // must be given precedence over this field. The controller may emit a
+ // warning if the field and annotation have different values.
+ // Implementations of this API should ignore Ingresses without a class
+ // specified. An IngressClass resource may be marked as default, which can
+ // be used to set a default value for this field. For more information,
+ // refer to the IngressClass documentation.
+ IngressClassName *string `json:"ingressClassName,omitempty"`
+ // A default backend capable of servicing requests that don't match any
+ // rule. At least one of 'backend' or 'rules' must be specified. This field
+ // is optional to allow the loadbalancer controller or defaulting logic to
+ // specify a global default.
+ Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
+ // TLS configuration. Currently the Ingress only supports a single TLS
+ // port, 443. If multiple members of this list specify different hosts, they
+ // will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
+ // A list of host rules used to configure the Ingress. If unspecified, or
+ // no rule matches, all traffic is sent to the default backend.
+ Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
}
// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
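
Putting the spec fields above together, here is a hedged sketch of a minimal IngressSpec apply configuration with one TLS entry and one host rule (not part of the vendored code; the class, hostnames, secret, and service names are placeholders):

package example

import (
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
	extensionsv1beta1ac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

// exampleIngressSpec routes https://foo.example.com/ to example-svc:443 and
// terminates TLS with the named secret, as described in the field comments above.
func exampleIngressSpec() *extensionsv1beta1ac.IngressSpecApplyConfiguration {
	return extensionsv1beta1ac.IngressSpec().
		WithIngressClassName("example-class").
		WithTLS(extensionsv1beta1ac.IngressTLS().
			WithHosts("foo.example.com").
			WithSecretName("foo-example-com-tls")).
		WithRules(extensionsv1beta1ac.IngressRule().
			WithHost("foo.example.com").
			WithHTTP(extensionsv1beta1ac.HTTPIngressRuleValue().
				WithPaths(extensionsv1beta1ac.HTTPIngressPath().
					WithPath("/").
					WithPathType(extensionsv1beta1.PathTypeImplementationSpecific).
					WithBackend(extensionsv1beta1ac.IngressBackend().
						WithServiceName("example-svc").
						WithServicePort(intstr.FromInt32(443))))))
}
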
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
index 3aed6168..1374e016 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingressstatus.go
@@ -20,7 +20,10 @@ package v1beta1
// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
// with apply.
+//
+// IngressStatus describe the current state of the Ingress.
type IngressStatusApplyConfiguration struct {
+ // LoadBalancer contains the current status of the load-balancer.
LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
index 63648cd4..87e6315b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingresstls.go
@@ -20,9 +20,20 @@ package v1beta1
// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
// with apply.
+//
+// IngressTLS describes the transport layer security associated with an Ingress.
type IngressTLSApplyConfiguration struct {
- Hosts []string `json:"hosts,omitempty"`
- SecretName *string `json:"secretName,omitempty"`
+ // Hosts are a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ Hosts []string `json:"hosts,omitempty"`
+ // SecretName is the name of the secret used to terminate SSL traffic on 443.
+ // Field is left optional to allow SSL routing based on SNI hostname alone.
+ // If the SNI host in a listener conflicts with the "Host" header field used
+	// by an IngressRule, the SNI host is used for termination and the value of the
+ // Host header is used for routing.
+ SecretName *string `json:"secretName,omitempty"`
}
// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
index 4a671130..903a870d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ipblock.go
@@ -20,8 +20,18 @@ package v1beta1
// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
type IPBlockApplyConfiguration struct {
- CIDR *string `json:"cidr,omitempty"`
+ // CIDR is a string representing the IP Block
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ CIDR *string `json:"cidr,omitempty"`
+ // Except is a slice of CIDRs that should not be included within an IP Block
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ // Except values will be rejected if they are outside the CIDR range
Except []string `json:"except,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
index e1f0aad8..64d95e49 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
@@ -29,10 +29,16 @@ import (
// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy.
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
type NetworkPolicyApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // Specification of the desired behavior for this NetworkPolicy.
+ Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with
@@ -46,29 +52,14 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
return b
}
-// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
-// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
-// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractNetworkPolicyFrom extracts the applied configuration owned by fieldManager from
+// networkPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
-// ExtractNetworkPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractNetworkPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
- return extractNetworkPolicy(networkPolicy, fieldManager, "")
-}
-
-// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractNetworkPolicyStatus(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
- return extractNetworkPolicy(networkPolicy, fieldManager, "status")
-}
-
-func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
+func ExtractNetworkPolicyFrom(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
b := &NetworkPolicyApplyConfiguration{}
err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.NetworkPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldM
b.WithAPIVersion("extensions/v1beta1")
return b, nil
}
+
+// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
+// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
+// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// networkPolicy must be an unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
+// ExtractNetworkPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return ExtractNetworkPolicyFrom(networkPolicy, fieldManager, "")
+}
+
func (b NetworkPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
index ca3e174f..d9812fc2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyegressrule.go
@@ -20,9 +20,24 @@ package v1beta1
// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule.
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
type NetworkPolicyEgressRuleApplyConfiguration struct {
+ // List of destination ports for outgoing traffic.
+ // Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
- To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
+ // List of destinations for outgoing traffic of pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all destinations (traffic not restricted by
+ // destination). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the to list.
+ To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
}
// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
index 16071372..c52d5116 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyingressrule.go
@@ -20,9 +20,22 @@ package v1beta1
// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule.
+// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
type NetworkPolicyIngressRuleApplyConfiguration struct {
+ // List of ports which should be made accessible on the pods selected for this rule.
+ // Each item in this list is combined using a logical OR.
+ // If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows traffic
+ // only if the traffic matches at least one port in the list.
Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
- From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
+ // List of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation.
+ // If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
+ // If this field is present and contains at least one item, this rule allows traffic only if the
+ // traffic matches at least one item in the from list.
+ From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
}
// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
index 8a0fa574..153095e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicypeer.go
@@ -24,10 +24,26 @@ import (
// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.
type NetworkPolicyPeerApplyConfiguration struct {
- PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // This is a label selector which selects Pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ //
+ // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+ // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
+ PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // Selects Namespaces using cluster-scoped labels. This field follows standard label
+ // selector semantics; if present but empty, it selects all namespaces.
+ //
+ // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
+ // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
+ // IPBlock defines policy on a particular IPBlock. If this field is set then
+ // neither of the other fields can be.
+ IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
}
// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
index 6bc1c197..94ea3bdd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyport.go
@@ -25,10 +25,22 @@ import (
// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.
type NetworkPolicyPortApplyConfiguration struct {
- Protocol *v1.Protocol `json:"protocol,omitempty"`
- Port *intstr.IntOrString `json:"port,omitempty"`
- EndPort *int32 `json:"endPort,omitempty"`
+ // Optional. The protocol (TCP, UDP, or SCTP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ Protocol *v1.Protocol `json:"protocol,omitempty"`
+ // The port on the given protocol. This can either be a numerical or named
+ // port on a pod. If this field is not provided, this matches all port names and
+ // numbers.
+ // If present, only traffic on the specified protocol AND port will be matched.
+ Port *intstr.IntOrString `json:"port,omitempty"`
+ // If set, indicates that the range of ports from port to endPort, inclusive,
+ // should be allowed by the policy. This field cannot be defined if the port field
+ // is not defined or if the port field is defined as a named (string) port.
+ // The endPort must be equal or greater than port.
+ EndPort *int32 `json:"endPort,omitempty"`
}
// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
index 4454329c..5785b82e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicyspec.go
@@ -25,11 +25,42 @@ import (
// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use
// with apply.
+//
+// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.
type NetworkPolicySpecApplyConfiguration struct {
- PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
- Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"`
- Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"`
- PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"`
+ // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
+ // is applied to any pods selected by this field. Multiple network policies can select the
+ // same set of pods. In this case, the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ PodSelector *v1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // List of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+ // OR if the traffic source is the pod's local node,
+ // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
+ // objects whose podSelector matches the pod.
+ // If this field is empty then this NetworkPolicy does not allow any traffic
+ // (and serves solely to ensure that the pods it selects are isolated by default).
+ Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"`
+ // List of egress rules to be applied to the selected pods. Outgoing traffic is
+ // allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+ // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default).
+ // This field is beta-level in 1.8
+ Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"`
+ // List of rule types that the NetworkPolicy relates to.
+ // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
+ // If this field is not specified, it will default based on the existence of Ingress or Egress rules;
+ // policies that contain an Egress section are assumed to affect Egress, and all policies
+ // (whether or not they contain an Ingress section) are assumed to affect Ingress.
+ // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+ // Likewise, if you want to write a policy that specifies that no egress is allowed,
+	// you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+ // an Egress section and would otherwise default to just [ "Ingress" ]).
+ // This field is beta-level in 1.8
+ PolicyTypes []extensionsv1beta1.PolicyType `json:"policyTypes,omitempty"`
}
// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with
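
The policyTypes defaulting described above is the usual pitfall for egress-only policies. A hedged sketch (not from the vendored code; labels and CIDRs are placeholders) that pins policyTypes to Egress so the policy does not also isolate ingress:

package example

import (
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	extensionsv1beta1ac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// egressOnlySpec selects app=backend pods and allows egress only to 10.0.0.0/24.
// Setting policyTypes to ["Egress"] keeps the policy from also affecting ingress,
// which the defaulting rules described above would otherwise make it do.
func egressOnlySpec() *extensionsv1beta1ac.NetworkPolicySpecApplyConfiguration {
	return extensionsv1beta1ac.NetworkPolicySpec().
		WithPodSelector(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"app": "backend"})).
		WithPolicyTypes(extensionsv1beta1.PolicyTypeEgress).
		WithEgress(extensionsv1beta1ac.NetworkPolicyEgressRule().
			WithTo(extensionsv1beta1ac.NetworkPolicyPeer().
				WithIPBlock(extensionsv1beta1ac.IPBlock().
					WithCIDR("10.0.0.0/24"))))
}
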
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
index dbe787b5..02d6cbff 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
@@ -29,11 +29,25 @@ import (
// ReplicaSetApplyConfiguration represents a declarative configuration of the ReplicaSet type for use
// with apply.
+//
+// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for
+// more information.
+// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
type ReplicaSetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // If the Labels of a ReplicaSet are empty, they are defaulted to
+ // be the same as the Pod(s) that the ReplicaSet manages.
+ // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec defines the specification of the desired behavior of the ReplicaSet.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ReplicaSetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is the most recently observed status of the ReplicaSet.
+ // This data may be out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ReplicaSetStatusApplyConfiguration `json:"status,omitempty"`
}
// ReplicaSet constructs a declarative configuration of the ReplicaSet type for use with
@@ -47,6 +61,27 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
return b
}
+// ExtractReplicaSetFrom extracts the applied configuration owned by fieldManager from
+// replicaSet for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// replicaSet must be an unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
+// ExtractReplicaSetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractReplicaSetFrom(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
+ b := &ReplicaSetApplyConfiguration{}
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(replicaSet.Name)
+ b.WithNamespace(replicaSet.Namespace)
+
+ b.WithKind("ReplicaSet")
+ b.WithAPIVersion("extensions/v1beta1")
+ return b, nil
+}
+
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +92,22 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "")
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "")
}
-// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractReplicaSetStatus(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
- return extractReplicaSet(replicaSet, fieldManager, "status")
+// ExtractReplicaSetScale extracts the applied configuration owned by fieldManager from
+// replicaSet for the scale subresource.
+func ExtractReplicaSetScale(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "scale")
}
-func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
- b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(replicaSet.Name)
- b.WithNamespace(replicaSet.Namespace)
-
- b.WithKind("ReplicaSet")
- b.WithAPIVersion("extensions/v1beta1")
- return b, nil
+// ExtractReplicaSetStatus extracts the applied configuration owned by fieldManager from
+// replicaSet for the status subresource.
+func ExtractReplicaSetStatus(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return ExtractReplicaSetFrom(replicaSet, fieldManager, "status")
}
+
func (b ReplicaSetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
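The new ExtractReplicaSetFrom follows the same pattern as the other Extract*From helpers introduced in this diff (FlowSchema and PriorityLevelConfiguration below). A minimal sketch of the extract/modify-in-place/apply workflow it enables, assuming a typed clientset and an illustrative field manager name; wiring the clientset is omitted.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// applyReplicas reads the live ReplicaSet, recovers only the fields owned by
// fieldManager, changes the replica count, and applies the result back.
func applyReplicas(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	const fieldManager = "example-manager" // illustrative manager name

	live, err := cs.ExtensionsV1beta1().ReplicaSets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// An empty subresource string extracts the main resource, as documented above.
	ac, err := extv1beta1apply.ExtractReplicaSetFrom(live, fieldManager, "")
	if err != nil {
		return err
	}

	// Modify only the fields this manager should own.
	ac.WithSpec(extv1beta1apply.ReplicaSetSpec().WithReplicas(replicas))

	if _, err := cs.ExtensionsV1beta1().ReplicaSets(ns).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	}); err != nil {
		return err
	}
	fmt.Printf("applied %s/%s with %d replicas\n", ns, name, replicas)
	return nil
}

func main() {
	// Building a real *kubernetes.Clientset (e.g. via clientcmd) is omitted here.
}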
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
index 540079fe..e8100ce0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetcondition.go
@@ -26,12 +26,19 @@ import (
// ReplicaSetConditionApplyConfiguration represents a declarative configuration of the ReplicaSetCondition type for use
// with apply.
+//
+// ReplicaSetCondition describes the state of a replica set at a certain point.
type ReplicaSetConditionApplyConfiguration struct {
- Type *extensionsv1beta1.ReplicaSetConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // Type of replica set condition.
+ Type *extensionsv1beta1.ReplicaSetConditionType `json:"type,omitempty"`
+ // Status of the condition, one of True, False, Unknown.
+ Status *v1.ConditionStatus `json:"status,omitempty"`
+ // The last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // The reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // A human readable message indicating details about the transition.
+ Message *string `json:"message,omitempty"`
}
// ReplicaSetConditionApplyConfiguration constructs a declarative configuration of the ReplicaSetCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
index 27653dd1..233c622e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetspec.go
@@ -25,11 +25,27 @@ import (
// ReplicaSetSpecApplyConfiguration represents a declarative configuration of the ReplicaSetSpec type for use
// with apply.
+//
+// ReplicaSetSpec is the specification of a ReplicaSet.
type ReplicaSetSpecApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
+ // Replicas is the number of desired pods.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
+ // Selector is a label query over pods that should match the replica count.
+ // If the selector is empty, it is defaulted to the labels present on the pod template.
+ // Label keys and values that must match in order to be controlled by this replica set.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/#pod-template
+ Template *corev1.PodTemplateSpecApplyConfiguration `json:"template,omitempty"`
}
// ReplicaSetSpecApplyConfiguration constructs a declarative configuration of the ReplicaSetSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
index 46abc943..6882bc66 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicasetstatus.go
@@ -20,14 +20,27 @@ package v1beta1
// ReplicaSetStatusApplyConfiguration represents a declarative configuration of the ReplicaSetStatus type for use
// with apply.
+//
+// ReplicaSetStatus represents the current status of a ReplicaSet.
type ReplicaSetStatusApplyConfiguration struct {
- Replicas *int32 `json:"replicas,omitempty"`
- FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
- ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
- AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
- TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Replicas is the most recently observed number of non-terminating pods.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset
+ Replicas *int32 `json:"replicas,omitempty"`
+ // The number of non-terminating pods that have labels matching the labels of the pod template of the replicaset.
+ FullyLabeledReplicas *int32 `json:"fullyLabeledReplicas,omitempty"`
+ // The number of non-terminating pods targeted by this ReplicaSet with a Ready Condition.
+ ReadyReplicas *int32 `json:"readyReplicas,omitempty"`
+ // The number of available non-terminating pods (ready for at least minReadySeconds) for this replica set.
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
+ // The number of terminating pods for this replica set. Terminating pods have a non-null .metadata.deletionTimestamp
+ // and have not yet reached the Failed or Succeeded .status.phase.
+ //
+ // This is a beta field and requires enabling DeploymentReplicaSetTerminatingReplicas feature (enabled by default).
+ TerminatingReplicas *int32 `json:"terminatingReplicas,omitempty"`
+ // ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // Represents the latest available observations of a replica set's current state.
+ Conditions []ReplicaSetConditionApplyConfiguration `json:"conditions,omitempty"`
}
// ReplicaSetStatusApplyConfiguration constructs a declarative configuration of the ReplicaSetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
index 775f82ee..f73b9e67 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollbackconfig.go
@@ -20,7 +20,10 @@ package v1beta1
// RollbackConfigApplyConfiguration represents a declarative configuration of the RollbackConfig type for use
// with apply.
+//
+// DEPRECATED.
type RollbackConfigApplyConfiguration struct {
+ // The revision to rollback to. If set to 0, rollback to the last revision.
Revision *int64 `json:"revision,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
index 4352f7fa..0ce80e98 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedaemonset.go
@@ -24,9 +24,44 @@ import (
// RollingUpdateDaemonSetApplyConfiguration represents a declarative configuration of the RollingUpdateDaemonSet type for use
// with apply.
+//
+// Spec to control the desired behavior of daemon set rolling update.
type RollingUpdateDaemonSetApplyConfiguration struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0 if MaxSurge is 0
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given time. The update
+ // starts by stopping at most 30% of those DaemonSet pods and then brings
+ // up new DaemonSet pods in their place. Once the new pods are available,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of original number of DaemonSet pods are available at all times during
+ // the update.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of nodes with an existing available DaemonSet pod that
+ // can have an updated DaemonSet pod during an update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ // Default value is 0.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have a new pod created before the old pod is marked as deleted.
+ // The update starts by launching new pods on 30% of nodes. Once an updated
+ // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ // on that node is marked deleted. If the old pod becomes unavailable for any
+ // reason (Ready transitions to false, is evicted, or is drained) an updated
+ // pod is immediately created on that node without considering surge limits.
+ // Allowing surge implies the possibility that the resources consumed by the
+ // daemonset on any given node can double if the readiness check fails, and
+ // so resource intensive daemonsets should take into account that they may
+ // cause evictions during disruption.
+ // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDaemonSetApplyConfiguration constructs a declarative configuration of the RollingUpdateDaemonSet type for use with
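A small sketch of the maxUnavailable semantics described above: the generated builder accepts a percentage via intstr, and the absolute count is derived by rounding up. The node count here is an assumed example value.

package main

import (
	"fmt"
	"math"

	"k8s.io/apimachinery/pkg/util/intstr"
	extv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
	ru := extv1beta1apply.RollingUpdateDaemonSet().
		WithMaxUnavailable(intstr.FromString("30%"))

	// With 10 nodes running the daemon pod, 30% rounds up to 3 pods that may be
	// stopped at once, so at least 7 of the original pods stay available
	// throughout the update.
	desired := 10
	maxUnavailable := int(math.Ceil(0.30 * float64(desired)))
	fmt.Println(ru.MaxUnavailable.StrVal, maxUnavailable) // 30% 3
}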
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
index 244701a5..34461b65 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/rollingupdatedeployment.go
@@ -24,9 +24,32 @@ import (
// RollingUpdateDeploymentApplyConfiguration represents a declarative configuration of the RollingUpdateDeployment type for use
// with apply.
+//
+// Spec to control the desired behavior of rolling update.
type RollingUpdateDeploymentApplyConfiguration struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This can not be 0 if MaxSurge is 0.
+ // By default, a fixed value of 1 is used.
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods
+ // immediately when the rolling update starts. Once new pods are ready, old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that the total number of pods available at all times during the update is at
+ // least 70% of desired pods.
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
- MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
+ // The maximum number of pods that can be scheduled above the desired number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This can not be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // By default, a value of 1 is used.
+ // Example: when this is set to 30%, the new RC can be scaled up immediately when
+ // the rolling update starts, such that the total number of old and new pods do not exceed
+ // 130% of desired pods. Once old pods have been killed,
+ // new RC can be scaled up further, ensuring that total number of pods running
+ // at any time during the update is at most 130% of desired pods.
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty"`
}
// RollingUpdateDeploymentApplyConfiguration constructs a declarative configuration of the RollingUpdateDeployment type for use with
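For the Deployment variant above, maxUnavailable converts to an absolute count by rounding down while maxSurge rounds up. A short sketch with assumed values, showing the availability and surge bounds the comments describe:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
	extv1beta1apply "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
	ru := extv1beta1apply.RollingUpdateDeployment().
		WithMaxUnavailable(intstr.FromString("30%")). // rounded down to an absolute count
		WithMaxSurge(intstr.FromString("30%"))        // rounded up to an absolute count

	// For 10 desired pods this keeps at least 7 pods available and allows at
	// most 13 old+new pods in total at any point during the rollout.
	fmt.Println(ru.MaxUnavailable.StrVal, ru.MaxSurge.StrVal)
}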
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
index 84dcc97c..9637d58c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
@@ -27,11 +27,16 @@ import (
// ScaleApplyConfiguration represents a declarative configuration of the Scale type for use
// with apply.
+//
+// represents a scaling request for a resource.
type ScaleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *extensionsv1beta1.ScaleSpec `json:"spec,omitempty"`
- Status *extensionsv1beta1.ScaleStatus `json:"status,omitempty"`
+ // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+ Spec *extensionsv1beta1.ScaleSpec `json:"spec,omitempty"`
+ // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
+ Status *extensionsv1beta1.ScaleStatus `json:"status,omitempty"`
}
// ScaleApplyConfiguration constructs a declarative configuration of the Scale type for use with
@@ -42,6 +47,7 @@ func Scale() *ScaleApplyConfiguration {
b.WithAPIVersion("extensions/v1beta1")
return b
}
+
func (b ScaleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
index 4e5805f3..e95d6a62 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/exemptprioritylevelconfiguration.go
@@ -20,9 +20,35 @@ package v1
// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
// with apply.
+//
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
}
// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
index f8923ae7..ef941c1e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowdistinguishermethod.go
@@ -24,7 +24,12 @@ import (
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
+//
+// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethodApplyConfiguration struct {
+ // `type` is the type of flow distinguisher method
+ // The supported types are "ByUser" and "ByNamespace".
+ // Required.
Type *flowcontrolv1.FlowDistinguisherMethodType `json:"type,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
index 5ffebfd3..1bfc945a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschema.go
@@ -29,11 +29,20 @@ import (
// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
// with apply.
+//
+// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
+// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchemaApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
}
// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
@@ -46,6 +55,26 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
return b
}
+// ExtractFlowSchemaFrom extracts the applied configuration owned by fieldManager from
+// flowSchema for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// flowSchema must be an unmodified FlowSchema API object that was retrieved from the Kubernetes API.
+// ExtractFlowSchemaFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractFlowSchemaFrom(flowSchema *flowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
+ b := &FlowSchemaApplyConfiguration{}
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(flowSchema.Name)
+
+ b.WithKind("FlowSchema")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1")
+ return b, nil
+}
+
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "")
}
-// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractFlowSchemaStatus extracts the applied configuration owned by fieldManager from
+// flowSchema for the status subresource.
func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "status")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "status")
}
-func extractFlowSchema(flowSchema *flowcontrolv1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
- b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1.FlowSchema"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(flowSchema.Name)
-
- b.WithKind("FlowSchema")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1")
- return b, nil
-}
func (b FlowSchemaApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
index d1c3dfbc..a9023615 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemacondition.go
@@ -25,12 +25,22 @@ import (
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
+//
+// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaConditionApplyConfiguration struct {
- Type *flowcontrolv1.FlowSchemaConditionType `json:"type,omitempty"`
- Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1.FlowSchemaConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
index 4efd5d28..dc347655 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemaspec.go
@@ -20,11 +20,25 @@ package v1
// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
// with apply.
+//
+// FlowSchemaSpec describes what the FlowSchema's specification looks like.
type FlowSchemaSpecApplyConfiguration struct {
+ // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
+ // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+ // Required.
PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
- MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
- DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
- Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
+ // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+ // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+ // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000].
+ // Note that if the precedence is not specified, it will be set to 1000 as default.
+ MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
+ // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+ // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+ DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
+ // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+ // at least one member of rules matches the request.
+ // If it is an empty slice, there will be no requests matching the FlowSchema.
+ Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
}
// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
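A hedged sketch of how the FlowSchemaSpec fields documented above fit together, using the generated apply-configuration builders from this package. The priority level name, precedence, and subject are illustrative assumptions, not values required by the API.

package main

import (
	"fmt"

	flowcontrolv1 "k8s.io/api/flowcontrol/v1"
	fcapply "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
)

func main() {
	spec := fcapply.FlowSchemaSpec().
		// Must resolve to an existing PriorityLevelConfiguration, otherwise the
		// FlowSchema is marked invalid in its status.
		WithPriorityLevelConfiguration(fcapply.PriorityLevelConfigurationReference().
			WithName("global-default")).
		// Numerically lower precedence wins when several FlowSchemas match.
		WithMatchingPrecedence(500).
		// Distinguish flows by the requesting user.
		WithDistinguisherMethod(fcapply.FlowDistinguisherMethod().
			WithType(flowcontrolv1.FlowDistinguisherMethodByUserType)).
		// Match every non-resource request from any authenticated user.
		WithRules(fcapply.PolicyRulesWithSubjects().
			WithSubjects(fcapply.Subject().
				WithKind(flowcontrolv1.SubjectKindGroup).
				WithGroup(fcapply.GroupSubject().WithName("system:authenticated"))).
			WithNonResourceRules(fcapply.NonResourcePolicyRule().
				WithVerbs("*").
				WithNonResourceURLs("*")))

	fmt.Println(*spec.MatchingPrecedence)
}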
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
index 6f951967..c9bd7275 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/flowschemastatus.go
@@ -20,7 +20,10 @@ package v1
// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
// with apply.
+//
+// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatusApplyConfiguration struct {
+ // `conditions` is a list of the current states of FlowSchema.
Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
index 0be9eddf..50a4c9a0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/groupsubject.go
@@ -20,7 +20,13 @@ package v1
// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
// with apply.
+//
+// GroupSubject holds detailed information for group-kind subject.
type GroupSubjectApplyConfiguration struct {
+ // name is the user group that matches, or "*" to match all user groups.
+ // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
+ // well-known group names.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
index 8e276429..cd93e926 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitedprioritylevelconfiguration.go
@@ -20,11 +20,58 @@ package v1
// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
// with apply.
+//
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// - How are requests for this priority level limited?
+// - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfigurationApplyConfiguration struct {
- NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
- BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats available at this priority level.
+ // This is used both for requests dispatched from this priority level
+ // as well as requests dispatched from other priority levels
+ // borrowing seats from this level.
+ // The server's concurrency limit (ServerCL) is divided among the
+ // Limited priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ //
+ // If not specified, this field defaults to a value of 30.
+ //
+ // Setting this field to zero supports the construction of a
+ // "jail" for this priority level that is used to hold some request(s)
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ // `limitResponse` indicates what to do with requests that can not be executed right now
+ LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `borrowingLimitPercent`, if present, configures a limit on how many
+ // seats this priority level can borrow from other priority levels.
+ // The limit is known as this level's BorrowingConcurrencyLimit
+ // (BorrowingCL) and is a limit on the total number of seats that this
+ // level may borrow at any one time.
+ // This field holds the ratio of that limit to the level's nominal
+ // concurrency limit. When this field is non-nil, it must hold a
+ // non-negative integer and the limit is calculated as follows.
+ //
+ // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
+ //
+ // The value of this field can be more than 100, implying that this
+ // priority level can borrow a number of seats that is greater than
+ // its own nominal concurrency limit (NominalCL).
+ // When this field is left `nil`, the limit is effectively infinite.
+ BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
}
// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
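A worked example of the NominalCL, LendableCL, and BorrowingCL formulas quoted above. The server concurrency limit, level names, shares, and percentages are assumed inputs, not cluster defaults.

package main

import (
	"fmt"
	"math"
)

func main() {
	serverCL := 600.0 // assumed server-wide concurrency limit (seats)
	levels := []struct {
		name            string
		ncs             float64 // nominalConcurrencyShares
		lendablePercent float64
		borrowingLimit  float64 // borrowingLimitPercent (nil means effectively unlimited)
	}{
		{"workload-high", 100, 50, 100},
		{"workload-low", 30, 90, 200},
		{"catch-all", 20, 0, 0},
	}

	sumNCS := 0.0
	for _, l := range levels {
		sumNCS += l.ncs
	}
	for _, l := range levels {
		nominalCL := math.Ceil(serverCL * l.ncs / sumNCS)
		lendableCL := math.Round(nominalCL * l.lendablePercent / 100.0)
		borrowingCL := math.Round(nominalCL * l.borrowingLimit / 100.0)
		fmt.Printf("%s: NominalCL=%.0f LendableCL=%.0f BorrowingCL=%.0f\n",
			l.name, nominalCL, lendableCL, borrowingCL)
	}
}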
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
index dc2e919d..9ba5d188 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/limitresponse.go
@@ -24,8 +24,19 @@ import (
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
+//
+// LimitResponse defines how to handle requests that can not be executed right now.
type LimitResponseApplyConfiguration struct {
- Type *flowcontrolv1.LimitResponseType `json:"type,omitempty"`
+ // `type` is "Queue" or "Reject".
+ // "Queue" means that requests that can not be executed upon arrival
+ // are held in a queue until they can be executed or a queuing limit
+ // is reached.
+ // "Reject" means that requests that can not be executed upon arrival
+ // are rejected.
+ // Required.
+ Type *flowcontrolv1.LimitResponseType `json:"type,omitempty"`
+ // `queuing` holds the configuration parameters for queuing.
+ // This field may be non-empty only if `type` is `"Queue"`.
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
index 29c26b34..5aeaa9de 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/nonresourcepolicyrule.go
@@ -20,8 +20,24 @@ package v1
// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
// with apply.
+//
+// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
+// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
+// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs. If it is present, it must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
+ // For example:
+ // - "/healthz" is legal
+ // - "/hea*" is illegal
+ // - "/hea" is legal but matches nothing
+ // - "/hea/*" also matches nothing
+ // - "/healthz/*" matches all per-component health checks.
+ // "*" matches all non-resource urls. if it is present, it must be the only entry.
+ // Required.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
index 088afdc5..7e171648 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/policyruleswithsubjects.go
@@ -20,9 +20,23 @@ package v1
// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
// with apply.
+//
+// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
+// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
+// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
+// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjectsApplyConfiguration struct {
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // subjects is the list of normal user, serviceaccount, or group that this rule cares about.
+ // There must be at least one member in this slice.
+ // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
+ // Required.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
+ // target resource.
+ // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
+ ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
+ // and the target non-resource URL.
NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
index 8fb6a664..d64c3acb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfiguration.go
@@ -29,11 +29,19 @@ import (
// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
// with apply.
+//
+// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfigurationApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
}
// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
@@ -46,6 +54,26 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
return b
}
+// ExtractPriorityLevelConfigurationFrom extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// priorityLevelConfiguration must be an unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractPriorityLevelConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ b := &PriorityLevelConfigurationApplyConfiguration{}
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(priorityLevelConfiguration.Name)
+
+ b.WithKind("PriorityLevelConfiguration")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1")
+ return b, nil
+}
+
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "")
}
-// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPriorityLevelConfigurationStatus extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the status subresource.
func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "status")
}
-func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(priorityLevelConfiguration.Name)
-
- b.WithKind("PriorityLevelConfiguration")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1")
- return b, nil
-}
func (b PriorityLevelConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
index a7810adf..82b9547d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationcondition.go
@@ -25,12 +25,22 @@ import (
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
+//
+// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *flowcontrolv1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
index f445713f..aec06f30 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationreference.go
@@ -20,7 +20,11 @@ package v1
// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
// with apply.
+//
+// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReferenceApplyConfiguration struct {
+ // `name` is the name of the priority level configuration being referenced.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
index 45e4cdcd..820b0bb6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationspec.go
@@ -24,10 +24,28 @@ import (
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
+//
+// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *flowcontrolv1.PriorityLevelEnablement `json:"type,omitempty"`
+ // `type` indicates whether this priority level is subject to
+ // limitation on request execution. A value of `"Exempt"` means
+ // that requests of this priority level are not subject to a limit
+ // (and thus are never queued) and do not detract from the
+ // capacity made available to other priority levels. A value of
+ // `"Limited"` means that (a) requests of this priority level
+ // _are_ subject to limits and (b) some of the server's limited
+ // capacity is made available exclusively to this priority level.
+ // Required.
+ Type *flowcontrolv1.PriorityLevelEnablement `json:"type,omitempty"`
+ // `limited` specifies how requests are handled for a Limited priority level.
+ // This field must be non-empty if and only if `type` is `"Limited"`.
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
- Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
index ff650bc3..36331548 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/prioritylevelconfigurationstatus.go
@@ -20,7 +20,10 @@ package v1
// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
// with apply.
+//
+// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatusApplyConfiguration struct {
+ // `conditions` is the current state of "request-priority".
Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
index 7488f9bb..04bc6dfe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/queuingconfiguration.go
@@ -20,9 +20,32 @@ package v1
// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
// with apply.
+//
+// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfigurationApplyConfiguration struct {
- Queues *int32 `json:"queues,omitempty"`
- HandSize *int32 `json:"handSize,omitempty"`
+ // `queues` is the number of queues for this priority level. The
+ // queues exist independently at each apiserver. The value must be
+ // positive. Setting it to 1 effectively precludes
+	// shuffle sharding and thus makes the distinguisher method of
+ // associated flow schemas irrelevant. This field has a default
+ // value of 64.
+ Queues *int32 `json:"queues,omitempty"`
+ // `handSize` is a small positive number that configures the
+ // shuffle sharding of requests into queues. When enqueuing a request
+ // at this priority level the request's flow identifier (a string
+ // pair) is hashed and the hash value is used to shuffle the list
+ // of queues and deal a hand of the size specified here. The
+ // request is put into one of the shortest queues in that hand.
+ // `handSize` must be no larger than `queues`, and should be
+ // significantly smaller (so that a few heavy flows do not
+ // saturate most of the queues). See the user-facing
+ // documentation for more extensive guidance on setting this
+ // field. This field has a default value of 8.
+ HandSize *int32 `json:"handSize,omitempty"`
+ // `queueLengthLimit` is the maximum number of requests allowed to
+ // be waiting in a given queue of this priority level at a time;
+ // excess requests are rejected. This value must be positive. If
+ // not specified, it will be defaulted to 50.
QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
}
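The defaults called out in the new comments (64 queues, hand size 8, queue length limit 50) can be spelled out explicitly with the generated v1 builder. A short sketch, with values chosen only to mirror those documented defaults:

```go
package main

import (
	"fmt"

	flowcontrolv1ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
)

func main() {
	// handSize must not exceed queues, and should be much smaller so that a
	// few heavy flows cannot saturate most of the queues.
	q := flowcontrolv1ac.QueuingConfiguration().
		WithQueues(64).
		WithHandSize(8).
		WithQueueLengthLimit(50)
	fmt.Println(*q.Queues, *q.HandSize, *q.QueueLengthLimit)
}
```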
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
index 7428582a..5ff4588a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/resourcepolicyrule.go
@@ -20,12 +20,46 @@ package v1
// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
// with apply.
+//
+// ResourcePolicyRule is a predicate that matches some resource
+// requests, testing the request's verb and the target resource. A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and at least one member
+// of namespaces matches the request's namespace.
type ResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ClusterScope *bool `json:"clusterScope,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs and, if present, must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `apiGroups` is a list of matching API groups and may not be empty.
+ // "*" matches all API groups and, if present, must be the only entry.
+ // Required.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // `resources` is a list of matching resources (i.e., lowercase
+ // and plural) with, if desired, subresource. For example, [
+ // "services", "nodes/status" ]. This list may not be empty.
+ // "*" matches all resources and, if present, must be the only entry.
+ // Required.
+ Resources []string `json:"resources,omitempty"`
+ // `clusterScope` indicates whether to match requests that do not
+ // specify a namespace (which happens either because the resource
+ // is not namespaced or the request targets all namespaces).
+ // If this field is omitted or false then the `namespaces` field
+ // must contain a non-empty list.
+ ClusterScope *bool `json:"clusterScope,omitempty"`
+ // `namespaces` is a list of target namespaces that restricts
+ // matches. A request that specifies a target namespace matches
+ // only if either (a) this list contains that target namespace or
+ // (b) this list contains "*". Note that "*" matches any
+ // specified namespace but does not match a request that _does
+ // not specify_ a namespace (see the `clusterScope` field for
+ // that).
+ // This list may be empty, but only if `clusterScope` is true.
+ Namespaces []string `json:"namespaces,omitempty"`
}
// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
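As a quick illustration of the matching semantics spelled out above (verbs, apiGroups, and resources are each matched, plus the clusterScope/namespaces clause), here is a hedged sketch of a rule built with the generated v1 builders; the verbs and resources are arbitrary examples.

```go
package main

import (
	"fmt"

	flowcontrolv1ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1"
)

func main() {
	// Matches get/list of pods in any named namespace. Cluster-scoped
	// (namespace-less) requests do not match because clusterScope is unset.
	rule := flowcontrolv1ac.ResourcePolicyRule().
		WithVerbs("get", "list").
		WithAPIGroups(""). // core API group
		WithResources("pods").
		WithNamespaces("*")
	fmt.Println(rule.Verbs, rule.Resources, rule.Namespaces)
}
```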
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
index 58ad1076..2267a836 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/serviceaccountsubject.go
@@ -20,9 +20,15 @@ package v1
// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
// with apply.
+//
+// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubjectApplyConfiguration struct {
+ // `namespace` is the namespace of matching ServiceAccount objects.
+ // Required.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
+ // Required.
+ Name *string `json:"name,omitempty"`
}
// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
index e2f6f384..12317c58 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/subject.go
@@ -24,10 +24,18 @@ import (
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject matches the originator of a request, as identified by the request authentication system. There are three
+// ways of matching an originator; by user, group, or service account.
type SubjectApplyConfiguration struct {
- Kind *flowcontrolv1.SubjectKind `json:"kind,omitempty"`
- User *UserSubjectApplyConfiguration `json:"user,omitempty"`
- Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `kind` indicates which one of the other fields is non-empty.
+ // Required
+ Kind *flowcontrolv1.SubjectKind `json:"kind,omitempty"`
+ // `user` matches based on username.
+ User *UserSubjectApplyConfiguration `json:"user,omitempty"`
+ // `group` matches based on user group name.
+ Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `serviceAccount` matches ServiceAccounts.
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
index fd90067d..438df030 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1/usersubject.go
@@ -20,7 +20,11 @@ package v1
// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
// with apply.
+//
+// UserSubject holds detailed information for user-kind subject.
type UserSubjectApplyConfiguration struct {
+ // `name` is the username that matches, or "*" to match all usernames.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
index 45ccc5cb..2d2504f8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/exemptprioritylevelconfiguration.go
@@ -20,9 +20,35 @@ package v1beta1
// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
// with apply.
+//
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+	// can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
}
// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
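The NominalCL and LendableCL formulas quoted in the new comments are easy to sanity-check with concrete numbers. In the sketch below, the server concurrency limit, share values, and lendable percentage are made-up assumptions used only to show the arithmetic.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	serverCL := 600.0 // assumed server-wide concurrency limit
	ncs := map[string]float64{ // assumed nominalConcurrencyShares per level
		"exempt":        5,
		"workload-low":  100,
		"workload-high": 40,
	}
	var sumNCS float64
	for _, v := range ncs {
		sumNCS += v
	}

	// NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
	nominalCL := math.Ceil(serverCL * ncs["workload-high"] / sumNCS)

	// LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
	lendablePercent := 50.0
	lendableCL := math.Round(nominalCL * lendablePercent / 100.0)

	fmt.Printf("NominalCL=%v LendableCL=%v\n", nominalCL, lendableCL) // 166 and 83
}
```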
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
index 11aa62bb..80cb05ac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowdistinguishermethod.go
@@ -24,7 +24,12 @@ import (
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
+//
+// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethodApplyConfiguration struct {
+	// `type` is the type of flow distinguisher method.
+ // The supported types are "ByUser" and "ByNamespace".
+ // Required.
Type *flowcontrolv1beta1.FlowDistinguisherMethodType `json:"type,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
index 09c40405..80a1f720 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
@@ -29,11 +29,20 @@ import (
// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
// with apply.
+//
+// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
+// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchemaApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
}
// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
@@ -46,6 +55,26 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
return b
}
+// ExtractFlowSchemaFrom extracts the applied configuration owned by fieldManager from
+// flowSchema for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// flowSchema must be an unmodified FlowSchema API object that was retrieved from the Kubernetes API.
+// ExtractFlowSchemaFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractFlowSchemaFrom(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
+ b := &FlowSchemaApplyConfiguration{}
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(flowSchema.Name)
+
+ b.WithKind("FlowSchema")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "")
}
-// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractFlowSchemaStatus extracts the applied configuration owned by fieldManager from
+// flowSchema for the status subresource.
func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "status")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "status")
}
-func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
- b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(flowSchema.Name)
-
- b.WithKind("FlowSchema")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta1")
- return b, nil
-}
func (b FlowSchemaApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
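The newly exported ExtractFlowSchemaFrom (with ExtractFlowSchema and ExtractFlowSchemaStatus now thin wrappers around it) supports the usual extract → modify → apply round-trip. A hedged sketch of that flow; the clientset parameter, field-manager name, and precedence value are assumptions for illustration, not taken from this repo.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta1ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func bumpPrecedence(ctx context.Context, cs kubernetes.Interface, name string) error {
	live, err := cs.FlowcontrolV1beta1().FlowSchemas().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Extract only the fields owned by our field manager; "" selects the
	// main resource rather than a subresource.
	ac, err := flowcontrolv1beta1ac.ExtractFlowSchemaFrom(live, "example-manager", "")
	if err != nil {
		return err
	}
	// Declare the spec fields this manager should own going forward.
	ac.WithSpec(flowcontrolv1beta1ac.FlowSchemaSpec().WithMatchingPrecedence(500))
	_, err = cs.FlowcontrolV1beta1().FlowSchemas().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```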
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
index e7dcb436..9462bbd0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemacondition.go
@@ -25,12 +25,22 @@ import (
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
+//
+// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaConditionApplyConfiguration struct {
- Type *flowcontrolv1beta1.FlowSchemaConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta1.FlowSchemaConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
index 1d6e8fc5..470708eb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemaspec.go
@@ -20,11 +20,25 @@ package v1beta1
// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
// with apply.
+//
+// FlowSchemaSpec describes what the FlowSchema's specification looks like.
type FlowSchemaSpecApplyConfiguration struct {
+ // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
+ // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+ // Required.
PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
- MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
- DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
- Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
+ // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+ // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+ // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000].
+ // Note that if the precedence is not specified, it will be set to 1000 as default.
+ MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
+ // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+ // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+ DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
+ // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+ // at least one member of rules matches the request.
+	// If it is an empty slice, there will be no requests matching the FlowSchema.
+ Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
}
// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
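Putting the fields documented above together, the following is a sketch of a complete FlowSchemaSpec apply configuration; the referenced level name, precedence, and subject are illustrative assumptions.

```go
package main

import (
	"fmt"

	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	flowcontrolv1beta1ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1"
)

func main() {
	spec := flowcontrolv1beta1ac.FlowSchemaSpec().
		// Must resolve to an existing PriorityLevelConfiguration, otherwise
		// the FlowSchema is marked invalid in its status.
		WithPriorityLevelConfiguration(
			flowcontrolv1beta1ac.PriorityLevelConfigurationReference().WithName("workload-low")).
		WithMatchingPrecedence(1000). // the documented default when unspecified
		WithDistinguisherMethod(
			flowcontrolv1beta1ac.FlowDistinguisherMethod().
				WithType(flowcontrolv1beta1.FlowDistinguisherMethodByUserType)).
		WithRules(flowcontrolv1beta1ac.PolicyRulesWithSubjects().
			WithSubjects(flowcontrolv1beta1ac.Subject().
				WithKind(flowcontrolv1beta1.SubjectKindGroup).
				WithGroup(flowcontrolv1beta1ac.GroupSubject().WithName("system:authenticated"))).
			WithResourceRules(flowcontrolv1beta1ac.ResourcePolicyRule().
				WithVerbs("get", "list", "watch").
				WithAPIGroups("*").
				WithResources("*").
				WithClusterScope(true).
				WithNamespaces("*")))

	fmt.Println(*spec.MatchingPrecedence)
}
```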
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
index 5ad8a432..8403a3d8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschemastatus.go
@@ -20,7 +20,10 @@ package v1beta1
// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
// with apply.
+//
+// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatusApplyConfiguration struct {
+ // `conditions` is a list of the current states of FlowSchema.
Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
index cc274fe2..48d255c4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/groupsubject.go
@@ -20,7 +20,13 @@ package v1beta1
// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
// with apply.
+//
+// GroupSubject holds detailed information for group-kind subject.
type GroupSubjectApplyConfiguration struct {
+ // name is the user group that matches, or "*" to match all user groups.
+ // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
+ // well-known group names.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
index 0fe5feca..b1b3fcd5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitedprioritylevelconfiguration.go
@@ -20,11 +20,54 @@ package v1beta1
// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
// with apply.
+//
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// - How are requests for this priority level limited?
+// - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfigurationApplyConfiguration struct {
- AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"`
- LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
- BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
+ // `assuredConcurrencyShares` (ACS) configures the execution
+ // limit, which is a limit on the number of requests of this
+ // priority level that may be executing at a given time. ACS must
+ // be a positive number. The server's concurrency limit (SCL) is
+ // divided among the concurrency-controlled priority levels in
+ // proportion to their assured concurrency shares. This produces
+ // the assured concurrency value (ACV) --- the number of requests
+ // that may be executing at a time --- for each such priority
+ // level:
+ //
+ // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
+ //
+ // bigger numbers of ACS mean more reserved concurrent requests (at the
+ // expense of every other PL).
+ // This field has a default value of 30.
+ AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"`
+ // `limitResponse` indicates what to do with requests that can not be executed right now
+ LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `borrowingLimitPercent`, if present, configures a limit on how many
+ // seats this priority level can borrow from other priority levels.
+ // The limit is known as this level's BorrowingConcurrencyLimit
+ // (BorrowingCL) and is a limit on the total number of seats that this
+ // level may borrow at any one time.
+ // This field holds the ratio of that limit to the level's nominal
+ // concurrency limit. When this field is non-nil, it must hold a
+ // non-negative integer and the limit is calculated as follows.
+ //
+ // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
+ //
+ // The value of this field can be more than 100, implying that this
+ // priority level can borrow a number of seats that is greater than
+ // its own nominal concurrency limit (NominalCL).
+ // When this field is left `nil`, the limit is effectively infinite.
+ BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
}
// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
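The assured-concurrency and borrowing formulas quoted above, worked through with made-up numbers; the SCL, shares, and percentage below are assumptions chosen only to make the arithmetic concrete.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	scl := 600.0 // assumed server concurrency limit
	acs := map[string]float64{"catch-all": 30, "service-accounts": 30, "workloads": 60}
	var sumACS float64
	for _, v := range acs {
		sumACS += v
	}

	// ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
	acv := math.Ceil(scl * acs["workloads"] / sumACS)

	// BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 ),
	// here treating ACV as the level's nominal limit; values above 100 let a
	// level borrow more seats than it nominally owns.
	borrowingLimitPercent := 150.0
	borrowingCL := math.Round(acv * borrowingLimitPercent / 100.0)

	fmt.Printf("ACV=%v BorrowingCL=%v\n", acv, borrowingCL) // 300 and 450
}
```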
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
index 20e1b17b..a684a3f2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/limitresponse.go
@@ -24,8 +24,19 @@ import (
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
+//
+// LimitResponse defines how to handle requests that can not be executed right now.
type LimitResponseApplyConfiguration struct {
- Type *flowcontrolv1beta1.LimitResponseType `json:"type,omitempty"`
+ // `type` is "Queue" or "Reject".
+ // "Queue" means that requests that can not be executed upon arrival
+ // are held in a queue until they can be executed or a queuing limit
+ // is reached.
+ // "Reject" means that requests that can not be executed upon arrival
+ // are rejected.
+ // Required.
+ Type *flowcontrolv1beta1.LimitResponseType `json:"type,omitempty"`
+ // `queuing` holds the configuration parameters for queuing.
+ // This field may be non-empty only if `type` is `"Queue"`.
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
index 3c571ccb..4090136c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/nonresourcepolicyrule.go
@@ -20,8 +20,24 @@ package v1beta1
// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
// with apply.
+//
+// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
+// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
+// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs. If it is present, it must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+	// `nonResourceURLs` is a set of URL prefixes that a user should have access to and may not be empty.
+ // For example:
+ // - "/healthz" is legal
+ // - "/hea*" is illegal
+ // - "/hea" is legal but matches nothing
+ // - "/hea/*" also matches nothing
+ // - "/healthz/*" matches all per-component health checks.
+	// "*" matches all non-resource URLs. If it is present, it must be the only entry.
+ // Required.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
index 32a082dc..e77d0a7f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/policyruleswithsubjects.go
@@ -20,9 +20,23 @@ package v1beta1
// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
// with apply.
+//
+// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
+// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
+// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
+// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjectsApplyConfiguration struct {
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+	// `subjects` is the list of normal users, serviceaccounts, or groups that this rule cares about.
+ // There must be at least one member in this slice.
+ // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
+ // Required.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
+ // target resource.
+ // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
+ ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
+ // and the target non-resource URL.
NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
index a6707909..8fecf868 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
@@ -29,11 +29,19 @@ import (
// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
// with apply.
+//
+// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
}
// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
@@ -46,6 +54,26 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
return b
}
+// ExtractPriorityLevelConfigurationFrom extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// priorityLevelConfiguration must be an unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractPriorityLevelConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ b := &PriorityLevelConfigurationApplyConfiguration{}
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(priorityLevelConfiguration.Name)
+
+ b.WithKind("PriorityLevelConfiguration")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "")
}
-// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPriorityLevelConfigurationStatus extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the status subresource.
func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "status")
}
-func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(priorityLevelConfiguration.Name)
-
- b.WithKind("PriorityLevelConfiguration")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta1")
- return b, nil
-}
func (b PriorityLevelConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
index 74eda917..a2ebf71f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationcondition.go
@@ -25,12 +25,22 @@ import (
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
+//
+// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *flowcontrolv1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta1.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta1.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
index b5e773e8..f83ebcac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationreference.go
@@ -20,7 +20,11 @@ package v1beta1
// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
// with apply.
+//
+// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReferenceApplyConfiguration struct {
+ // `name` is the name of the priority level configuration being referenced
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
index 775f476d..6f5ccc07 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationspec.go
@@ -24,10 +24,28 @@ import (
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
+//
+// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *flowcontrolv1beta1.PriorityLevelEnablement `json:"type,omitempty"`
+ // `type` indicates whether this priority level is subject to
+ // limitation on request execution. A value of `"Exempt"` means
+ // that requests of this priority level are not subject to a limit
+ // (and thus are never queued) and do not detract from the
+ // capacity made available to other priority levels. A value of
+ // `"Limited"` means that (a) requests of this priority level
+ // _are_ subject to limits and (b) some of the server's limited
+ // capacity is made available exclusively to this priority level.
+ // Required.
+ Type *flowcontrolv1beta1.PriorityLevelEnablement `json:"type,omitempty"`
+ // `limited` specifies how requests are handled for a Limited priority level.
+ // This field must be non-empty if and only if `type` is `"Limited"`.
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
- Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
index 875b01ef..ef9af41b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfigurationstatus.go
@@ -20,7 +20,10 @@ package v1beta1
// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
// with apply.
+//
+// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatusApplyConfiguration struct {
+ // `conditions` is the current state of "request-priority".
Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
index 85a8b886..fc15548d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/queuingconfiguration.go
@@ -20,9 +20,32 @@ package v1beta1
// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
// with apply.
+//
+// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfigurationApplyConfiguration struct {
- Queues *int32 `json:"queues,omitempty"`
- HandSize *int32 `json:"handSize,omitempty"`
+ // `queues` is the number of queues for this priority level. The
+ // queues exist independently at each apiserver. The value must be
+ // positive. Setting it to 1 effectively precludes
+	// shuffle sharding and thus makes the distinguisher method of
+ // associated flow schemas irrelevant. This field has a default
+ // value of 64.
+ Queues *int32 `json:"queues,omitempty"`
+ // `handSize` is a small positive number that configures the
+ // shuffle sharding of requests into queues. When enqueuing a request
+ // at this priority level the request's flow identifier (a string
+ // pair) is hashed and the hash value is used to shuffle the list
+ // of queues and deal a hand of the size specified here. The
+ // request is put into one of the shortest queues in that hand.
+ // `handSize` must be no larger than `queues`, and should be
+ // significantly smaller (so that a few heavy flows do not
+ // saturate most of the queues). See the user-facing
+ // documentation for more extensive guidance on setting this
+ // field. This field has a default value of 8.
+ HandSize *int32 `json:"handSize,omitempty"`
+ // `queueLengthLimit` is the maximum number of requests allowed to
+ // be waiting in a given queue of this priority level at a time;
+ // excess requests are rejected. This value must be positive. If
+ // not specified, it will be defaulted to 50.
QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
index 5c67dad7..4322432e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/resourcepolicyrule.go
@@ -20,12 +20,46 @@ package v1beta1
// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
// with apply.
+//
+// ResourcePolicyRule is a predicate that matches some resource
+// requests, testing the request's verb and the target resource. A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and at least one member
+// of namespaces matches the request's namespace.
type ResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ClusterScope *bool `json:"clusterScope,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs and, if present, must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `apiGroups` is a list of matching API groups and may not be empty.
+ // "*" matches all API groups and, if present, must be the only entry.
+ // Required.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // `resources` is a list of matching resources (i.e., lowercase
+ // and plural) with, if desired, subresource. For example, [
+ // "services", "nodes/status" ]. This list may not be empty.
+ // "*" matches all resources and, if present, must be the only entry.
+ // Required.
+ Resources []string `json:"resources,omitempty"`
+ // `clusterScope` indicates whether to match requests that do not
+ // specify a namespace (which happens either because the resource
+ // is not namespaced or the request targets all namespaces).
+ // If this field is omitted or false then the `namespaces` field
+ // must contain a non-empty list.
+ ClusterScope *bool `json:"clusterScope,omitempty"`
+ // `namespaces` is a list of target namespaces that restricts
+ // matches. A request that specifies a target namespace matches
+ // only if either (a) this list contains that target namespace or
+ // (b) this list contains "*". Note that "*" matches any
+ // specified namespace but does not match a request that _does
+ // not specify_ a namespace (see the `clusterScope` field for
+ // that).
+ // This list may be empty, but only if `clusterScope` is true.
+ Namespaces []string `json:"namespaces,omitempty"`
}
// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
index 439e5ff7..aa176c99 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/serviceaccountsubject.go
@@ -20,9 +20,15 @@ package v1beta1
// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
// with apply.
+//
+// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubjectApplyConfiguration struct {
+ // `namespace` is the namespace of matching ServiceAccount objects.
+ // Required.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
+ // Required.
+ Name *string `json:"name,omitempty"`
}
// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
index 00050806..d3d48f2b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/subject.go
@@ -24,10 +24,18 @@ import (
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject matches the originator of a request, as identified by the request authentication system. There are three
+// ways of matching an originator; by user, group, or service account.
type SubjectApplyConfiguration struct {
- Kind *flowcontrolv1beta1.SubjectKind `json:"kind,omitempty"`
- User *UserSubjectApplyConfiguration `json:"user,omitempty"`
- Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `kind` indicates which one of the other fields is non-empty.
+ // Required
+ Kind *flowcontrolv1beta1.SubjectKind `json:"kind,omitempty"`
+ // `user` matches based on username.
+ User *UserSubjectApplyConfiguration `json:"user,omitempty"`
+ // `group` matches based on user group name.
+ Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `serviceAccount` matches ServiceAccounts.
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
index bc2deae4..82775c30 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/usersubject.go
@@ -20,7 +20,11 @@ package v1beta1
// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
// with apply.
+//
+// UserSubject holds detailed information for user-kind subject.
type UserSubjectApplyConfiguration struct {
+ // `name` is the username that matches, or "*" to match all usernames.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
index 0c02d9b3..52d22909 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/exemptprioritylevelconfiguration.go
@@ -20,9 +20,35 @@ package v1beta2
// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
// with apply.
+//
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. This value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
}
// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
index 3922c472..cbd6e4dd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowdistinguishermethod.go
@@ -24,7 +24,12 @@ import (
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
+//
+// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethodApplyConfiguration struct {
+ // `type` is the type of flow distinguisher method
+ // The supported types are "ByUser" and "ByNamespace".
+ // Required.
Type *flowcontrolv1beta2.FlowDistinguisherMethodType `json:"type,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
index db8cb397..394f24b9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschema.go
@@ -29,11 +29,20 @@ import (
// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
// with apply.
+//
+// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
+// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchemaApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
}
// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
@@ -46,6 +55,26 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
return b
}
+// ExtractFlowSchemaFrom extracts the applied configuration owned by fieldManager from
+// flowSchema for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API.
+// ExtractFlowSchemaFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractFlowSchemaFrom(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
+ b := &FlowSchemaApplyConfiguration{}
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.FlowSchema"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(flowSchema.Name)
+
+ b.WithKind("FlowSchema")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2")
+ return b, nil
+}
+
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "")
}
-// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractFlowSchemaStatus extracts the applied configuration owned by fieldManager from
+// flowSchema for the status subresource.
func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "status")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "status")
}
-func extractFlowSchema(flowSchema *flowcontrolv1beta2.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
- b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.FlowSchema"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(flowSchema.Name)
-
- b.WithKind("FlowSchema")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2")
- return b, nil
-}
func (b FlowSchemaApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
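The hunks above swap the unexported extractFlowSchema helper for an exported ExtractFlowSchemaFrom and drop the "Experimental!" markers. A rough sketch of the extract/modify-in-place/apply workflow those helpers describe follows; the clientset variable cs, the object name "example-flowschema", and the field manager "example-manager" are illustrative assumptions, not part of the vendored code.

package flowcontrolsketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
	"k8s.io/client-go/kubernetes"
)

// applyMatchingPrecedence reads the live FlowSchema, extracts the fields owned
// by "example-manager", tweaks one of them, and applies the result back.
func applyMatchingPrecedence(ctx context.Context, cs kubernetes.Interface) error {
	live, err := cs.FlowcontrolV1beta2().FlowSchemas().Get(ctx, "example-flowschema", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract only the fields previously applied by this field manager.
	ac, err := flowcontrolac.ExtractFlowSchema(live, "example-manager")
	if err != nil {
		return err
	}

	// Modify in place, then apply back under the same field manager.
	if ac.Spec == nil {
		ac.Spec = flowcontrolac.FlowSchemaSpec()
	}
	ac.Spec.WithMatchingPrecedence(500)

	applied, err := cs.FlowcontrolV1beta2().FlowSchemas().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	if err != nil {
		return err
	}
	fmt.Println("applied", applied.Name)
	return nil
}

The same pattern applies to the v1beta3 FlowSchema and the PriorityLevelConfiguration variants further down in this diff.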
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
index f47130ee..6bebc912 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemacondition.go
@@ -25,12 +25,22 @@ import (
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
+//
+// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaConditionApplyConfiguration struct {
- Type *flowcontrolv1beta2.FlowSchemaConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta2.FlowSchemaConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
index 6eab63bf..9e8e2398 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemaspec.go
@@ -20,11 +20,25 @@ package v1beta2
// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
// with apply.
+//
+// FlowSchemaSpec describes how the FlowSchema's specification looks like.
type FlowSchemaSpecApplyConfiguration struct {
+ // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
+ // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+ // Required.
PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
- MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
- DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
- Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
+ // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+ // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+ // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000].
+ // Note that if the precedence is not specified, it will be set to 1000 as default.
+ MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
+ // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+ // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+ DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
+ // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+ // at least one member of rules matches the request.
+ // if it is an empty slice, there will be no requests matching the FlowSchema.
+ Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
}
// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
index 70ac997e..8e2e8f2a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/flowschemastatus.go
@@ -20,7 +20,10 @@ package v1beta2
// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
// with apply.
+//
+// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatusApplyConfiguration struct {
+ // `conditions` is a list of the current states of FlowSchema.
Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
index 25207d7c..0b2bc7af 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/groupsubject.go
@@ -20,7 +20,13 @@ package v1beta2
// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
// with apply.
+//
+// GroupSubject holds detailed information for group-kind subject.
type GroupSubjectApplyConfiguration struct {
+ // name is the user group that matches, or "*" to match all user groups.
+ // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
+ // well-known group names.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
index 298dd463..2eca0e5c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitedprioritylevelconfiguration.go
@@ -20,11 +20,54 @@ package v1beta2
// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
// with apply.
+//
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// - How are requests for this priority level limited?
+// - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfigurationApplyConfiguration struct {
- AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"`
- LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
- BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
+ // `assuredConcurrencyShares` (ACS) configures the execution
+ // limit, which is a limit on the number of requests of this
+ // priority level that may be executing at a given time. ACS must
+ // be a positive number. The server's concurrency limit (SCL) is
+ // divided among the concurrency-controlled priority levels in
+ // proportion to their assured concurrency shares. This produces
+ // the assured concurrency value (ACV) --- the number of requests
+ // that may be executing at a time --- for each such priority
+ // level:
+ //
+ // ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )
+ //
+ // bigger numbers of ACS mean more reserved concurrent requests (at the
+ // expense of every other PL).
+ // This field has a default value of 30.
+ AssuredConcurrencyShares *int32 `json:"assuredConcurrencyShares,omitempty"`
+ // `limitResponse` indicates what to do with requests that can not be executed right now
+ LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `borrowingLimitPercent`, if present, configures a limit on how many
+ // seats this priority level can borrow from other priority levels.
+ // The limit is known as this level's BorrowingConcurrencyLimit
+ // (BorrowingCL) and is a limit on the total number of seats that this
+ // level may borrow at any one time.
+ // This field holds the ratio of that limit to the level's nominal
+ // concurrency limit. When this field is non-nil, it must hold a
+ // non-negative integer and the limit is calculated as follows.
+ //
+ // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
+ //
+ // The value of this field can be more than 100, implying that this
+ // priority level can borrow a number of seats that is greater than
+ // its own nominal concurrency limit (NominalCL).
+ // When this field is left `nil`, the limit is effectively infinite.
+ BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
}
// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
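The comments above define the nominal, lendable, and borrowing limits with explicit formulas. A small self-contained sketch of that arithmetic follows; the share and percentage values are made-up examples, not recommendations.

package main

import (
	"fmt"
	"math"
)

// Worked example of the formulas quoted in the field comments above.
func main() {
	serverCL := 600.0 // the apiserver's total concurrency limit (example value)
	shares := map[string]float64{"workload-low": 100, "workload-high": 40, "leader-election": 10}
	lendablePercent := map[string]float64{"workload-low": 50, "workload-high": 0, "leader-election": 0}
	borrowingLimitPercent := map[string]float64{"workload-low": 200, "workload-high": 100, "leader-election": 0}

	var sumNCS float64
	for _, s := range shares {
		sumNCS += s
	}
	for name, ncs := range shares {
		nominalCL := math.Ceil(serverCL * ncs / sumNCS)                             // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
		lendableCL := math.Round(nominalCL * lendablePercent[name] / 100.0)         // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
		borrowingCL := math.Round(nominalCL * borrowingLimitPercent[name] / 100.0)  // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
		fmt.Printf("%s: NominalCL=%.0f LendableCL=%.0f BorrowingCL=%.0f\n", name, nominalCL, lendableCL, borrowingCL)
	}
}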
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
index 58cd7800..6b4ea881 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/limitresponse.go
@@ -24,8 +24,19 @@ import (
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
+//
+// LimitResponse defines how to handle requests that can not be executed right now.
type LimitResponseApplyConfiguration struct {
- Type *flowcontrolv1beta2.LimitResponseType `json:"type,omitempty"`
+ // `type` is "Queue" or "Reject".
+ // "Queue" means that requests that can not be executed upon arrival
+ // are held in a queue until they can be executed or a queuing limit
+ // is reached.
+ // "Reject" means that requests that can not be executed upon arrival
+ // are rejected.
+ // Required.
+ Type *flowcontrolv1beta2.LimitResponseType `json:"type,omitempty"`
+ // `queuing` holds the configuration parameters for queuing.
+ // This field may be non-empty only if `type` is `"Queue"`.
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
index 5032ee48..8bfd3363 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/nonresourcepolicyrule.go
@@ -20,8 +20,24 @@ package v1beta2
// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
// with apply.
+//
+// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
+// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
+// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs. If it is present, it must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
+ // For example:
+ // - "/healthz" is legal
+ // - "/hea*" is illegal
+ // - "/hea" is legal but matches nothing
+ // - "/hea/*" also matches nothing
+ // - "/healthz/*" matches all per-component health checks.
+ // "*" matches all non-resource urls. if it is present, it must be the only entry.
+ // Required.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
index 2bb8c871..6a42bdf8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/policyruleswithsubjects.go
@@ -20,9 +20,23 @@ package v1beta2
// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
// with apply.
+//
+// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
+// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
+// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
+// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjectsApplyConfiguration struct {
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // subjects is the list of normal user, serviceaccount, or group that this rule cares about.
+ // There must be at least one member in this slice.
+ // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
+ // Required.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
+ // target resource.
+ // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
+ ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
+ // and the target non-resource URL.
NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
}
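Alongside these structs, the package generates fluent With... builders. A minimal sketch of assembling the kind of rule described above; the group name, verbs, and resources are illustrative choices.

package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
)

// Builds one PolicyRulesWithSubjects entry: it matches a request only if a
// subject matches and at least one resource rule matches, per the comment above.
func main() {
	rule := flowcontrolac.PolicyRulesWithSubjects().
		WithSubjects(flowcontrolac.Subject().
			WithKind(flowcontrolv1beta2.SubjectKindGroup).
			WithGroup(flowcontrolac.GroupSubject().WithName("system:authenticated"))).
		WithResourceRules(flowcontrolac.ResourcePolicyRule().
			WithVerbs("get", "list", "watch").
			WithAPIGroups("").
			WithResources("pods").
			WithClusterScope(true).
			WithNamespaces("*"))

	fmt.Printf("%+v\n", *rule)
}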
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
index 7cb04bb7..a60827b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfiguration.go
@@ -29,11 +29,19 @@ import (
// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
// with apply.
+//
+// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
}
// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
@@ -46,6 +54,26 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
return b
}
+// ExtractPriorityLevelConfigurationFrom extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractPriorityLevelConfigurationFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ b := &PriorityLevelConfigurationApplyConfiguration{}
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(priorityLevelConfiguration.Name)
+
+ b.WithKind("PriorityLevelConfiguration")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2")
+ return b, nil
+}
+
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "")
}
-// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPriorityLevelConfigurationStatus extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the status subresource.
func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "status")
}
-func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta2.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta2.PriorityLevelConfiguration"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(priorityLevelConfiguration.Name)
-
- b.WithKind("PriorityLevelConfiguration")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta2")
- return b, nil
-}
func (b PriorityLevelConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
index caf517be..26a8b662 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationcondition.go
@@ -25,12 +25,22 @@ import (
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
+//
+// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *flowcontrolv1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta2.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta2.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
index bbf718b6..67c6b63a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationreference.go
@@ -20,7 +20,11 @@ package v1beta2
// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
// with apply.
+//
+// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReferenceApplyConfiguration struct {
+ // `name` is the name of the priority level configuration being referenced
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
index c680ea1e..4c4b743c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationspec.go
@@ -24,10 +24,28 @@ import (
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
+//
+// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *flowcontrolv1beta2.PriorityLevelEnablement `json:"type,omitempty"`
+ // `type` indicates whether this priority level is subject to
+ // limitation on request execution. A value of `"Exempt"` means
+ // that requests of this priority level are not subject to a limit
+ // (and thus are never queued) and do not detract from the
+ // capacity made available to other priority levels. A value of
+ // `"Limited"` means that (a) requests of this priority level
+ // _are_ subject to limits and (b) some of the server's limited
+ // capacity is made available exclusively to this priority level.
+ // Required.
+ Type *flowcontrolv1beta2.PriorityLevelEnablement `json:"type,omitempty"`
+ // `limited` specifies how requests are handled for a Limited priority level.
+ // This field must be non-empty if and only if `type` is `"Limited"`.
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
- Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
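A Limited priority level of the shape described above, type "Limited" with a "Queue" limit response, can be assembled with the same builders. The numbers below are illustrative and echo the documented defaults (ACS 30, queues 64, handSize 8, queueLengthLimit 50).

package main

import (
	"fmt"

	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	flowcontrolac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2"
)

// Assembles a Limited priority level spec with queuing, as described above.
func main() {
	spec := flowcontrolac.PriorityLevelConfigurationSpec().
		WithType(flowcontrolv1beta2.PriorityLevelEnablementLimited).
		WithLimited(flowcontrolac.LimitedPriorityLevelConfiguration().
			WithAssuredConcurrencyShares(30).
			WithLendablePercent(0).
			WithLimitResponse(flowcontrolac.LimitResponse().
				WithType(flowcontrolv1beta2.LimitResponseTypeQueue).
				WithQueuing(flowcontrolac.QueuingConfiguration().
					WithQueues(64).
					WithHandSize(8).
					WithQueueLengthLimit(50))))

	fmt.Printf("%+v\n", *spec)
}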
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
index 7a1f8790..da9990cf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/prioritylevelconfigurationstatus.go
@@ -20,7 +20,10 @@ package v1beta2
// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
// with apply.
+//
+// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatusApplyConfiguration struct {
+ // `conditions` is the current state of "request-priority".
Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
index 19c34c5f..e145ab83 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/queuingconfiguration.go
@@ -20,9 +20,32 @@ package v1beta2
// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
// with apply.
+//
+// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfigurationApplyConfiguration struct {
- Queues *int32 `json:"queues,omitempty"`
- HandSize *int32 `json:"handSize,omitempty"`
+ // `queues` is the number of queues for this priority level. The
+ // queues exist independently at each apiserver. The value must be
+ // positive. Setting it to 1 effectively precludes
+ // shufflesharding and thus makes the distinguisher method of
+ // associated flow schemas irrelevant. This field has a default
+ // value of 64.
+ Queues *int32 `json:"queues,omitempty"`
+ // `handSize` is a small positive number that configures the
+ // shuffle sharding of requests into queues. When enqueuing a request
+ // at this priority level the request's flow identifier (a string
+ // pair) is hashed and the hash value is used to shuffle the list
+ // of queues and deal a hand of the size specified here. The
+ // request is put into one of the shortest queues in that hand.
+ // `handSize` must be no larger than `queues`, and should be
+ // significantly smaller (so that a few heavy flows do not
+ // saturate most of the queues). See the user-facing
+ // documentation for more extensive guidance on setting this
+ // field. This field has a default value of 8.
+ HandSize *int32 `json:"handSize,omitempty"`
+ // `queueLengthLimit` is the maximum number of requests allowed to
+ // be waiting in a given queue of this priority level at a time;
+ // excess requests are rejected. This value must be positive. If
+ // not specified, it will be defaulted to 50.
QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
}
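The queues/handSize comments above describe shuffle sharding: hash the flow identifier, deal a hand of handSize queues, and enqueue into the shortest queue in that hand. A toy illustration of that idea follows; it is not the apiserver's actual dealer (which lives in k8s.io/apiserver), just the described selection rule.

package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// pickQueue hashes the flow identifier, deals a hand of handSize queue
// indices, and returns the index of the shortest queue in that hand.
func pickQueue(flowID string, queueLengths []int, handSize int) int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	rng := rand.New(rand.NewSource(int64(h.Sum64()))) // deterministic per flow

	// Shuffle the queue indices and take the first handSize as the "hand".
	hand := rng.Perm(len(queueLengths))[:handSize]

	best := hand[0]
	for _, q := range hand[1:] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}

func main() {
	lengths := make([]int, 64) // queues defaults to 64
	lengths[3], lengths[10] = 5, 2
	fmt.Println("chosen queue:", pickQueue("user:alice,fs:service-accounts", lengths, 8)) // handSize defaults to 8
}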
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
index 070d2ed4..97b1012a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/resourcepolicyrule.go
@@ -20,12 +20,46 @@ package v1beta2
// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
// with apply.
+//
+// ResourcePolicyRule is a predicate that matches some resource
+// requests, testing the request's verb and the target resource. A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and least one member
+// of namespaces matches the request's namespace.
type ResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ClusterScope *bool `json:"clusterScope,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs and, if present, must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `apiGroups` is a list of matching API groups and may not be empty.
+ // "*" matches all API groups and, if present, must be the only entry.
+ // Required.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // `resources` is a list of matching resources (i.e., lowercase
+ // and plural) with, if desired, subresource. For example, [
+ // "services", "nodes/status" ]. This list may not be empty.
+ // "*" matches all resources and, if present, must be the only entry.
+ // Required.
+ Resources []string `json:"resources,omitempty"`
+ // `clusterScope` indicates whether to match requests that do not
+ // specify a namespace (which happens either because the resource
+ // is not namespaced or the request targets all namespaces).
+ // If this field is omitted or false then the `namespaces` field
+ // must contain a non-empty list.
+ ClusterScope *bool `json:"clusterScope,omitempty"`
+ // `namespaces` is a list of target namespaces that restricts
+ // matches. A request that specifies a target namespace matches
+ // only if either (a) this list contains that target namespace or
+ // (b) this list contains "*". Note that "*" matches any
+ // specified namespace but does not match a request that _does
+ // not specify_ a namespace (see the `clusterScope` field for
+ // that).
+ // This list may be empty, but only if `clusterScope` is true.
+ Namespaces []string `json:"namespaces,omitempty"`
}
// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
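The ResourcePolicyRule comment spells out a four-part matching predicate. A compact sketch of those conditions written out follows, using illustrative local types rather than the apiserver's real matcher.

package main

import "fmt"

// resourceRule mirrors the fields the predicate above cares about.
type resourceRule struct {
	Verbs, APIGroups, Resources, Namespaces []string
	ClusterScope                            bool
}

// matches reports whether v is in list, treating "*" as a wildcard.
func matches(list []string, v string) bool {
	for _, m := range list {
		if m == "*" || m == v {
			return true
		}
	}
	return false
}

// Matches implements conditions (a) through (d) from the comment above.
func (r resourceRule) Matches(verb, apiGroup, resource, namespace string) bool {
	if !matches(r.Verbs, verb) || !matches(r.APIGroups, apiGroup) || !matches(r.Resources, resource) {
		return false
	}
	if namespace == "" { // (d1) request does not specify a namespace
		return r.ClusterScope
	}
	return matches(r.Namespaces, namespace) // (d2) namespaced request
}

func main() {
	r := resourceRule{Verbs: []string{"get"}, APIGroups: []string{""}, Resources: []string{"pods"}, Namespaces: []string{"*"}}
	fmt.Println(r.Matches("get", "", "pods", "default")) // true
}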
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
index c0d44721..0e41716d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/serviceaccountsubject.go
@@ -20,9 +20,15 @@ package v1beta2
// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
// with apply.
+//
+// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubjectApplyConfiguration struct {
+ // `namespace` is the namespace of matching ServiceAccount objects.
+ // Required.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
+ // Required.
+ Name *string `json:"name,omitempty"`
}
// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
index 2b569a62..d275444e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/subject.go
@@ -24,10 +24,18 @@ import (
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject matches the originator of a request, as identified by the request authentication system. There are three
+// ways of matching an originator; by user, group, or service account.
type SubjectApplyConfiguration struct {
- Kind *flowcontrolv1beta2.SubjectKind `json:"kind,omitempty"`
- User *UserSubjectApplyConfiguration `json:"user,omitempty"`
- Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `kind` indicates which one of the other fields is non-empty.
+ // Required
+ Kind *flowcontrolv1beta2.SubjectKind `json:"kind,omitempty"`
+ // `user` matches based on username.
+ User *UserSubjectApplyConfiguration `json:"user,omitempty"`
+ // `group` matches based on user group name.
+ Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `serviceAccount` matches ServiceAccounts.
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
index c249f042..4de656a9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta2/usersubject.go
@@ -20,7 +20,11 @@ package v1beta2
// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
// with apply.
+//
+// UserSubject holds detailed information for user-kind subject.
type UserSubjectApplyConfiguration struct {
+ // `name` is the username that matches, or "*" to match all usernames.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
index b9bf6993..410a79b7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/exemptprioritylevelconfiguration.go
@@ -20,9 +20,35 @@ package v1beta3
// ExemptPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the ExemptPriorityLevelConfiguration type for use
// with apply.
+//
+// ExemptPriorityLevelConfiguration describes the configurable aspects
+// of the handling of exempt requests.
+// In the mandatory exempt configuration object the values in the fields
+// here can be modified by authorized users, unlike the rest of the `spec`.
type ExemptPriorityLevelConfigurationApplyConfiguration struct {
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats nominally reserved for this priority level.
+ // This DOES NOT limit the dispatching from this priority level
+ // but affects the other priority levels through the borrowing mechanism.
+ // The server's concurrency limit (ServerCL) is divided among all the
+ // priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of zero.
NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. This value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
}
// ExemptPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the ExemptPriorityLevelConfiguration type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
index cc32fa10..de0f0491 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowdistinguishermethod.go
@@ -24,7 +24,12 @@ import (
// FlowDistinguisherMethodApplyConfiguration represents a declarative configuration of the FlowDistinguisherMethod type for use
// with apply.
+//
+// FlowDistinguisherMethod specifies the method of a flow distinguisher.
type FlowDistinguisherMethodApplyConfiguration struct {
+ // `type` is the type of flow distinguisher method
+ // The supported types are "ByUser" and "ByNamespace".
+ // Required.
Type *flowcontrolv1beta3.FlowDistinguisherMethodType `json:"type,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
index b20c8ce6..0079748b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschema.go
@@ -29,11 +29,20 @@ import (
// FlowSchemaApplyConfiguration represents a declarative configuration of the FlowSchema type for use
// with apply.
+//
+// FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with
+// similar attributes and is identified by a pair of strings: the name of the FlowSchema and a "flow distinguisher".
type FlowSchemaApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
- Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *FlowSchemaSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a FlowSchema.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *FlowSchemaStatusApplyConfiguration `json:"status,omitempty"`
}
// FlowSchema constructs a declarative configuration of the FlowSchema type for use with
@@ -46,6 +55,26 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
return b
}
+// ExtractFlowSchemaFrom extracts the applied configuration owned by fieldManager from
+// flowSchema for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API.
+// ExtractFlowSchemaFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractFlowSchemaFrom(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
+ b := &FlowSchemaApplyConfiguration{}
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.FlowSchema"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(flowSchema.Name)
+
+ b.WithKind("FlowSchema")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3")
+ return b, nil
+}
+
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "")
}
-// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractFlowSchemaStatus extracts the applied configuration owned by fieldManager from
+// flowSchema for the status subresource.
func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
- return extractFlowSchema(flowSchema, fieldManager, "status")
+ return ExtractFlowSchemaFrom(flowSchema, fieldManager, "status")
}
-func extractFlowSchema(flowSchema *flowcontrolv1beta3.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
- b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.FlowSchema"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(flowSchema.Name)
-
- b.WithKind("FlowSchema")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3")
- return b, nil
-}
func (b FlowSchemaApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
index d5ba21f7..de2aff56 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemacondition.go
@@ -25,12 +25,22 @@ import (
// FlowSchemaConditionApplyConfiguration represents a declarative configuration of the FlowSchemaCondition type for use
// with apply.
+//
+// FlowSchemaCondition describes conditions for a FlowSchema.
type FlowSchemaConditionApplyConfiguration struct {
- Type *flowcontrolv1beta3.FlowSchemaConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta3.FlowSchemaConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// FlowSchemaConditionApplyConfiguration constructs a declarative configuration of the FlowSchemaCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
index 7141f6a6..6d30db12 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemaspec.go
@@ -20,11 +20,25 @@ package v1beta3
// FlowSchemaSpecApplyConfiguration represents a declarative configuration of the FlowSchemaSpec type for use
// with apply.
+//
+// FlowSchemaSpec describes how the FlowSchema's specification looks like.
type FlowSchemaSpecApplyConfiguration struct {
+ // `priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot
+ // be resolved, the FlowSchema will be ignored and marked as invalid in its status.
+ // Required.
PriorityLevelConfiguration *PriorityLevelConfigurationReferenceApplyConfiguration `json:"priorityLevelConfiguration,omitempty"`
- MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
- DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
- Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
+ // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
+ // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
+ // MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000].
+ // Note that if the precedence is not specified, it will be set to 1000 as default.
+ MatchingPrecedence *int32 `json:"matchingPrecedence,omitempty"`
+ // `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
+ // `nil` specifies that the distinguisher is disabled and thus will always be the empty string.
+ DistinguisherMethod *FlowDistinguisherMethodApplyConfiguration `json:"distinguisherMethod,omitempty"`
+ // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
+ // at least one member of rules matches the request.
+ // if it is an empty slice, there will be no requests matching the FlowSchema.
+ Rules []PolicyRulesWithSubjectsApplyConfiguration `json:"rules,omitempty"`
}
// FlowSchemaSpecApplyConfiguration constructs a declarative configuration of the FlowSchemaSpec type for use with
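
As a quick illustration of the spec fields documented above, the generated With* builders in this package can be chained to produce a FlowSchemaSpec apply configuration. This is a hedged sketch: the constructor and builder names follow the standard client-go codegen pattern (FlowSchemaSpec(), PolicyRulesWithSubjects(), Subject(), and so on), and the "workload-low" priority level and precedence value are hypothetical.

package example

import (
	flowcontrolv1beta3ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
)

// exampleFlowSchemaSpec routes GET requests to /healthz from any authenticated
// group to a hypothetical "workload-low" priority level, with a
// matchingPrecedence below the 1000 default so it wins over default schemas.
func exampleFlowSchemaSpec() *flowcontrolv1beta3ac.FlowSchemaSpecApplyConfiguration {
	return flowcontrolv1beta3ac.FlowSchemaSpec().
		WithPriorityLevelConfiguration(
			flowcontrolv1beta3ac.PriorityLevelConfigurationReference().WithName("workload-low")).
		WithMatchingPrecedence(500).
		WithRules(flowcontrolv1beta3ac.PolicyRulesWithSubjects().
			WithSubjects(flowcontrolv1beta3ac.Subject().
				WithKind("Group").
				WithGroup(flowcontrolv1beta3ac.GroupSubject().WithName("system:authenticated"))).
			WithNonResourceRules(flowcontrolv1beta3ac.NonResourcePolicyRule().
				WithVerbs("get").
				WithNonResourceURLs("/healthz")))
}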
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
index 294ddc90..0405b071 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/flowschemastatus.go
@@ -20,7 +20,10 @@ package v1beta3
// FlowSchemaStatusApplyConfiguration represents a declarative configuration of the FlowSchemaStatus type for use
// with apply.
+//
+// FlowSchemaStatus represents the current state of a FlowSchema.
type FlowSchemaStatusApplyConfiguration struct {
+ // `conditions` is a list of the current states of FlowSchema.
Conditions []FlowSchemaConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
index 6576e716..2c5abe56 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/groupsubject.go
@@ -20,7 +20,13 @@ package v1beta3
// GroupSubjectApplyConfiguration represents a declarative configuration of the GroupSubject type for use
// with apply.
+//
+// GroupSubject holds detailed information for group-kind subject.
type GroupSubjectApplyConfiguration struct {
+ // name is the user group that matches, or "*" to match all user groups.
+ // See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some
+ // well-known group names.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
index bd98dd68..12173e83 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitedprioritylevelconfiguration.go
@@ -20,11 +20,54 @@ package v1beta3
// LimitedPriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the LimitedPriorityLevelConfiguration type for use
// with apply.
+//
+// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
+// It addresses two issues:
+// - How are requests for this priority level limited?
+// - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfigurationApplyConfiguration struct {
- NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
- LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
- LendablePercent *int32 `json:"lendablePercent,omitempty"`
- BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
+ // `nominalConcurrencyShares` (NCS) contributes to the computation of the
+ // NominalConcurrencyLimit (NominalCL) of this level.
+ // This is the number of execution seats available at this priority level.
+ // This is used both for requests dispatched from this priority level
+ // as well as requests dispatched from other priority levels
+ // borrowing seats from this level.
+ // The server's concurrency limit (ServerCL) is divided among the
+ // Limited priority levels in proportion to their NCS values:
+ //
+ // NominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs )
+ // sum_ncs = sum[priority level k] NCS(k)
+ //
+ // Bigger numbers mean a larger nominal concurrency limit,
+ // at the expense of every other priority level.
+ // This field has a default value of 30.
+ NominalConcurrencyShares *int32 `json:"nominalConcurrencyShares,omitempty"`
+ // `limitResponse` indicates what to do with requests that can not be executed right now
+ LimitResponse *LimitResponseApplyConfiguration `json:"limitResponse,omitempty"`
+ // `lendablePercent` prescribes the fraction of the level's NominalCL that
+ // can be borrowed by other priority levels. The value of this
+ // field must be between 0 and 100, inclusive, and it defaults to 0.
+ // The number of seats that other levels can borrow from this level, known
+ // as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.
+ //
+ // LendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )
+ LendablePercent *int32 `json:"lendablePercent,omitempty"`
+ // `borrowingLimitPercent`, if present, configures a limit on how many
+ // seats this priority level can borrow from other priority levels.
+ // The limit is known as this level's BorrowingConcurrencyLimit
+ // (BorrowingCL) and is a limit on the total number of seats that this
+ // level may borrow at any one time.
+ // This field holds the ratio of that limit to the level's nominal
+ // concurrency limit. When this field is non-nil, it must hold a
+ // non-negative integer and the limit is calculated as follows.
+ //
+ // BorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )
+ //
+ // The value of this field can be more than 100, implying that this
+ // priority level can borrow a number of seats that is greater than
+ // its own nominal concurrency limit (NominalCL).
+ // When this field is left `nil`, the limit is effectively infinite.
+ BorrowingLimitPercent *int32 `json:"borrowingLimitPercent,omitempty"`
}
// LimitedPriorityLevelConfigurationApplyConfiguration constructs a declarative configuration of the LimitedPriorityLevelConfiguration type for use with
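
The NominalCL and LendableCL arithmetic spelled out in the comments above can be made concrete with a small standalone sketch. The server concurrency limit, NCS values, and lendable percentages below are hypothetical and are not part of the vendored package.

package main

import (
	"fmt"
	"math"
)

type level struct {
	ncs             int32 // nominalConcurrencyShares (defaults to 30)
	lendablePercent int32 // defaults to 0
}

// concurrencyLimits applies NominalCL(i) = ceil(ServerCL * NCS(i) / sum_ncs)
// and LendableCL(i) = round(NominalCL(i) * lendablePercent(i)/100.0) to a set
// of Limited priority levels, returning {nominal, lendable} per level.
func concurrencyLimits(serverCL int32, levels map[string]level) map[string][2]int32 {
	var sum int32
	for _, l := range levels {
		sum += l.ncs
	}
	out := make(map[string][2]int32, len(levels))
	for name, l := range levels {
		nominal := int32(math.Ceil(float64(serverCL) * float64(l.ncs) / float64(sum)))
		lendable := int32(math.Round(float64(nominal) * float64(l.lendablePercent) / 100.0))
		out[name] = [2]int32{nominal, lendable}
	}
	return out
}

func main() {
	fmt.Println(concurrencyLimits(600, map[string]level{
		"workload-low":  {ncs: 30, lendablePercent: 50},
		"workload-high": {ncs: 60},
	}))
}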
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
index 2c289c77..6f253a2a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/limitresponse.go
@@ -24,8 +24,19 @@ import (
// LimitResponseApplyConfiguration represents a declarative configuration of the LimitResponse type for use
// with apply.
+//
+// LimitResponse defines how to handle requests that can not be executed right now.
type LimitResponseApplyConfiguration struct {
- Type *flowcontrolv1beta3.LimitResponseType `json:"type,omitempty"`
+ // `type` is "Queue" or "Reject".
+ // "Queue" means that requests that can not be executed upon arrival
+ // are held in a queue until they can be executed or a queuing limit
+ // is reached.
+ // "Reject" means that requests that can not be executed upon arrival
+ // are rejected.
+ // Required.
+ Type *flowcontrolv1beta3.LimitResponseType `json:"type,omitempty"`
+ // `queuing` holds the configuration parameters for queuing.
+ // This field may be non-empty only if `type` is `"Queue"`.
Queuing *QueuingConfigurationApplyConfiguration `json:"queuing,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
index 2dd0d2b0..6e350a17 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/nonresourcepolicyrule.go
@@ -20,8 +20,24 @@ package v1beta3
// NonResourcePolicyRuleApplyConfiguration represents a declarative configuration of the NonResourcePolicyRule type for use
// with apply.
+//
+// NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the
+// target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member
+// of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.
type NonResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs. If it is present, it must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty.
+ // For example:
+ // - "/healthz" is legal
+ // - "/hea*" is illegal
+ // - "/hea" is legal but matches nothing
+ // - "/hea/*" also matches nothing
+ // - "/healthz/*" matches all per-component health checks.
+ // "*" matches all non-resource urls. if it is present, it must be the only entry.
+ // Required.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
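
The URL matching examples listed above translate into a simple predicate. The sketch below illustrates the documented semantics only (exact match, "*" matches everything, a trailing "/*" matches deeper paths) and is not the apiserver's implementation.

package main

import (
	"fmt"
	"strings"
)

// matchesNonResourceURL reports whether path matches any entry: "*" matches
// every non-resource URL, a trailing "/*" matches any deeper path under that
// prefix, and any other entry must match exactly (so "/hea" matches nothing).
func matchesNonResourceURL(entries []string, path string) bool {
	for _, e := range entries {
		switch {
		case e == "*":
			return true
		case strings.HasSuffix(e, "/*") && strings.HasPrefix(path, strings.TrimSuffix(e, "*")):
			return true
		case e == path:
			return true
		}
	}
	return false
}

func main() {
	rule := []string{"/healthz", "/healthz/*"}
	fmt.Println(matchesNonResourceURL(rule, "/healthz"))      // true
	fmt.Println(matchesNonResourceURL(rule, "/healthz/etcd")) // true
	fmt.Println(matchesNonResourceURL(rule, "/livez"))        // false
}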
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
index cc64dc58..2588bf99 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/policyruleswithsubjects.go
@@ -20,9 +20,23 @@ package v1beta3
// PolicyRulesWithSubjectsApplyConfiguration represents a declarative configuration of the PolicyRulesWithSubjects type for use
// with apply.
+//
+// PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject
+// making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches
+// a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member
+// of resourceRules or nonResourceRules matches the request.
type PolicyRulesWithSubjectsApplyConfiguration struct {
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // subjects is the list of normal user, serviceaccount, or group that this rule cares about.
+ // There must be at least one member in this slice.
+ // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
+ // Required.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
+ // target resource.
+ // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
+ ResourceRules []ResourcePolicyRuleApplyConfiguration `json:"resourceRules,omitempty"`
+ // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
+ // and the target non-resource URL.
NonResourceRules []NonResourcePolicyRuleApplyConfiguration `json:"nonResourceRules,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
index 9a0dad02..910cb4c2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfiguration.go
@@ -29,11 +29,19 @@ import (
// PriorityLevelConfigurationApplyConfiguration represents a declarative configuration of the PriorityLevelConfiguration type for use
// with apply.
+//
+// PriorityLevelConfiguration represents the configuration of a priority level.
type PriorityLevelConfigurationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // `metadata` is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
+ // `spec` is the specification of the desired behavior of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *PriorityLevelConfigurationSpecApplyConfiguration `json:"spec,omitempty"`
+ // `status` is the current status of a "request-priority".
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *PriorityLevelConfigurationStatusApplyConfiguration `json:"status,omitempty"`
}
// PriorityLevelConfiguration constructs a declarative configuration of the PriorityLevelConfiguration type for use with
@@ -46,6 +54,26 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
return b
}
+// ExtractPriorityLevelConfigurationFrom extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// priorityLevelConfiguration must be an unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
+// ExtractPriorityLevelConfigurationFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ b := &PriorityLevelConfigurationApplyConfiguration{}
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(priorityLevelConfiguration.Name)
+
+ b.WithKind("PriorityLevelConfiguration")
+ b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3")
+ return b, nil
+}
+
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +84,16 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "")
}
-// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPriorityLevelConfigurationStatus extracts the applied configuration owned by fieldManager from
+// priorityLevelConfiguration for the status subresource.
func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+ return ExtractPriorityLevelConfigurationFrom(priorityLevelConfiguration, fieldManager, "status")
}
-func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta3.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
- b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(priorityLevelConfiguration.Name)
-
- b.WithKind("PriorityLevelConfiguration")
- b.WithAPIVersion("flowcontrol.apiserver.k8s.io/v1beta3")
- return b, nil
-}
func (b PriorityLevelConfigurationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
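
The newly exported ExtractPriorityLevelConfigurationFrom makes the extract/modify-in-place/apply workflow available directly to callers. A minimal sketch of that flow follows; the clientset wiring and the "my-manager" field manager name are assumptions, and the typed Apply call is the standard generated client method rather than anything added by this change.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	flowcontrolv1beta3ac "k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3"
	"k8s.io/client-go/kubernetes"
)

// reapplyOwnedFields re-applies only the fields of a PriorityLevelConfiguration
// that are owned by the "my-manager" field manager.
func reapplyOwnedFields(ctx context.Context, cs kubernetes.Interface, name string) error {
	live, err := cs.FlowcontrolV1beta3().PriorityLevelConfigurations().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// An empty subresource string extracts the main resource.
	ac, err := flowcontrolv1beta3ac.ExtractPriorityLevelConfigurationFrom(live, "my-manager", "")
	if err != nil {
		return err
	}
	// ...mutate the fields this manager owns on ac here...
	_, err = cs.FlowcontrolV1beta3().PriorityLevelConfigurations().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "my-manager", Force: true})
	return err
}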
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
index 01695f14..bd14650a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationcondition.go
@@ -25,12 +25,22 @@ import (
// PriorityLevelConfigurationConditionApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationCondition type for use
// with apply.
+//
+// PriorityLevelConfigurationCondition defines the condition of priority level.
type PriorityLevelConfigurationConditionApplyConfiguration struct {
- Type *flowcontrolv1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
- Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
- LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // `type` is the type of the condition.
+ // Required.
+ Type *flowcontrolv1beta3.PriorityLevelConfigurationConditionType `json:"type,omitempty"`
+ // `status` is the status of the condition.
+ // Can be True, False, Unknown.
+ // Required.
+ Status *flowcontrolv1beta3.ConditionStatus `json:"status,omitempty"`
+ // `lastTransitionTime` is the last time the condition transitioned from one status to another.
+ LastTransitionTime *v1.Time `json:"lastTransitionTime,omitempty"`
+ // `reason` is a unique, one-word, CamelCase reason for the condition's last transition.
+ Reason *string `json:"reason,omitempty"`
+ // `message` is a human-readable message indicating details about last transition.
+ Message *string `json:"message,omitempty"`
}
// PriorityLevelConfigurationConditionApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationCondition type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
index 566aaa91..7221983d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationreference.go
@@ -20,7 +20,11 @@ package v1beta3
// PriorityLevelConfigurationReferenceApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationReference type for use
// with apply.
+//
+// PriorityLevelConfigurationReference contains information that points to the "request-priority" being used.
type PriorityLevelConfigurationReferenceApplyConfiguration struct {
+ // `name` is the name of the priority level configuration being referenced
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
index c9508547..3e3f107e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationspec.go
@@ -24,10 +24,28 @@ import (
// PriorityLevelConfigurationSpecApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationSpec type for use
// with apply.
+//
+// PriorityLevelConfigurationSpec specifies the configuration of a priority level.
type PriorityLevelConfigurationSpecApplyConfiguration struct {
- Type *flowcontrolv1beta3.PriorityLevelEnablement `json:"type,omitempty"`
+ // `type` indicates whether this priority level is subject to
+ // limitation on request execution. A value of `"Exempt"` means
+ // that requests of this priority level are not subject to a limit
+ // (and thus are never queued) and do not detract from the
+ // capacity made available to other priority levels. A value of
+ // `"Limited"` means that (a) requests of this priority level
+ // _are_ subject to limits and (b) some of the server's limited
+ // capacity is made available exclusively to this priority level.
+ // Required.
+ Type *flowcontrolv1beta3.PriorityLevelEnablement `json:"type,omitempty"`
+ // `limited` specifies how requests are handled for a Limited priority level.
+ // This field must be non-empty if and only if `type` is `"Limited"`.
Limited *LimitedPriorityLevelConfigurationApplyConfiguration `json:"limited,omitempty"`
- Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
+ // `exempt` specifies how requests are handled for an exempt priority level.
+ // This field MUST be empty if `type` is `"Limited"`.
+ // This field MAY be non-empty if `type` is `"Exempt"`.
+ // If empty and `type` is `"Exempt"` then the default values
+ // for `ExemptPriorityLevelConfiguration` apply.
+ Exempt *ExemptPriorityLevelConfigurationApplyConfiguration `json:"exempt,omitempty"`
}
// PriorityLevelConfigurationSpecApplyConfiguration constructs a declarative configuration of the PriorityLevelConfigurationSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
index be243645..1d5e87a0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/prioritylevelconfigurationstatus.go
@@ -20,7 +20,10 @@ package v1beta3
// PriorityLevelConfigurationStatusApplyConfiguration represents a declarative configuration of the PriorityLevelConfigurationStatus type for use
// with apply.
+//
+// PriorityLevelConfigurationStatus represents the current state of a "request-priority".
type PriorityLevelConfigurationStatusApplyConfiguration struct {
+ // `conditions` is the current state of "request-priority".
Conditions []PriorityLevelConfigurationConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
index f9a3c6d1..b73113b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/queuingconfiguration.go
@@ -20,9 +20,32 @@ package v1beta3
// QueuingConfigurationApplyConfiguration represents a declarative configuration of the QueuingConfiguration type for use
// with apply.
+//
+// QueuingConfiguration holds the configuration parameters for queuing
type QueuingConfigurationApplyConfiguration struct {
- Queues *int32 `json:"queues,omitempty"`
- HandSize *int32 `json:"handSize,omitempty"`
+ // `queues` is the number of queues for this priority level. The
+ // queues exist independently at each apiserver. The value must be
+ // positive. Setting it to 1 effectively precludes
+ // shufflesharding and thus makes the distinguisher method of
+ // associated flow schemas irrelevant. This field has a default
+ // value of 64.
+ Queues *int32 `json:"queues,omitempty"`
+ // `handSize` is a small positive number that configures the
+ // shuffle sharding of requests into queues. When enqueuing a request
+ // at this priority level the request's flow identifier (a string
+ // pair) is hashed and the hash value is used to shuffle the list
+ // of queues and deal a hand of the size specified here. The
+ // request is put into one of the shortest queues in that hand.
+ // `handSize` must be no larger than `queues`, and should be
+ // significantly smaller (so that a few heavy flows do not
+ // saturate most of the queues). See the user-facing
+ // documentation for more extensive guidance on setting this
+ // field. This field has a default value of 8.
+ HandSize *int32 `json:"handSize,omitempty"`
+ // `queueLengthLimit` is the maximum number of requests allowed to
+ // be waiting in a given queue of this priority level at a time;
+ // excess requests are rejected. This value must be positive. If
+ // not specified, it will be defaulted to 50.
QueueLengthLimit *int32 `json:"queueLengthLimit,omitempty"`
}
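
To make the queues/handSize interaction concrete, here is a small sketch of the selection idea described above: hash the flow identifier, deal a hand of queue indexes, and enqueue on the shortest queue in that hand. This is an illustration only, not the apiserver's shuffle-sharding code, and it assumes handSize is no larger than the number of queues.

package main

import (
	"fmt"
	"hash/fnv"
	"math/rand"
)

// pickQueue hashes flowID, uses the hash to deal a hand of handSize queue
// indexes, and returns the least-loaded queue in that hand.
func pickQueue(flowID string, queueLengths []int, handSize int) int {
	h := fnv.New64a()
	h.Write([]byte(flowID))
	r := rand.New(rand.NewSource(int64(h.Sum64())))

	hand := r.Perm(len(queueLengths))[:handSize]
	best := hand[0]
	for _, q := range hand[1:] {
		if queueLengths[q] < queueLengths[best] {
			best = q
		}
	}
	return best
}

func main() {
	lengths := make([]int, 64) // default queues = 64
	lengths[3] = 5             // one queue already has backlog
	fmt.Println(pickQueue("user-a/get-pods", lengths, 8)) // default handSize = 8
}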
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
index e38f711d..572c56e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/resourcepolicyrule.go
@@ -20,12 +20,46 @@ package v1beta3
// ResourcePolicyRuleApplyConfiguration represents a declarative configuration of the ResourcePolicyRule type for use
// with apply.
+//
+// ResourcePolicyRule is a predicate that matches some resource
+// requests, testing the request's verb and the target resource. A
+// ResourcePolicyRule matches a resource request if and only if: (a)
+// at least one member of verbs matches the request, (b) at least one
+// member of apiGroups matches the request, (c) at least one member of
+// resources matches the request, and (d) either (d1) the request does
+// not specify a namespace (i.e., `Namespace==""`) and clusterScope is
+// true or (d2) the request specifies a namespace and least one member
+// of namespaces matches the request's namespace.
type ResourcePolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ClusterScope *bool `json:"clusterScope,omitempty"`
- Namespaces []string `json:"namespaces,omitempty"`
+ // `verbs` is a list of matching verbs and may not be empty.
+ // "*" matches all verbs and, if present, must be the only entry.
+ // Required.
+ Verbs []string `json:"verbs,omitempty"`
+ // `apiGroups` is a list of matching API groups and may not be empty.
+ // "*" matches all API groups and, if present, must be the only entry.
+ // Required.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // `resources` is a list of matching resources (i.e., lowercase
+ // and plural) with, if desired, subresource. For example, [
+ // "services", "nodes/status" ]. This list may not be empty.
+ // "*" matches all resources and, if present, must be the only entry.
+ // Required.
+ Resources []string `json:"resources,omitempty"`
+ // `clusterScope` indicates whether to match requests that do not
+ // specify a namespace (which happens either because the resource
+ // is not namespaced or the request targets all namespaces).
+ // If this field is omitted or false then the `namespaces` field
+ // must contain a non-empty list.
+ ClusterScope *bool `json:"clusterScope,omitempty"`
+ // `namespaces` is a list of target namespaces that restricts
+ // matches. A request that specifies a target namespace matches
+ // only if either (a) this list contains that target namespace or
+ // (b) this list contains "*". Note that "*" matches any
+ // specified namespace but does not match a request that _does
+ // not specify_ a namespace (see the `clusterScope` field for
+ // that).
+ // This list may be empty, but only if `clusterScope` is true.
+ Namespaces []string `json:"namespaces,omitempty"`
}
// ResourcePolicyRuleApplyConfiguration constructs a declarative configuration of the ResourcePolicyRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
index a5ed40c2..a298d9d0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/serviceaccountsubject.go
@@ -20,9 +20,15 @@ package v1beta3
// ServiceAccountSubjectApplyConfiguration represents a declarative configuration of the ServiceAccountSubject type for use
// with apply.
+//
+// ServiceAccountSubject holds detailed information for service-account-kind subject.
type ServiceAccountSubjectApplyConfiguration struct {
+ // `namespace` is the namespace of matching ServiceAccount objects.
+ // Required.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // `name` is the name of matching ServiceAccount objects, or "*" to match regardless of name.
+ // Required.
+ Name *string `json:"name,omitempty"`
}
// ServiceAccountSubjectApplyConfiguration constructs a declarative configuration of the ServiceAccountSubject type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
index 46499f54..426c65e6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/subject.go
@@ -24,10 +24,18 @@ import (
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject matches the originator of a request, as identified by the request authentication system. There are three
+// ways of matching an originator; by user, group, or service account.
type SubjectApplyConfiguration struct {
- Kind *flowcontrolv1beta3.SubjectKind `json:"kind,omitempty"`
- User *UserSubjectApplyConfiguration `json:"user,omitempty"`
- Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `kind` indicates which one of the other fields is non-empty.
+ // Required
+ Kind *flowcontrolv1beta3.SubjectKind `json:"kind,omitempty"`
+ // `user` matches based on username.
+ User *UserSubjectApplyConfiguration `json:"user,omitempty"`
+ // `group` matches based on user group name.
+ Group *GroupSubjectApplyConfiguration `json:"group,omitempty"`
+ // `serviceAccount` matches ServiceAccounts.
ServiceAccount *ServiceAccountSubjectApplyConfiguration `json:"serviceAccount,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
index 7b3ec2ba..f17a99cf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta3/usersubject.go
@@ -20,7 +20,11 @@ package v1beta3
// UserSubjectApplyConfiguration represents a declarative configuration of the UserSubject type for use
// with apply.
+//
+// UserSubject holds detailed information for user-kind subject.
type UserSubjectApplyConfiguration struct {
+ // `name` is the username that matches, or "*" to match all usernames.
+ // Required.
Name *string `json:"name,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go
index 0d428e06..f63c86ce 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go
@@ -29,11 +29,17 @@ import (
// ImageReviewApplyConfiguration represents a declarative configuration of the ImageReview type for use
// with apply.
+//
+// ImageReview checks if the set of images in a pod are allowed.
type ImageReviewApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ImageReviewSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ImageReviewStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec holds information about the pod being evaluated
+ Spec *ImageReviewSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status is filled in by the backend and indicates whether the pod should be allowed.
+ Status *ImageReviewStatusApplyConfiguration `json:"status,omitempty"`
}
// ImageReview constructs a declarative configuration of the ImageReview type for use with
@@ -46,6 +52,26 @@ func ImageReview(name string) *ImageReviewApplyConfiguration {
return b
}
+// ExtractImageReviewFrom extracts the applied configuration owned by fieldManager from
+// imageReview for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// imageReview must be an unmodified ImageReview API object that was retrieved from the Kubernetes API.
+// ExtractImageReviewFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractImageReviewFrom(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
+ b := &ImageReviewApplyConfiguration{}
+ err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(imageReview.Name)
+
+ b.WithKind("ImageReview")
+ b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
+ return b, nil
+}
+
// ExtractImageReview extracts the applied configuration owned by fieldManager from
// imageReview. If no managedFields are found in imageReview for fieldManager, a
// ImageReviewApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +82,16 @@ func ImageReview(name string) *ImageReviewApplyConfiguration {
// ExtractImageReview provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
- return extractImageReview(imageReview, fieldManager, "")
+ return ExtractImageReviewFrom(imageReview, fieldManager, "")
}
-// ExtractImageReviewStatus is the same as ExtractImageReview except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractImageReviewStatus extracts the applied configuration owned by fieldManager from
+// imageReview for the status subresource.
func ExtractImageReviewStatus(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string) (*ImageReviewApplyConfiguration, error) {
- return extractImageReview(imageReview, fieldManager, "status")
+ return ExtractImageReviewFrom(imageReview, fieldManager, "status")
}
-func extractImageReview(imageReview *imagepolicyv1alpha1.ImageReview, fieldManager string, subresource string) (*ImageReviewApplyConfiguration, error) {
- b := &ImageReviewApplyConfiguration{}
- err := managedfields.ExtractInto(imageReview, internal.Parser().Type("io.k8s.api.imagepolicy.v1alpha1.ImageReview"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(imageReview.Name)
-
- b.WithKind("ImageReview")
- b.WithAPIVersion("imagepolicy.k8s.io/v1alpha1")
- return b, nil
-}
func (b ImageReviewApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go
index adfdb325..1f0193f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go
@@ -20,7 +20,10 @@ package v1alpha1
// ImageReviewContainerSpecApplyConfiguration represents a declarative configuration of the ImageReviewContainerSpec type for use
// with apply.
+//
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
type ImageReviewContainerSpecApplyConfiguration struct {
+ // This can be in the form image:tag or image@SHA:012345679abcdef.
Image *string `json:"image,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go
index 7efc36a3..14c6603f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go
@@ -20,10 +20,17 @@ package v1alpha1
// ImageReviewSpecApplyConfiguration represents a declarative configuration of the ImageReviewSpec type for use
// with apply.
+//
+// ImageReviewSpec is a description of the pod creation request.
type ImageReviewSpecApplyConfiguration struct {
- Containers []ImageReviewContainerSpecApplyConfiguration `json:"containers,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
+ // Containers is a list of a subset of the information in each container of the Pod being created.
+ Containers []ImageReviewContainerSpecApplyConfiguration `json:"containers,omitempty"`
+ // Annotations is a list of key-value pairs extracted from the Pod's annotations.
+ // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+ // It is up to each webhook backend to determine how to interpret these annotations, if at all.
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // Namespace is the namespace the pod is being created in.
+ Namespace *string `json:"namespace,omitempty"`
}
// ImageReviewSpecApplyConfiguration constructs a declarative configuration of the ImageReviewSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go
index e26a427e..52828a29 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go
@@ -20,9 +20,19 @@ package v1alpha1
// ImageReviewStatusApplyConfiguration represents a declarative configuration of the ImageReviewStatus type for use
// with apply.
+//
+// ImageReviewStatus is the result of the review for the pod creation request.
type ImageReviewStatusApplyConfiguration struct {
- Allowed *bool `json:"allowed,omitempty"`
- Reason *string `json:"reason,omitempty"`
+ // Allowed indicates that all images were allowed to be run.
+ Allowed *bool `json:"allowed,omitempty"`
+ // Reason should be empty unless Allowed is false in which case it
+ // may contain a short description of what is wrong. Kubernetes
+ // may truncate excessively long errors when displaying to the user.
+ Reason *string `json:"reason,omitempty"`
+ // AuditAnnotations will be added to the attributes object of the
+ // admission controller request using 'AddAnnotation'. The keys should
+ // be prefix-less (i.e., the admission controller will add an
+ // appropriate prefix).
AuditAnnotations map[string]string `json:"auditAnnotations,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/operator/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
index ed8b7a18..2d66c9ba 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
@@ -4458,91 +4458,6 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
-- name: io.k8s.api.certificates.v1alpha1.PodCertificateRequest
- map:
- fields:
- - name: apiVersion
- type:
- scalar: string
- - name: kind
- type:
- scalar: string
- - name: metadata
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
- default: {}
- - name: spec
- type:
- namedType: io.k8s.api.certificates.v1alpha1.PodCertificateRequestSpec
- default: {}
- - name: status
- type:
- namedType: io.k8s.api.certificates.v1alpha1.PodCertificateRequestStatus
- default: {}
-- name: io.k8s.api.certificates.v1alpha1.PodCertificateRequestSpec
- map:
- fields:
- - name: maxExpirationSeconds
- type:
- scalar: numeric
- default: 86400
- - name: nodeName
- type:
- scalar: string
- default: ""
- - name: nodeUID
- type:
- scalar: string
- default: ""
- - name: pkixPublicKey
- type:
- scalar: string
- - name: podName
- type:
- scalar: string
- default: ""
- - name: podUID
- type:
- scalar: string
- default: ""
- - name: proofOfPossession
- type:
- scalar: string
- - name: serviceAccountName
- type:
- scalar: string
- default: ""
- - name: serviceAccountUID
- type:
- scalar: string
- default: ""
- - name: signerName
- type:
- scalar: string
- default: ""
-- name: io.k8s.api.certificates.v1alpha1.PodCertificateRequestStatus
- map:
- fields:
- - name: beginRefreshAt
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- - name: certificateChain
- type:
- scalar: string
- - name: conditions
- type:
- list:
- elementType:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
- elementRelationship: associative
- keys:
- - type
- - name: notAfter
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- - name: notBefore
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequest
map:
fields:
@@ -4666,6 +4581,96 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+- name: io.k8s.api.certificates.v1beta1.PodCertificateRequest
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.certificates.v1beta1.PodCertificateRequestSpec
+ default: {}
+ - name: status
+ type:
+ namedType: io.k8s.api.certificates.v1beta1.PodCertificateRequestStatus
+ default: {}
+- name: io.k8s.api.certificates.v1beta1.PodCertificateRequestSpec
+ map:
+ fields:
+ - name: maxExpirationSeconds
+ type:
+ scalar: numeric
+ default: 86400
+ - name: nodeName
+ type:
+ scalar: string
+ default: ""
+ - name: nodeUID
+ type:
+ scalar: string
+ default: ""
+ - name: pkixPublicKey
+ type:
+ scalar: string
+ - name: podName
+ type:
+ scalar: string
+ default: ""
+ - name: podUID
+ type:
+ scalar: string
+ default: ""
+ - name: proofOfPossession
+ type:
+ scalar: string
+ - name: serviceAccountName
+ type:
+ scalar: string
+ default: ""
+ - name: serviceAccountUID
+ type:
+ scalar: string
+ default: ""
+ - name: signerName
+ type:
+ scalar: string
+ default: ""
+ - name: unverifiedUserAnnotations
+ type:
+ map:
+ elementType:
+ scalar: string
+- name: io.k8s.api.certificates.v1beta1.PodCertificateRequestStatus
+ map:
+ fields:
+ - name: beginRefreshAt
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: certificateChain
+ type:
+ scalar: string
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
+ - name: notAfter
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
+ - name: notBefore
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- name: io.k8s.api.coordination.v1.Lease
map:
fields:
@@ -6765,6 +6770,12 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.core.v1.NodeDaemonEndpoints
default: {}
+ - name: declaredFeatures
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: atomic
- name: features
type:
namedType: io.k8s.api.core.v1.NodeFeatures
@@ -7282,6 +7293,11 @@ var schemaYAML = typed.YAMLObject(`types:
- name: signerName
type:
scalar: string
+ - name: userAnnotations
+ type:
+ map:
+ elementType:
+ scalar: string
- name: io.k8s.api.core.v1.PodCondition
map:
fields:
@@ -7633,9 +7649,17 @@ var schemaYAML = typed.YAMLObject(`types:
elementRelationship: associative
keys:
- name
+ - name: workloadRef
+ type:
+ namedType: io.k8s.api.core.v1.WorkloadReference
- name: io.k8s.api.core.v1.PodStatus
map:
fields:
+ - name: allocatedResources
+ type:
+ map:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
- name: conditions
type:
list:
@@ -7714,6 +7738,9 @@ var schemaYAML = typed.YAMLObject(`types:
elementRelationship: associative
keys:
- name
+ - name: resources
+ type:
+ namedType: io.k8s.api.core.v1.ResourceRequirements
- name: startTime
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
@@ -9007,6 +9034,20 @@ var schemaYAML = typed.YAMLObject(`types:
- name: runAsUserName
type:
scalar: string
+- name: io.k8s.api.core.v1.WorkloadReference
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: podGroup
+ type:
+ scalar: string
+ default: ""
+ - name: podGroupReplicaKey
+ type:
+ scalar: string
- name: io.k8s.api.discovery.v1.Endpoint
map:
fields:
@@ -13476,19 +13517,6 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.resource.v1.CounterSet
elementRelationship: atomic
-- name: io.k8s.api.resource.v1alpha3.CELDeviceSelector
- map:
- fields:
- - name: expression
- type:
- scalar: string
- default: ""
-- name: io.k8s.api.resource.v1alpha3.DeviceSelector
- map:
- fields:
- - name: cel
- type:
- namedType: io.k8s.api.resource.v1alpha3.CELDeviceSelector
- name: io.k8s.api.resource.v1alpha3.DeviceTaint
map:
fields:
@@ -13523,6 +13551,10 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.resource.v1alpha3.DeviceTaintRuleSpec
default: {}
+ - name: status
+ type:
+ namedType: io.k8s.api.resource.v1alpha3.DeviceTaintRuleStatus
+ default: {}
- name: io.k8s.api.resource.v1alpha3.DeviceTaintRuleSpec
map:
fields:
@@ -13533,27 +13565,29 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.resource.v1alpha3.DeviceTaint
default: {}
+- name: io.k8s.api.resource.v1alpha3.DeviceTaintRuleStatus
+ map:
+ fields:
+ - name: conditions
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
+ elementRelationship: associative
+ keys:
+ - type
- name: io.k8s.api.resource.v1alpha3.DeviceTaintSelector
map:
fields:
- name: device
type:
scalar: string
- - name: deviceClassName
- type:
- scalar: string
- name: driver
type:
scalar: string
- name: pool
type:
scalar: string
- - name: selectors
- type:
- list:
- elementType:
- namedType: io.k8s.api.resource.v1alpha3.DeviceSelector
- elementRelationship: atomic
- name: io.k8s.api.resource.v1beta1.AllocatedDeviceStatus
map:
fields:
@@ -14934,6 +14968,45 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: numeric
default: 0
+- name: io.k8s.api.scheduling.v1alpha1.BasicSchedulingPolicy
+ map:
+ elementType:
+ scalar: untyped
+ list:
+ elementType:
+ namedType: __untyped_atomic_
+ elementRelationship: atomic
+ map:
+ elementType:
+ namedType: __untyped_deduced_
+ elementRelationship: separable
+- name: io.k8s.api.scheduling.v1alpha1.GangSchedulingPolicy
+ map:
+ fields:
+ - name: minCount
+ type:
+ scalar: numeric
+ default: 0
+- name: io.k8s.api.scheduling.v1alpha1.PodGroup
+ map:
+ fields:
+ - name: name
+ type:
+ scalar: string
+ default: ""
+ - name: policy
+ type:
+ namedType: io.k8s.api.scheduling.v1alpha1.PodGroupPolicy
+ default: {}
+- name: io.k8s.api.scheduling.v1alpha1.PodGroupPolicy
+ map:
+ fields:
+ - name: basic
+ type:
+ namedType: io.k8s.api.scheduling.v1alpha1.BasicSchedulingPolicy
+ - name: gang
+ type:
+ namedType: io.k8s.api.scheduling.v1alpha1.GangSchedulingPolicy
- name: io.k8s.api.scheduling.v1alpha1.PriorityClass
map:
fields:
@@ -14960,6 +15033,51 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: numeric
default: 0
+- name: io.k8s.api.scheduling.v1alpha1.TypedLocalObjectReference
+ map:
+ fields:
+ - name: apiGroup
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ default: ""
+ - name: name
+ type:
+ scalar: string
+ default: ""
+- name: io.k8s.api.scheduling.v1alpha1.Workload
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
+ - name: spec
+ type:
+ namedType: io.k8s.api.scheduling.v1alpha1.WorkloadSpec
+ default: {}
+- name: io.k8s.api.scheduling.v1alpha1.WorkloadSpec
+ map:
+ fields:
+ - name: controllerRef
+ type:
+ namedType: io.k8s.api.scheduling.v1alpha1.TypedLocalObjectReference
+ - name: podGroups
+ type:
+ list:
+ elementType:
+ namedType: io.k8s.api.scheduling.v1alpha1.PodGroup
+ elementRelationship: associative
+ keys:
+ - name
- name: io.k8s.api.scheduling.v1beta1.PriorityClass
map:
fields:
@@ -15024,6 +15142,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: seLinuxMount
type:
scalar: boolean
+ - name: serviceAccountTokenInSecrets
+ type:
+ scalar: boolean
- name: storageCapacity
type:
scalar: boolean
@@ -15430,6 +15551,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: seLinuxMount
type:
scalar: boolean
+ - name: serviceAccountTokenInSecrets
+ type:
+ scalar: boolean
- name: storageCapacity
type:
scalar: boolean
@@ -15675,39 +15799,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: count
type:
scalar: numeric
-- name: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource
- map:
- fields:
- - name: group
- type:
- scalar: string
- - name: resource
- type:
- scalar: string
- - name: version
- type:
- scalar: string
-- name: io.k8s.api.storagemigration.v1alpha1.MigrationCondition
- map:
- fields:
- - name: lastUpdateTime
- type:
- namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
- - name: message
- type:
- scalar: string
- - name: reason
- type:
- scalar: string
- - name: status
- type:
- scalar: string
- default: ""
- - name: type
- type:
- scalar: string
- default: ""
-- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration
+- name: io.k8s.api.storagemigration.v1beta1.StorageVersionMigration
map:
fields:
- name: apiVersion
@@ -15722,30 +15814,27 @@ var schemaYAML = typed.YAMLObject(`types:
default: {}
- name: spec
type:
- namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec
+ namedType: io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationSpec
default: {}
- name: status
type:
- namedType: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus
+ namedType: io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationStatus
default: {}
-- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationSpec
+- name: io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationSpec
map:
fields:
- - name: continueToken
- type:
- scalar: string
- name: resource
type:
- namedType: io.k8s.api.storagemigration.v1alpha1.GroupVersionResource
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.GroupResource
default: {}
-- name: io.k8s.api.storagemigration.v1alpha1.StorageVersionMigrationStatus
+- name: io.k8s.api.storagemigration.v1beta1.StorageVersionMigrationStatus
map:
fields:
- name: conditions
type:
list:
elementType:
- namedType: io.k8s.api.storagemigration.v1alpha1.MigrationCondition
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
elementRelationship: associative
keys:
- type
@@ -15821,6 +15910,17 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: __untyped_deduced_
elementRelationship: separable
+- name: io.k8s.apimachinery.pkg.apis.meta.v1.GroupResource
+ map:
+ fields:
+ - name: group
+ type:
+ scalar: string
+ default: ""
+ - name: resource
+ type:
+ scalar: string
+ default: ""
- name: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
map:
fields:
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
index 69063df6..6b0a4431 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/condition.go
@@ -24,13 +24,47 @@ import (
// ConditionApplyConfiguration represents a declarative configuration of the Condition type for use
// with apply.
+//
+// Condition contains details for one aspect of the current state of this API Resource.
+// ---
+// This struct is intended for direct use as an array at the field path .status.conditions. For example,
+//
+// type FooStatus struct{
+// // Represents the observations of a foo's current state.
+// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
+// // +patchMergeKey=type
+// // +patchStrategy=merge
+// // +listType=map
+// // +listMapKey=type
+// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+//
+// // other fields
+// }
type ConditionApplyConfiguration struct {
- Type *string `json:"type,omitempty"`
- Status *metav1.ConditionStatus `json:"status,omitempty"`
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
+ // type of condition in CamelCase or in foo.example.com/CamelCase.
+ // ---
+ // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ // useful (see .node.status.conditions), the ability to deconflict is important.
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ Type *string `json:"type,omitempty"`
+ // status of the condition, one of True, False, Unknown.
+ Status *metav1.ConditionStatus `json:"status,omitempty"`
+ // observedGeneration represents the .metadata.generation that the condition was set based upon.
+ // For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ // with respect to the current state of the instance.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // lastTransitionTime is the last time the condition transitioned from one status to another.
+ // This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+ // reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ // Producers of specific condition types may define expected values and meanings for this field,
+ // and whether the values are considered a guaranteed API.
+ // The value should be a CamelCase string.
+ // This field may not be empty.
+ Reason *string `json:"reason,omitempty"`
+ // message is a human readable message indicating details about the transition.
+ // This may be an empty string.
+ Message *string `json:"message,omitempty"`
}
// ConditionApplyConfiguration constructs a declarative configuration of the Condition type for use with
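For orientation, the fields documented above line up with metav1.Condition; a minimal sketch of building one of these conditions with the generated With* builders (the "Ready" type, reason, and message are placeholders, not values taken from this repository):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// readyCondition assembles a Condition apply configuration; setting every field
// through the builders lets server-side apply track ownership of each one.
func readyCondition(observedGeneration int64) *metav1ac.ConditionApplyConfiguration {
	return metav1ac.Condition().
		WithType("Ready").                          // placeholder condition type
		WithStatus(metav1.ConditionTrue).           // True, False, or Unknown
		WithObservedGeneration(observedGeneration). // generation the condition was computed from
		WithLastTransitionTime(metav1.Now()).
		WithReason("ReconcileSucceeded").           // CamelCase programmatic identifier
		WithMessage("all work completed")           // human-readable detail, may be empty
}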
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
index a872d19c..ed4c4bfa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/deleteoptions.go
@@ -24,14 +24,51 @@ import (
// DeleteOptionsApplyConfiguration represents a declarative configuration of the DeleteOptions type for use
// with apply.
+//
+// DeleteOptions may be provided when deleting an API object.
type DeleteOptionsApplyConfiguration struct {
- TypeMetaApplyConfiguration `json:",inline"`
- GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
- Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"`
- OrphanDependents *bool `json:"orphanDependents,omitempty"`
- PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"`
- DryRun []string `json:"dryRun,omitempty"`
- IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty"`
+ TypeMetaApplyConfiguration `json:",inline"`
+ // The duration in seconds before the object should be deleted. Value must be non-negative integer.
+ // The value zero indicates delete immediately. If this value is nil, the default grace period for the
+ // specified type will be used.
+ // Defaults to a per object value if not specified. zero means delete immediately.
+ GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty"`
+ // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
+ // returned.
+ Preconditions *PreconditionsApplyConfiguration `json:"preconditions,omitempty"`
+ // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
+ // Should the dependent objects be orphaned. If true/false, the "orphan"
+ // finalizer will be added to/removed from the object's finalizers list.
+ // Either this field or PropagationPolicy may be set, but not both.
+ OrphanDependents *bool `json:"orphanDependents,omitempty"`
+ // Whether and how garbage collection will be performed.
+ // Either this field or OrphanDependents may be set, but not both.
+ // The default policy is decided by the existing finalizer set in the
+ // metadata.finalizers and the resource-specific default policy.
+ // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' -
+ // allow the garbage collector to delete the dependents in the background;
+ // 'Foreground' - a cascading policy that deletes all dependents in the
+ // foreground.
+ PropagationPolicy *metav1.DeletionPropagation `json:"propagationPolicy,omitempty"`
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string `json:"dryRun,omitempty"`
+ // if set to true, it will trigger an unsafe deletion of the resource in
+ // case the normal deletion flow fails with a corrupt object error.
+ // A resource is considered corrupt if it can not be retrieved from
+ // the underlying storage successfully because of a) its data can
+ // not be transformed e.g. decryption failure, or b) it fails
+ // to decode into an object.
+ // NOTE: unsafe deletion ignores finalizer constraints, skips
+ // precondition checks, and removes the object from the storage.
+ // WARNING: This may potentially break the cluster if the workload
+ // associated with the resource being unsafe-deleted relies on normal
+ // deletion flow. Use only if you REALLY know what you are doing.
+ // The default value is false, and the user must opt in to enable it
+ IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty"`
}
// DeleteOptionsApplyConfiguration constructs a declarative configuration of the DeleteOptions type for use with
@@ -42,6 +79,7 @@ func DeleteOptions() *DeleteOptionsApplyConfiguration {
b.WithAPIVersion("meta.k8s.io/v1")
return b
}
+
func (b DeleteOptionsApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
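With those field semantics in mind, a hedged sketch of assembling the apply-configuration form of DeleteOptions for a foreground cascading delete with no grace period (the constructor already stamps Kind and the meta.k8s.io/v1 APIVersion, as the hunk above shows):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// foregroundDelete requests immediate deletion and foreground garbage collection
// of dependents; PropagationPolicy and OrphanDependents are mutually exclusive,
// so only the former is set here.
func foregroundDelete() *metav1ac.DeleteOptionsApplyConfiguration {
	return metav1ac.DeleteOptions().
		WithGracePeriodSeconds(0).
		WithPropagationPolicy(metav1.DeletePropagationForeground)
}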
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupresource.go
similarity index 54%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupresource.go
index c8f9f009..d2e56d63 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/groupresource.go
@@ -16,42 +16,36 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1
-// GroupVersionResourceApplyConfiguration represents a declarative configuration of the GroupVersionResource type for use
+// GroupResourceApplyConfiguration represents a declarative configuration of the GroupResource type for use
// with apply.
-type GroupVersionResourceApplyConfiguration struct {
+//
+// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying
+// concepts during lookup stages without having partially valid types
+type GroupResourceApplyConfiguration struct {
Group *string `json:"group,omitempty"`
- Version *string `json:"version,omitempty"`
Resource *string `json:"resource,omitempty"`
}
-// GroupVersionResourceApplyConfiguration constructs a declarative configuration of the GroupVersionResource type for use with
+// GroupResourceApplyConfiguration constructs a declarative configuration of the GroupResource type for use with
// apply.
-func GroupVersionResource() *GroupVersionResourceApplyConfiguration {
- return &GroupVersionResourceApplyConfiguration{}
+func GroupResource() *GroupResourceApplyConfiguration {
+ return &GroupResourceApplyConfiguration{}
}
// WithGroup sets the Group field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Group field is set to the value of the last call.
-func (b *GroupVersionResourceApplyConfiguration) WithGroup(value string) *GroupVersionResourceApplyConfiguration {
+func (b *GroupResourceApplyConfiguration) WithGroup(value string) *GroupResourceApplyConfiguration {
b.Group = &value
return b
}
-// WithVersion sets the Version field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Version field is set to the value of the last call.
-func (b *GroupVersionResourceApplyConfiguration) WithVersion(value string) *GroupVersionResourceApplyConfiguration {
- b.Version = &value
- return b
-}
-
// WithResource sets the Resource field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resource field is set to the value of the last call.
-func (b *GroupVersionResourceApplyConfiguration) WithResource(value string) *GroupVersionResourceApplyConfiguration {
+func (b *GroupResourceApplyConfiguration) WithResource(value string) *GroupResourceApplyConfiguration {
b.Resource = &value
return b
}
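Because the renamed builder drops the version field, identifying a resource now takes only a group and a resource name; a small illustrative chain (the apps/deployments pair is made up for the example):

package example

import (
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// deploymentsGroupResource names the "deployments" resource in the "apps" group
// without pinning a version, which is the lookup-stage use case GroupResource targets.
func deploymentsGroupResource() *metav1ac.GroupResourceApplyConfiguration {
	return metav1ac.GroupResource().
		WithGroup("apps").
		WithResource("deployments")
}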
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
index 1f33c94e..7342725e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselector.go
@@ -20,8 +20,21 @@ package v1
// LabelSelectorApplyConfiguration represents a declarative configuration of the LabelSelector type for use
// with apply.
+//
+// Note:
+// There are two different styles of label selectors used in versioned types:
+// an older style which is represented as just a string in versioned types, and a
+// newer style that is structured. LabelSelector is an internal representation for the
+// latter style.
+// A label selector is a label query over a set of resources. The result of matchLabels and
+// matchExpressions are ANDed. An empty label selector matches all objects. A null
+// label selector matches no objects.
type LabelSelectorApplyConfiguration struct {
- MatchLabels map[string]string `json:"matchLabels,omitempty"`
+ // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ // map is equivalent to an element of matchExpressions, whose key field is "key", the
+ // operator is "In", and the values array contains only "value". The requirements are ANDed.
+ MatchLabels map[string]string `json:"matchLabels,omitempty"`
+ // matchExpressions is a list of label selector requirements. The requirements are ANDed.
MatchExpressions []LabelSelectorRequirementApplyConfiguration `json:"matchExpressions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
index c8b015c9..14031357 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/labelselectorrequirement.go
@@ -24,10 +24,20 @@ import (
// LabelSelectorRequirementApplyConfiguration represents a declarative configuration of the LabelSelectorRequirement type for use
// with apply.
+//
+// A label selector requirement is a selector that contains values, a key, and an operator that
+// relates the key and values.
type LabelSelectorRequirementApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
+ // key is the label key that the selector applies to.
+ Key *string `json:"key,omitempty"`
+ // operator represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists and DoesNotExist.
Operator *metav1.LabelSelectorOperator `json:"operator,omitempty"`
- Values []string `json:"values,omitempty"`
+ // values is an array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. This array is replaced during a strategic
+ // merge patch.
+ Values []string `json:"values,omitempty"`
}
// LabelSelectorRequirementApplyConfiguration constructs a declarative configuration of the LabelSelectorRequirement type for use with
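Tying the two selector types together, a brief sketch of a structured selector whose matchLabels clause and single matchExpressions requirement are ANDed (the keys and values are placeholders):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// frontendSelector matches objects labeled app=web whose "tier" label is either
// "frontend" or "edge"; both clauses must hold because requirements are ANDed.
func frontendSelector() *metav1ac.LabelSelectorApplyConfiguration {
	return metav1ac.LabelSelector().
		WithMatchLabels(map[string]string{"app": "web"}).
		WithMatchExpressions(
			metav1ac.LabelSelectorRequirement().
				WithKey("tier").
				WithOperator(metav1.LabelSelectorOpIn). // In requires a non-empty values list
				WithValues("frontend", "edge"),
		)
}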
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
index 7175537c..beb9d5e5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
@@ -24,14 +24,39 @@ import (
// ManagedFieldsEntryApplyConfiguration represents a declarative configuration of the ManagedFieldsEntry type for use
// with apply.
+//
+// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource
+// that the fieldset applies to.
type ManagedFieldsEntryApplyConfiguration struct {
- Manager *string `json:"manager,omitempty"`
- Operation *metav1.ManagedFieldsOperationType `json:"operation,omitempty"`
- APIVersion *string `json:"apiVersion,omitempty"`
- Time *metav1.Time `json:"time,omitempty"`
- FieldsType *string `json:"fieldsType,omitempty"`
- FieldsV1 *metav1.FieldsV1 `json:"fieldsV1,omitempty"`
- Subresource *string `json:"subresource,omitempty"`
+ // Manager is an identifier of the workflow managing these fields.
+ Manager *string `json:"manager,omitempty"`
+ // Operation is the type of operation which lead to this ManagedFieldsEntry being created.
+ // The only valid values for this field are 'Apply' and 'Update'.
+ Operation *metav1.ManagedFieldsOperationType `json:"operation,omitempty"`
+ // APIVersion defines the version of this resource that this field set
+ // applies to. The format is "group/version" just like the top-level
+ // APIVersion field. It is necessary to track the version of a field
+ // set because it cannot be automatically converted.
+ APIVersion *string `json:"apiVersion,omitempty"`
+ // Time is the timestamp of when the ManagedFields entry was added. The
+ // timestamp will also be updated if a field is added, the manager
+ // changes any of the owned fields value or removes a field. The
+ // timestamp does not update when a field is removed from the entry
+ // because another manager took it over.
+ Time *metav1.Time `json:"time,omitempty"`
+ // FieldsType is the discriminator for the different fields format and version.
+ // There is currently only one possible value: "FieldsV1"
+ FieldsType *string `json:"fieldsType,omitempty"`
+ // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
+ FieldsV1 *metav1.FieldsV1 `json:"fieldsV1,omitempty"`
+ // Subresource is the name of the subresource used to update that object, or
+ // empty string if the object was updated through the main resource. The
+ // value of this field is used to distinguish between managers, even if they
+ // share the same name. For example, a status update will be distinct from a
+ // regular update using the same manager name.
+ // Note that the APIVersion field is not related to the Subresource field and
+ // it always corresponds to the version of the main resource.
+ Subresource *string `json:"subresource,omitempty"`
}
// ManagedFieldsEntryApplyConfiguration constructs a declarative configuration of the ManagedFieldsEntry type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
index 13e1366d..fae9d620 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/objectmeta.go
@@ -25,20 +25,123 @@ import (
// ObjectMetaApplyConfiguration represents a declarative configuration of the ObjectMeta type for use
// with apply.
+//
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create.
type ObjectMetaApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- GenerateName *string `json:"generateName,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
- ResourceVersion *string `json:"resourceVersion,omitempty"`
- Generation *int64 `json:"generation,omitempty"`
- CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
- DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty"`
- DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
- Labels map[string]string `json:"labels,omitempty"`
- Annotations map[string]string `json:"annotations,omitempty"`
- OwnerReferences []OwnerReferenceApplyConfiguration `json:"ownerReferences,omitempty"`
- Finalizers []string `json:"finalizers,omitempty"`
+ // Name must be unique within a namespace. Is required when creating resources, although
+ // some resources may allow a client to request the generation of an appropriate name
+ // automatically. Name is primarily intended for creation idempotence and configuration
+ // definition.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names
+ Name *string `json:"name,omitempty"`
+ // GenerateName is an optional prefix, used by the server, to generate a unique
+ // name ONLY IF the Name field has not been provided.
+ // If this field is used, the name returned to the client will be different
+ // than the name passed. This value will also be combined with a unique suffix.
+ // The provided value has the same validation rules as the Name field,
+ // and may be truncated by the length of the suffix required to make the value
+ // unique on the server.
+ //
+ // If this field is specified and the generated name exists, the server will return a 409.
+ //
+ // Applied only if Name is not specified.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+ GenerateName *string `json:"generateName,omitempty"`
+ // Namespace defines the space within which each name must be unique. An empty namespace is
+ // equivalent to the "default" namespace, but "default" is the canonical representation.
+ // Not all objects are required to be scoped to a namespace - the value of this field for
+ // those objects will be empty.
+ //
+ // Must be a DNS_LABEL.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces
+ Namespace *string `json:"namespace,omitempty"`
+ // UID is the unique in time and space value for this object. It is typically generated by
+ // the server on successful creation of a resource and is not allowed to change on PUT
+ // operations.
+ //
+ // Populated by the system.
+ // Read-only.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids
+ UID *types.UID `json:"uid,omitempty"`
+ // An opaque value that represents the internal version of this object that can
+ // be used by clients to determine when objects have changed. May be used for optimistic
+ // concurrency, change detection, and the watch operation on a resource or set of resources.
+ // Clients must treat these values as opaque and pass them unmodified back to the server.
+ // They may only be valid for a particular resource or set of resources.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Value must be treated as opaque by clients.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+ ResourceVersion *string `json:"resourceVersion,omitempty"`
+ // A sequence number representing a specific generation of the desired state.
+ // Populated by the system. Read-only.
+ Generation *int64 `json:"generation,omitempty"`
+ // CreationTimestamp is a timestamp representing the server time when this object was
+ // created. It is not guaranteed to be set in happens-before order across separate operations.
+ // Clients may not set this value. It is represented in RFC3339 form and is in UTC.
+ //
+ // Populated by the system.
+ // Read-only.
+ // Null for lists.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
+ // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+ // field is set by the server when a graceful deletion is requested by the user, and is not
+ // directly settable by a client. The resource is expected to be deleted (no longer visible
+ // from resource lists, and not reachable by name) after the time in this field, once the
+ // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+ // Once the deletionTimestamp is set, this value may not be unset or be set further into the
+ // future, although it may be shortened or the resource may be deleted prior to this time.
+ // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+ // by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+ // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+ // remove the pod from the API. In the presence of network partitions, this object may still
+ // exist after this timestamp, until an administrator or automated process can determine the
+ // resource is fully terminated.
+ // If not set, graceful deletion of the object has not been requested.
+ //
+ // Populated by the system when a graceful deletion is requested.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty"`
+ // Number of seconds allowed for this object to gracefully terminate before
+ // it will be removed from the system. Only set when deletionTimestamp is also set.
+ // May only be shortened.
+ // Read-only.
+ DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"`
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ Labels map[string]string `json:"labels,omitempty"`
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations
+ Annotations map[string]string `json:"annotations,omitempty"`
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ OwnerReferences []OwnerReferenceApplyConfiguration `json:"ownerReferences,omitempty"`
+ // Must be empty before the object is deleted from the registry. Each entry
+ // is an identifier for the responsible component that will remove the entry
+ // from the list. If the deletionTimestamp of the object is non-nil, entries
+ // in this list can only be removed.
+ // Finalizers may be processed and removed in any order. Order is NOT enforced
+ // because it introduces significant risk of stuck finalizers.
+ // finalizers is a shared field, any actor with permission can reorder it.
+ // If the finalizer list is processed in order, then this can lead to a situation
+ // in which the component responsible for the first finalizer in the list is
+ // waiting for a signal (field value, external system, or other) produced by a
+ // component responsible for a finalizer later in the list, resulting in a deadlock.
+ // Without enforced ordering finalizers are free to order amongst themselves and
+ // are not vulnerable to ordering changes in the list.
+ Finalizers []string `json:"finalizers,omitempty"`
}
// ObjectMetaApplyConfiguration constructs a declarative configuration of the ObjectMeta type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
index 27761523..35501b16 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/ownerreference.go
@@ -24,13 +24,33 @@ import (
// OwnerReferenceApplyConfiguration represents a declarative configuration of the OwnerReference type for use
// with apply.
+//
+// OwnerReference contains enough information to let you identify an owning
+// object. An owning object must be in the same namespace as the dependent, or
+// be cluster-scoped, so there is no namespace field.
type OwnerReferenceApplyConfiguration struct {
- APIVersion *string `json:"apiVersion,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
- Controller *bool `json:"controller,omitempty"`
- BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"`
+ // API version of the referent.
+ APIVersion *string `json:"apiVersion,omitempty"`
+ // Kind of the referent.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // Name of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names
+ Name *string `json:"name,omitempty"`
+ // UID of the referent.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids
+ UID *types.UID `json:"uid,omitempty"`
+ // If true, this reference points to the managing controller.
+ Controller *bool `json:"controller,omitempty"`
+ // If true, AND if the owner has the "foregroundDeletion" finalizer, then
+ // the owner cannot be deleted from the key-value store until this
+ // reference is removed.
+ // See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion
+ // for how the garbage collector interacts with this field and enforces the foreground deletion.
+ // Defaults to false.
+ // To set this field, a user needs "delete" permission of the owner,
+ // otherwise 422 (Unprocessable Entity) will be returned.
+ BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty"`
}
// OwnerReferenceApplyConfiguration constructs a declarative configuration of the OwnerReference type for use with
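To ground the Controller and BlockOwnerDeletion semantics above, a hedged sketch of building an owner reference for a dependent object's apply configuration (the skyhook.nvidia.com/v1alpha1 group/version and Skyhook kind are assumptions used only for illustration):

package example

import (
	"k8s.io/apimachinery/pkg/types"

	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// controllerOwnerRef marks the referenced object as the managing controller and
// blocks its foreground deletion until this reference is removed; the caller
// needs "delete" permission on the owner for BlockOwnerDeletion to be accepted.
func controllerOwnerRef(name string, uid types.UID) *metav1ac.OwnerReferenceApplyConfiguration {
	return metav1ac.OwnerReference().
		WithAPIVersion("skyhook.nvidia.com/v1alpha1"). // assumed owner group/version
		WithKind("Skyhook").                           // assumed owner kind
		WithName(name).
		WithUID(uid).
		WithController(true).
		WithBlockOwnerDeletion(true)
}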
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
index 8f8b6c6b..cc4e4726 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/preconditions.go
@@ -24,9 +24,13 @@ import (
// PreconditionsApplyConfiguration represents a declarative configuration of the Preconditions type for use
// with apply.
+//
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type PreconditionsApplyConfiguration struct {
- UID *types.UID `json:"uid,omitempty"`
- ResourceVersion *string `json:"resourceVersion,omitempty"`
+ // Specifies the target UID.
+ UID *types.UID `json:"uid,omitempty"`
+ // Specifies the target ResourceVersion
+ ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// PreconditionsApplyConfiguration constructs a declarative configuration of the Preconditions type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
index 29a47e44..feb29e1b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/meta/v1/typemeta.go
@@ -20,8 +20,21 @@ package v1
// TypeMetaApplyConfiguration represents a declarative configuration of the TypeMeta type for use
// with apply.
+//
+// TypeMeta describes an individual object in an API response or request
+// with strings representing the type of the object and its API schema version.
+// Structures that are versioned or persisted should inline TypeMeta.
type TypeMetaApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
+ // Kind is a string value representing the REST resource this object represents.
+ // Servers may infer this from the endpoint the client submits requests to.
+ // Cannot be updated.
+ // In CamelCase.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind *string `json:"kind,omitempty"`
+ // APIVersion defines the versioned schema of this representation of an object.
+ // Servers should convert recognized schemas to the latest internal value, and
+ // may reject unrecognized values.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
APIVersion *string `json:"apiVersion,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
index 96f9b1f5..e6a0116c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingresspath.go
@@ -24,10 +24,33 @@ import (
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
+//
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
type HTTPIngressPathApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- PathType *networkingv1.PathType `json:"pathType,omitempty"`
- Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
+ // path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
+ Path *string `json:"path,omitempty"`
+ // pathType determines the interpretation of the path matching. PathType can
+ // be one of the following values:
+ // * Exact: Matches the URL path exactly.
+ // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+ // done on a path element by element basis. A path element refers to the
+ // list of labels in the path split by the '/' separator. A request is a
+ // match for path p if every element of p is an element-wise prefix of the
+ // request path. Note that if the last element of the path is a substring
+ // of the last element in request path, it is not a match (e.g. /foo/bar
+ // matches /foo/bar/baz, but does not match /foo/barbaz).
+ // * ImplementationSpecific: Interpretation of the Path matching is up to
+ // the IngressClass. Implementations can treat this as a separate PathType
+ // or treat it identically to Prefix or Exact path types.
+ // Implementations are required to support all path types.
+ PathType *networkingv1.PathType `json:"pathType,omitempty"`
+ // backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
index ad9a7a67..ff3a476a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/httpingressrulevalue.go
@@ -20,7 +20,14 @@ package v1
// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
// with apply.
+//
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend, where
+// parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
type HTTPIngressRuleValueApplyConfiguration struct {
+ // paths is a collection of paths that map requests to backends.
Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
index 3085e4cf..e6bb9205 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
@@ -29,11 +29,22 @@ import (
// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
// with apply.
+//
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
type IngressApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
- Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the desired state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
}
// Ingress constructs a declarative configuration of the Ingress type for use with
@@ -47,6 +58,27 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
return b
}
+// ExtractIngressFrom extracts the applied configuration owned by fieldManager from
+// ingress for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API.
+// ExtractIngressFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIngressFrom(ingress *networkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
+ b := &IngressApplyConfiguration{}
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(ingress.Name)
+ b.WithNamespace(ingress.Namespace)
+
+ b.WithKind("Ingress")
+ b.WithAPIVersion("networking.k8s.io/v1")
+ return b, nil
+}
+
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +89,16 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractIngress(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "")
+ return ExtractIngressFrom(ingress, fieldManager, "")
}
-// ExtractIngressStatus is the same as ExtractIngress except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractIngressStatus extracts the applied configuration owned by fieldManager from
+// ingress for the status subresource.
func ExtractIngressStatus(ingress *networkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "status")
+ return ExtractIngressFrom(ingress, fieldManager, "status")
}
-func extractIngress(ingress *networkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
- b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(ingress.Name)
- b.WithNamespace(ingress.Namespace)
-
- b.WithKind("Ingress")
- b.WithAPIVersion("networking.k8s.io/v1")
- return b, nil
-}
func (b IngressApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
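The promoted ExtractIngressFrom and the thin ExtractIngress/ExtractIngressStatus wrappers above exist to support an extract → modify → apply round trip; a minimal sketch of that flow against a standard clientset, assuming a caller-chosen field manager name and an illustrative label:

package example

import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelIngress pulls out only the fields owned by fieldManager, adds a label,
// and applies the result back, leaving other managers' fields untouched.
func relabelIngress(ctx context.Context, cs kubernetes.Interface, ing *networkingv1.Ingress, fieldManager string) error {
	ac, err := networkingv1ac.ExtractIngress(ing, fieldManager)
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"example.com/relabeled": "true"}) // illustrative label
	_, err = cs.NetworkingV1().Ingresses(ing.Namespace).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	})
	return err
}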
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
index b014b7be..4c7f6bfe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressbackend.go
@@ -24,8 +24,16 @@ import (
// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
// with apply.
+//
+// IngressBackend describes all endpoints for a given service and port.
type IngressBackendApplyConfiguration struct {
- Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"`
+ // service references a service as a backend.
+ // This is a mutually exclusive setting with "Resource".
+ Service *IngressServiceBackendApplyConfiguration `json:"service,omitempty"`
+ // resource is an ObjectRef to another Kubernetes resource in the namespace
+ // of the Ingress object. If resource is specified, a service.Name and
+ // service.Port must not be specified.
+ // This is a mutually exclusive setting with "Service".
Resource *corev1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
index a03b9127..387e4572 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
@@ -29,10 +29,20 @@ import (
// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use
// with apply.
+//
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
type IngressClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the desired state of the IngressClass.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// IngressClass constructs a declarative configuration of the IngressClass type for use with
@@ -45,29 +55,14 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
return b
}
-// ExtractIngressClass extracts the applied configuration owned by fieldManager from
-// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
-// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractIngressClassFrom extracts the applied configuration owned by fieldManager from
+// ingressClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API.
-// ExtractIngressClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractIngressClassFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
- return extractIngressClass(ingressClass, fieldManager, "")
-}
-
-// ExtractIngressClassStatus is the same as ExtractIngressClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractIngressClassStatus(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
- return extractIngressClass(ingressClass, fieldManager, "status")
-}
-
-func extractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
+func ExtractIngressClassFrom(ingressClass *networkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
b := &IngressClassApplyConfiguration{}
err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +74,21 @@ func extractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager s
b.WithAPIVersion("networking.k8s.io/v1")
return b, nil
}
+
+// ExtractIngressClass extracts the applied configuration owned by fieldManager from
+// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
+// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API.
+// ExtractIngressClass provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIngressClass(ingressClass *networkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return ExtractIngressClassFrom(ingressClass, fieldManager, "")
+}
+
func (b IngressClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
index 0dba1ebc..6be74dfb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassparametersreference.go
@@ -20,11 +20,24 @@ package v1
// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use
// with apply.
+//
+// IngressClassParametersReference identifies an API object. This can be used
+// to specify a cluster or namespace-scoped resource.
type IngressClassParametersReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
- Scope *string `json:"scope,omitempty"`
+ // apiGroup is the group for the resource being referenced. If APIGroup is
+ // not specified, the specified Kind must be in the core API group. For any
+ // other third-party types, APIGroup is required.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // kind is the type of resource being referenced.
+ Kind *string `json:"kind,omitempty"`
+ // name is the name of resource being referenced.
+ Name *string `json:"name,omitempty"`
+ // scope represents if this refers to a cluster or namespace scoped resource.
+ // This may be set to "Cluster" (default) or "Namespace".
+ Scope *string `json:"scope,omitempty"`
+ // namespace is the namespace of the resource being referenced. This field is
+ // required when scope is set to "Namespace" and must be unset when scope is set to
+ // "Cluster".
Namespace *string `json:"namespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
index 23e84843..9dda6ae6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclassspec.go
@@ -20,8 +20,19 @@ package v1
// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use
// with apply.
+//
+// IngressClassSpec provides information about the class of an Ingress.
type IngressClassSpecApplyConfiguration struct {
- Controller *string `json:"controller,omitempty"`
+ // controller refers to the name of the controller that should handle this
+ // class. This allows for different "flavors" that are controlled by the
+ // same controller. For example, you may have different parameters for the
+ // same implementing controller. This should be specified as a
+ // domain-prefixed path no more than 250 characters in length, e.g.
+ // "acme.io/ingress-controller". This field is immutable.
+ Controller *string `json:"controller,omitempty"`
+ // parameters is a link to a custom resource containing additional
+ // configuration for the controller. This is optional if the controller does
+ // not require extra parameters.
Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"`
}
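Combining IngressClass, its spec, and the parameters reference described above, a small hedged sketch of declaring a default-capable class (the controller path and parameter resource names are invented for the example):

package example

import (
	networkingv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
)

// exampleIngressClass declares a class handled by an assumed controller and
// points at a cluster-scoped parameters object for extra configuration.
func exampleIngressClass() *networkingv1ac.IngressClassApplyConfiguration {
	return networkingv1ac.IngressClass("example").
		WithAnnotations(map[string]string{"ingressclass.kubernetes.io/is-default-class": "true"}).
		WithSpec(networkingv1ac.IngressClassSpec().
			WithController("example.com/ingress-controller"). // domain-prefixed path, immutable
			WithParameters(networkingv1ac.IngressClassParametersReference().
				WithAPIGroup("example.com").
				WithKind("IngressParameters").
				WithName("defaults").
				WithScope("Cluster")))
}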
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
index d0feb44d..2ebbad36 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalanceringress.go
@@ -20,10 +20,15 @@ package v1
// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
// with apply.
+//
+// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngressApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
+ // ip is set for load-balancer ingress points that are IP based.
+ IP *string `json:"ip,omitempty"`
+ // hostname is set for load-balancer ingress points that are DNS based.
+ Hostname *string `json:"hostname,omitempty"`
+ // ports provides information about the ports exposed by this LoadBalancer.
+ Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
}
// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
index 08c841f0..393daf1f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressloadbalancerstatus.go
@@ -20,7 +20,10 @@ package v1
// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
// with apply.
+//
+// IngressLoadBalancerStatus represents the status of a load-balancer.
type IngressLoadBalancerStatusApplyConfiguration struct {
+ // ingress is a list containing ingress points for the load-balancer.
Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
index 84ba243a..ad9e977a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressportstatus.go
@@ -24,10 +24,23 @@ import (
// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
// with apply.
+//
+// IngressPortStatus represents the error condition of a service port
type IngressPortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
+ // port is the port number of the ingress port.
+ Port *int32 `json:"port,omitempty"`
+ // protocol is the protocol of the ingress port.
+ // The supported values are: "TCP", "UDP", "SCTP"
Protocol *corev1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ // error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ Error *string `json:"error,omitempty"`
}
// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
index 20a1816b..e77494d8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrule.go
@@ -20,8 +20,39 @@ package v1
// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
// with apply.
+//
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
type IngressRuleApplyConfiguration struct {
- Host *string `json:"host,omitempty"`
+ // host is the fully qualified domain name of a network host, as defined by RFC 3986.
+ // Note the following deviations from the "host" part of the
+ // URI as defined in RFC 3986:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+ // the IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the
+ // IngressRuleValue. If the host is unspecified, the Ingress routes all
+ // traffic based on the specified IngressRuleValue.
+ //
+ // host can be "precise" which is a domain name without the terminating dot of
+ // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+ // prefixed with a single wildcard label (e.g. "*.foo.com").
+ // The wildcard character '*' must appear by itself as the first DNS label and
+ // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+ // Requests will be matched against the Host field in the following way:
+ // 1. If host is precise, the request matches this rule if the http host header is equal to Host.
+ // 2. If host is a wildcard, then the request matches this rule if the http host header
+ // is equal to the suffix (removing the first label) of the wildcard rule.
+ Host *string `json:"host,omitempty"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to a http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
IngressRuleValueApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
index 1e13e378..c1ca4853 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressrulevalue.go
@@ -20,6 +20,11 @@ package v1
// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
// with apply.
+//
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
type IngressRuleValueApplyConfiguration struct {
HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
index 07876afd..bc48a6e0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressservicebackend.go
@@ -20,8 +20,14 @@ package v1
// IngressServiceBackendApplyConfiguration represents a declarative configuration of the IngressServiceBackend type for use
// with apply.
+//
+// IngressServiceBackend references a Kubernetes Service as a Backend.
type IngressServiceBackendApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // name is the referenced service. The service must exist in
+ // the same namespace as the Ingress object.
+ Name *string `json:"name,omitempty"`
+ // port of the referenced service. A port name or port number
+ // is required for an IngressServiceBackend.
Port *ServiceBackendPortApplyConfiguration `json:"port,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
index 0572153a..67b1ac4d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressspec.go
@@ -20,11 +20,34 @@ package v1
// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
// with apply.
+//
+// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpecApplyConfiguration struct {
- IngressClassName *string `json:"ingressClassName,omitempty"`
- DefaultBackend *IngressBackendApplyConfiguration `json:"defaultBackend,omitempty"`
- TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
- Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
+ // ingressClassName is the name of an IngressClass cluster resource. Ingress
+ // controller implementations use this field to know whether they should be
+ // serving this Ingress resource, by a transitive connection
+ // (controller -> IngressClass -> Ingress resource). Although the
+ // `kubernetes.io/ingress.class` annotation (simple constant name) was never
+ // formally defined, it was widely supported by Ingress controllers to create
+ // a direct binding between Ingress controller and Ingress resources. Newly
+ // created Ingress resources should prefer using the field. However, even
+ // though the annotation is officially deprecated, for backwards compatibility
+ // reasons, ingress controllers should still honor that annotation if present.
+ IngressClassName *string `json:"ingressClassName,omitempty"`
+ // defaultBackend is the backend that should handle requests that don't
+ // match any rule. If Rules are not specified, DefaultBackend must be specified.
+ // If DefaultBackend is not set, the handling of requests that do not match any
+ // of the rules will be up to the Ingress controller.
+ DefaultBackend *IngressBackendApplyConfiguration `json:"defaultBackend,omitempty"`
+ // tls represents the TLS configuration. Currently the Ingress only supports a
+ // single TLS port, 443. If multiple members of this list specify different hosts,
+ // they will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
+ // rules is a list of host rules used to configure the Ingress. If unspecified,
+ // or no rule matches, all traffic is sent to the default backend.
+ Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
}
// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
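The IngressSpec fields documented above are normally assembled through these generated With* builders and handed to server-side apply. Below is a minimal sketch under illustrative assumptions: the clientset variable cs, the "example-manager" field manager, the "nginx" class, the host, and the backing "web" Service are hypothetical and not part of this change.

package example

import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	netv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
	"k8s.io/client-go/kubernetes"
)

// applyWebIngress declares a single-rule Ingress and submits it with server-side apply.
func applyWebIngress(ctx context.Context, cs kubernetes.Interface, ns string) error {
	ing := netv1ac.Ingress("web", ns).
		WithSpec(netv1ac.IngressSpec().
			WithIngressClassName("nginx").
			WithRules(netv1ac.IngressRule().
				WithHost("app.example.com").
				WithHTTP(netv1ac.HTTPIngressRuleValue().
					WithPaths(netv1ac.HTTPIngressPath().
						WithPath("/").
						WithPathType(networkingv1.PathTypePrefix).
						WithBackend(netv1ac.IngressBackend().
							WithService(netv1ac.IngressServiceBackend().
								WithName("web").
								WithPort(netv1ac.ServiceBackendPort().WithNumber(80))))))))
	// The field manager identifies which fields this caller owns for future applies.
	_, err := cs.NetworkingV1().Ingresses(ns).Apply(ctx, ing,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}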
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
index bd1327c9..1df2804b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressstatus.go
@@ -20,7 +20,10 @@ package v1
// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
// with apply.
+//
+// IngressStatus describes the current state of the Ingress.
type IngressStatusApplyConfiguration struct {
+ // loadBalancer contains the current status of the load-balancer.
LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
index 44092503..07b403b7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingresstls.go
@@ -20,9 +20,20 @@ package v1
// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
// with apply.
+//
+// IngressTLS describes the transport layer security associated with an ingress.
type IngressTLSApplyConfiguration struct {
- Hosts []string `json:"hosts,omitempty"`
- SecretName *string `json:"secretName,omitempty"`
+ // hosts is a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ Hosts []string `json:"hosts,omitempty"`
+ // secretName is the name of the secret used to terminate TLS traffic on
+ // port 443. Field is left optional to allow TLS routing based on SNI
+ // hostname alone. If the SNI host in a listener conflicts with the "Host"
+ // header field used by an IngressRule, the SNI host is used for termination
+ // and value of the "Host" header is used for routing.
+ SecretName *string `json:"secretName,omitempty"`
}
// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go
index a5407dbc..c0e4298a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddress.go
@@ -29,10 +29,22 @@ import (
// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use
// with apply.
+//
+// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
+// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
+// An IP address can be represented in different formats. To guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format: four decimal octets separated
+// by dots with leading zeros suppressed for IPv4, and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddressApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the desired state of the IPAddress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
}
// IPAddress constructs a declarative configuration of the IPAddress type for use with
@@ -45,29 +57,14 @@ func IPAddress(name string) *IPAddressApplyConfiguration {
return b
}
-// ExtractIPAddress extracts the applied configuration owned by fieldManager from
-// iPAddress. If no managedFields are found in iPAddress for fieldManager, a
-// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractIPAddressFrom extracts the applied configuration owned by fieldManager from
+// iPAddress for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// iPAddress must be a unmodified IPAddress API object that was retrieved from the Kubernetes API.
-// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractIPAddressFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractIPAddress(iPAddress *networkingv1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
- return extractIPAddress(iPAddress, fieldManager, "")
-}
-
-// ExtractIPAddressStatus is the same as ExtractIPAddress except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractIPAddressStatus(iPAddress *networkingv1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
- return extractIPAddress(iPAddress, fieldManager, "status")
-}
-
-func extractIPAddress(iPAddress *networkingv1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) {
+func ExtractIPAddressFrom(iPAddress *networkingv1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) {
b := &IPAddressApplyConfiguration{}
err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1.IPAddress"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +76,21 @@ func extractIPAddress(iPAddress *networkingv1.IPAddress, fieldManager string, su
b.WithAPIVersion("networking.k8s.io/v1")
return b, nil
}
+
+// ExtractIPAddress extracts the applied configuration owned by fieldManager from
+// iPAddress. If no managedFields are found in iPAddress for fieldManager, a
+// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// iPAddress must be an unmodified IPAddress API object that was retrieved from the Kubernetes API.
+// ExtractIPAddress provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIPAddress(iPAddress *networkingv1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
+ return ExtractIPAddressFrom(iPAddress, fieldManager, "")
+}
+
func (b IPAddressApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go
index bac6e791..e9dce93b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipaddressspec.go
@@ -20,7 +20,11 @@ package v1
// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use
// with apply.
+//
+// IPAddressSpec describes the attributes of an IP address.
type IPAddressSpecApplyConfiguration struct {
+ // ParentRef references the resource that an IPAddress is attached to.
+ // An IPAddress must reference a parent object.
ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
index f3447a8f..95bc99f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ipblock.go
@@ -20,8 +20,17 @@ package v1
// IPBlockApplyConfiguration represents a declarative configuration of the IPBlock type for use
// with apply.
+//
+// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
type IPBlockApplyConfiguration struct {
- CIDR *string `json:"cidr,omitempty"`
+ // cidr is a string representing the IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ CIDR *string `json:"cidr,omitempty"`
+ // except is a slice of CIDRs that should not be included within an IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ // Except values will be rejected if they are outside the cidr range
Except []string `json:"except,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
index 6c6a76e8..f3e1e1e5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
@@ -29,10 +29,15 @@ import (
// NetworkPolicyApplyConfiguration represents a declarative configuration of the NetworkPolicy type for use
// with apply.
+//
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
type NetworkPolicyApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
+ // spec represents the specification of the desired behavior for this NetworkPolicy.
+ Spec *NetworkPolicySpecApplyConfiguration `json:"spec,omitempty"`
}
// NetworkPolicy constructs a declarative configuration of the NetworkPolicy type for use with
@@ -46,29 +51,14 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
return b
}
-// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
-// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
-// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractNetworkPolicyFrom extracts the applied configuration owned by fieldManager from
+// networkPolicy for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
-// ExtractNetworkPolicy provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractNetworkPolicyFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
- return extractNetworkPolicy(networkPolicy, fieldManager, "")
-}
-
-// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractNetworkPolicyStatus(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
- return extractNetworkPolicy(networkPolicy, fieldManager, "status")
-}
-
-func extractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
+func ExtractNetworkPolicyFrom(networkPolicy *networkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
b := &NetworkPolicyApplyConfiguration{}
err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +71,21 @@ func extractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManage
b.WithAPIVersion("networking.k8s.io/v1")
return b, nil
}
+
+// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
+// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
+// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// networkPolicy must be an unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
+// ExtractNetworkPolicy provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractNetworkPolicy(networkPolicy *networkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return ExtractNetworkPolicyFrom(networkPolicy, fieldManager, "")
+}
+
func (b NetworkPolicyApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
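The exported Extract helpers above exist for the extract/modify-in-place/apply workflow their comments describe. A minimal sketch, assuming a typed clientset cs; the "example-manager" field manager and the label values are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	netv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
	"k8s.io/client-go/kubernetes"
)

// reclaimPodSelector reads a live NetworkPolicy, extracts the fields owned by
// "example-manager", adjusts the pod selector, and applies the result back.
func reclaimPodSelector(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	live, err := cs.NetworkingV1().NetworkPolicies(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Only the fields previously applied by this field manager are extracted.
	cfg, err := netv1ac.ExtractNetworkPolicy(live, "example-manager")
	if err != nil {
		return err
	}
	if cfg.Spec == nil {
		cfg.WithSpec(netv1ac.NetworkPolicySpec())
	}
	// Modify in place, then hand the configuration back to server-side apply.
	cfg.Spec.WithPodSelector(metav1ac.LabelSelector().WithMatchLabels(map[string]string{"tier": "backend"}))
	_, err = cs.NetworkingV1().NetworkPolicies(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}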
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
index 46e2706e..694c148c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyegressrule.go
@@ -20,9 +20,23 @@ package v1
// NetworkPolicyEgressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyEgressRule type for use
// with apply.
+//
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
type NetworkPolicyEgressRuleApplyConfiguration struct {
+ // ports is a list of destination ports for outgoing traffic.
+ // Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
- To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
+ // to is a list of destinations for outgoing traffic of pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all destinations (traffic not restricted by
+ // destination). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the to list.
+ To []NetworkPolicyPeerApplyConfiguration `json:"to,omitempty"`
}
// NetworkPolicyEgressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyEgressRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
index 6e987597..a597f92e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyingressrule.go
@@ -20,9 +20,22 @@ package v1
// NetworkPolicyIngressRuleApplyConfiguration represents a declarative configuration of the NetworkPolicyIngressRule type for use
// with apply.
+//
+// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
type NetworkPolicyIngressRuleApplyConfiguration struct {
+ // ports is a list of ports which should be made accessible on the pods selected for
+ // this rule. Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
Ports []NetworkPolicyPortApplyConfiguration `json:"ports,omitempty"`
- From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
+ // from is a list of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all sources (traffic not restricted by
+ // source). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the from list.
+ From []NetworkPolicyPeerApplyConfiguration `json:"from,omitempty"`
}
// NetworkPolicyIngressRuleApplyConfiguration constructs a declarative configuration of the NetworkPolicyIngressRule type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
index 716ceeee..c16fbf07 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicypeer.go
@@ -24,10 +24,27 @@ import (
// NetworkPolicyPeerApplyConfiguration represents a declarative configuration of the NetworkPolicyPeer type for use
// with apply.
+//
+// NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of
+// fields are allowed.
type NetworkPolicyPeerApplyConfiguration struct {
- PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // podSelector is a label selector which selects pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ //
+ // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the Namespaces selected by NamespaceSelector.
+ // Otherwise it selects the pods matching podSelector in the policy's own namespace.
+ PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+ // standard label selector semantics; if present but empty, it selects all namespaces.
+ //
+ // If podSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the namespaces selected by namespaceSelector.
+ // Otherwise it selects all pods in the namespaces selected by namespaceSelector.
NamespaceSelector *metav1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"`
- IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
+ // ipBlock defines policy on a particular IPBlock. If this field is set then
+ // neither of the other fields can be.
+ IPBlock *IPBlockApplyConfiguration `json:"ipBlock,omitempty"`
}
// NetworkPolicyPeerApplyConfiguration constructs a declarative configuration of the NetworkPolicyPeer type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
index 2ded0aec..483a0f95 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyport.go
@@ -25,10 +25,22 @@ import (
// NetworkPolicyPortApplyConfiguration represents a declarative configuration of the NetworkPolicyPort type for use
// with apply.
+//
+// NetworkPolicyPort describes a port to allow traffic on
type NetworkPolicyPortApplyConfiguration struct {
- Protocol *corev1.Protocol `json:"protocol,omitempty"`
- Port *intstr.IntOrString `json:"port,omitempty"`
- EndPort *int32 `json:"endPort,omitempty"`
+ // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ Protocol *corev1.Protocol `json:"protocol,omitempty"`
+ // port represents the port on the given protocol. This can either be a numerical or named
+ // port on a pod. If this field is not provided, this matches all port names and
+ // numbers.
+ // If present, only traffic on the specified protocol AND port will be matched.
+ Port *intstr.IntOrString `json:"port,omitempty"`
+ // endPort indicates that the range of ports from port to endPort if set, inclusive,
+ // should be allowed by the policy. This field cannot be defined if the port field
+ // is not defined or if the port field is defined as a named (string) port.
+ // The endPort must be equal or greater than port.
+ EndPort *int32 `json:"endPort,omitempty"`
}
// NetworkPolicyPortApplyConfiguration constructs a declarative configuration of the NetworkPolicyPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
index 48369b92..734c238e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicyspec.go
@@ -25,11 +25,43 @@ import (
// NetworkPolicySpecApplyConfiguration represents a declarative configuration of the NetworkPolicySpec type for use
// with apply.
+//
+// NetworkPolicySpec provides the specification of a NetworkPolicy
type NetworkPolicySpecApplyConfiguration struct {
- PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
- Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"`
- Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"`
- PolicyTypes []networkingv1.PolicyType `json:"policyTypes,omitempty"`
+ // podSelector selects the pods to which this NetworkPolicy object applies.
+ // The array of rules is applied to any pods selected by this field. An empty
+ // selector matches all pods in the policy's namespace.
+ // Multiple network policies can select the same set of pods. In this case,
+ // the ingress rules for each are combined additively.
+ // This field is optional. If it is not specified, it defaults to an empty selector.
+ PodSelector *metav1.LabelSelectorApplyConfiguration `json:"podSelector,omitempty"`
+ // ingress is a list of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+ // (and cluster policy otherwise allows the traffic), OR if the traffic source is
+ // the pod's local node, OR if the traffic matches at least one ingress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy does not allow any traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default)
+ Ingress []NetworkPolicyIngressRuleApplyConfiguration `json:"ingress,omitempty"`
+ // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
+ // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+ // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default).
+ // This field is beta-level in 1.8
+ Egress []NetworkPolicyEgressRuleApplyConfiguration `json:"egress,omitempty"`
+ // policyTypes is a list of rule types that the NetworkPolicy relates to.
+ // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
+ // If this field is not specified, it will default based on the existence of ingress or egress rules;
+ // policies that contain an egress section are assumed to affect egress, and all policies
+ // (whether or not they contain an ingress section) are assumed to affect ingress.
+ // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+ // Likewise, if you want to write a policy that specifies that no egress is allowed,
+ // you must specify a policyTypes value that includes "Egress" (since such a policy would not include
+ // an egress section and would otherwise default to just [ "Ingress" ]).
+ // This field is beta-level in 1.8
+ PolicyTypes []networkingv1.PolicyType `json:"policyTypes,omitempty"`
}
// NetworkPolicySpecApplyConfiguration constructs a declarative configuration of the NetworkPolicySpec type for use with
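To make the interplay of the podSelector, ingress, and policyTypes fields documented above concrete, here is a minimal sketch built with these apply configurations; the labels, the TCP/8080 port, and the helper name are illustrative assumptions.

package example

import (
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	netv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
)

// apiIngressPolicySpec allows TCP/8080 into pods labelled app=api, but only from
// pods labelled role=frontend in the same namespace; all other inbound traffic to
// the selected pods is denied because Ingress is the only declared policy type.
func apiIngressPolicySpec() *netv1ac.NetworkPolicySpecApplyConfiguration {
	return netv1ac.NetworkPolicySpec().
		WithPodSelector(metav1ac.LabelSelector().WithMatchLabels(map[string]string{"app": "api"})).
		WithIngress(netv1ac.NetworkPolicyIngressRule().
			WithFrom(netv1ac.NetworkPolicyPeer().
				WithPodSelector(metav1ac.LabelSelector().WithMatchLabels(map[string]string{"role": "frontend"}))).
			WithPorts(netv1ac.NetworkPolicyPort().
				WithProtocol(corev1.ProtocolTCP).
				WithPort(intstr.FromInt32(8080)))).
		WithPolicyTypes(networkingv1.PolicyTypeIngress)
}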
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go
index 896c0f8a..c725e697 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/parentreference.go
@@ -20,11 +20,17 @@ package v1
// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use
// with apply.
+//
+// ParentReference describes a reference to a parent object.
type ParentReferenceApplyConfiguration struct {
- Group *string `json:"group,omitempty"`
- Resource *string `json:"resource,omitempty"`
+ // Group is the group of the object being referenced.
+ Group *string `json:"group,omitempty"`
+ // Resource is the resource of the object being referenced.
+ Resource *string `json:"resource,omitempty"`
+ // Namespace is the namespace of the object being referenced.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Name is the name of the object being referenced.
+ Name *string `json:"name,omitempty"`
}
// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
index 517f9748..aa381237 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicebackendport.go
@@ -20,9 +20,15 @@ package v1
// ServiceBackendPortApplyConfiguration represents a declarative configuration of the ServiceBackendPort type for use
// with apply.
+//
+// ServiceBackendPort is the service port being referenced.
type ServiceBackendPortApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Number *int32 `json:"number,omitempty"`
+ // name is the name of the port on the Service.
+ // This is a mutually exclusive setting with "Number".
+ Name *string `json:"name,omitempty"`
+ // number is the numerical port number (e.g. 80) on the Service.
+ // This is a mutually exclusive setting with "Name".
+ Number *int32 `json:"number,omitempty"`
}
// ServiceBackendPortApplyConfiguration constructs a declarative configuration of the ServiceBackendPort type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go
index fc06d85e..8eede13e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidr.go
@@ -29,11 +29,20 @@ import (
// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use
// with apply.
+//
+// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db8::/64).
+// This range is used to allocate ClusterIPs to Service objects.
type ServiceCIDRApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the desired state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents the current state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
}
// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with
@@ -46,6 +55,26 @@ func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
return b
}
+// ExtractServiceCIDRFrom extracts the applied configuration owned by fieldManager from
+// serviceCIDR for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// serviceCIDR must be an unmodified ServiceCIDR API object that was retrieved from the Kubernetes API.
+// ExtractServiceCIDRFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractServiceCIDRFrom(serviceCIDR *networkingv1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
+ b := &ServiceCIDRApplyConfiguration{}
+ err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1.ServiceCIDR"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(serviceCIDR.Name)
+
+ b.WithKind("ServiceCIDR")
+ b.WithAPIVersion("networking.k8s.io/v1")
+ return b, nil
+}
+
// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from
// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a
// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractServiceCIDR(serviceCIDR *networkingv1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
- return extractServiceCIDR(serviceCIDR, fieldManager, "")
+ return ExtractServiceCIDRFrom(serviceCIDR, fieldManager, "")
}
-// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractServiceCIDRStatus extracts the applied configuration owned by fieldManager from
+// serviceCIDR for the status subresource.
func ExtractServiceCIDRStatus(serviceCIDR *networkingv1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
- return extractServiceCIDR(serviceCIDR, fieldManager, "status")
+ return ExtractServiceCIDRFrom(serviceCIDR, fieldManager, "status")
}
-func extractServiceCIDR(serviceCIDR *networkingv1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
- b := &ServiceCIDRApplyConfiguration{}
- err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1.ServiceCIDR"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(serviceCIDR.Name)
-
- b.WithKind("ServiceCIDR")
- b.WithAPIVersion("networking.k8s.io/v1")
- return b, nil
-}
func (b ServiceCIDRApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
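For clarity on the refactor above: the previously unexported extractServiceCIDR is replaced by the exported ExtractServiceCIDRFrom, and the existing wrappers now delegate to it. A short sketch of the equivalence, with a hypothetical "example-manager" field manager.

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	netv1ac "k8s.io/client-go/applyconfigurations/networking/v1"
)

// extractOwnedServiceCIDR shows the generic entry point and the wrappers side by side.
func extractOwnedServiceCIDR(obj *networkingv1.ServiceCIDR) (*netv1ac.ServiceCIDRApplyConfiguration, *netv1ac.ServiceCIDRApplyConfiguration, error) {
	// Equivalent to netv1ac.ExtractServiceCIDR(obj, "example-manager").
	main, err := netv1ac.ExtractServiceCIDRFrom(obj, "example-manager", "")
	if err != nil {
		return nil, nil, err
	}
	// Equivalent to netv1ac.ExtractServiceCIDRFrom(obj, "example-manager", "status").
	status, err := netv1ac.ExtractServiceCIDRStatus(obj, "example-manager")
	if err != nil {
		return nil, nil, err
	}
	return main, status, nil
}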
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go
index f84b7ba1..9372a162 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrspec.go
@@ -20,7 +20,12 @@ package v1
// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use
// with apply.
+//
+// ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.
type ServiceCIDRSpecApplyConfiguration struct {
+ // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
+ // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
+ // This field is immutable.
CIDRs []string `json:"cidrs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go
index 9e3d52ae..d7135dc1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1/servicecidrstatus.go
@@ -24,7 +24,11 @@ import (
// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use
// with apply.
+//
+// ServiceCIDRStatus describes the current state of the ServiceCIDR.
type ServiceCIDRStatusApplyConfiguration struct {
+ // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
+ // Current service state
Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
index c7301c6a..c8e51bfd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingresspath.go
@@ -24,10 +24,34 @@ import (
// HTTPIngressPathApplyConfiguration represents a declarative configuration of the HTTPIngressPath type for use
// with apply.
+//
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
type HTTPIngressPathApplyConfiguration struct {
- Path *string `json:"path,omitempty"`
- PathType *networkingv1beta1.PathType `json:"pathType,omitempty"`
- Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
+ // path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
+ Path *string `json:"path,omitempty"`
+ // pathType determines the interpretation of the path matching. PathType can
+ // be one of the following values:
+ // * Exact: Matches the URL path exactly.
+ // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+ // done on a path element by element basis. A path element refers to the
+ // list of labels in the path split by the '/' separator. A request is a
+ // match for path p if every element of p is an element-wise prefix of the
+ // request path. Note that if the last element of the path is a substring
+ // of the last element in request path, it is not a match (e.g. /foo/bar
+ // matches /foo/bar/baz, but does not match /foo/barbaz).
+ // * ImplementationSpecific: Interpretation of the Path matching is up to
+ // the IngressClass. Implementations can treat this as a separate PathType
+ // or treat it identically to Prefix or Exact path types.
+ // Implementations are required to support all path types.
+ // Defaults to ImplementationSpecific.
+ PathType *networkingv1beta1.PathType `json:"pathType,omitempty"`
+ // backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
}
// HTTPIngressPathApplyConfiguration constructs a declarative configuration of the HTTPIngressPath type for use with
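The Prefix matching rule described in the pathType comment above can be illustrated with a small sketch; this is only an element-wise comparison mirroring the documented examples, not the logic of any particular ingress controller.

package example

import "strings"

// prefixPathMatches sketches the documented "Prefix" PathType rule: matching is
// element-wise on '/'-separated labels, so "/foo/bar" matches "/foo/bar/baz" but
// not "/foo/barbaz".
func prefixPathMatches(rulePath, requestPath string) bool {
	trimmedRule := strings.Trim(rulePath, "/")
	if trimmedRule == "" {
		return true // "/" is a prefix of every path
	}
	ruleElems := strings.Split(trimmedRule, "/")
	reqElems := strings.Split(strings.Trim(requestPath, "/"), "/")
	if len(ruleElems) > len(reqElems) {
		return false
	}
	for i, el := range ruleElems {
		if reqElems[i] != el {
			return false
		}
	}
	return true
}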
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
index 12454522..86569751 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/httpingressrulevalue.go
@@ -20,7 +20,14 @@ package v1beta1
// HTTPIngressRuleValueApplyConfiguration represents a declarative configuration of the HTTPIngressRuleValue type for use
// with apply.
+//
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend,
+// where parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
type HTTPIngressRuleValueApplyConfiguration struct {
+ // paths is a collection of paths that map requests to backends.
Paths []HTTPIngressPathApplyConfiguration `json:"paths,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
index 9ef43b14..061c29c9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
@@ -29,11 +29,22 @@ import (
// IngressApplyConfiguration represents a declarative configuration of the Ingress type for use
// with apply.
+//
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
type IngressApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
- Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the desired state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IngressSpecApplyConfiguration `json:"spec,omitempty"`
+ // status is the current state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *IngressStatusApplyConfiguration `json:"status,omitempty"`
}
// Ingress constructs a declarative configuration of the Ingress type for use with
@@ -47,6 +58,27 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
return b
}
+// ExtractIngressFrom extracts the applied configuration owned by fieldManager from
+// ingress for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// ingress must be an unmodified Ingress API object that was retrieved from the Kubernetes API.
+// ExtractIngressFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIngressFrom(ingress *networkingv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
+ b := &IngressApplyConfiguration{}
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(ingress.Name)
+ b.WithNamespace(ingress.Namespace)
+
+ b.WithKind("Ingress")
+ b.WithAPIVersion("networking.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +89,16 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractIngress(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "")
+ return ExtractIngressFrom(ingress, fieldManager, "")
}
-// ExtractIngressStatus is the same as ExtractIngress except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractIngressStatus extracts the applied configuration owned by fieldManager from
+// ingress for the status subresource.
func ExtractIngressStatus(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
- return extractIngress(ingress, fieldManager, "status")
+ return ExtractIngressFrom(ingress, fieldManager, "status")
}
-func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
- b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(ingress.Name)
- b.WithNamespace(ingress.Namespace)
-
- b.WithKind("Ingress")
- b.WithAPIVersion("networking.k8s.io/v1beta1")
- return b, nil
-}
func (b IngressApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
index 9d386f16..9d85f45a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressbackend.go
@@ -25,10 +25,17 @@ import (
// IngressBackendApplyConfiguration represents a declarative configuration of the IngressBackend type for use
// with apply.
+//
+// IngressBackend describes all endpoints for a given service and port.
type IngressBackendApplyConfiguration struct {
- ServiceName *string `json:"serviceName,omitempty"`
- ServicePort *intstr.IntOrString `json:"servicePort,omitempty"`
- Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
+ // serviceName specifies the name of the referenced service.
+ ServiceName *string `json:"serviceName,omitempty"`
+ // servicePort specifies the port of the referenced service.
+ ServicePort *intstr.IntOrString `json:"servicePort,omitempty"`
+ // resource is an ObjectRef to another Kubernetes resource in the namespace
+ // of the Ingress object. If resource is specified, serviceName and servicePort
+ // must not be specified.
+ Resource *v1.TypedLocalObjectReferenceApplyConfiguration `json:"resource,omitempty"`
}
// IngressBackendApplyConfiguration constructs a declarative configuration of the IngressBackend type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
index ec8062c5..c4d6df50 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
@@ -29,10 +29,20 @@ import (
// IngressClassApplyConfiguration represents a declarative configuration of the IngressClass type for use
// with apply.
+//
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
type IngressClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the desired state of the IngressClass.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IngressClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// IngressClass constructs a declarative configuration of the IngressClass type for use with
@@ -45,29 +55,14 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
return b
}
-// ExtractIngressClass extracts the applied configuration owned by fieldManager from
-// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
-// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractIngressClassFrom extracts the applied configuration owned by fieldManager from
+// ingressClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API.
-// ExtractIngressClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractIngressClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
- return extractIngressClass(ingressClass, fieldManager, "")
-}
-
-// ExtractIngressClassStatus is the same as ExtractIngressClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractIngressClassStatus(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
- return extractIngressClass(ingressClass, fieldManager, "status")
-}
-
-func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
+func ExtractIngressClassFrom(ingressClass *networkingv1beta1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
b := &IngressClassApplyConfiguration{}
err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1beta1.IngressClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +74,21 @@ func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldMana
b.WithAPIVersion("networking.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractIngressClass extracts the applied configuration owned by fieldManager from
+// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
+// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// ingressClass must be an unmodified IngressClass API object that was retrieved from the Kubernetes API.
+// ExtractIngressClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return ExtractIngressClassFrom(ingressClass, fieldManager, "")
+}
+
func (b IngressClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
index 2a307a67..a5ca2ed3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassparametersreference.go
@@ -20,11 +20,24 @@ package v1beta1
// IngressClassParametersReferenceApplyConfiguration represents a declarative configuration of the IngressClassParametersReference type for use
// with apply.
+//
+// IngressClassParametersReference identifies an API object. This can be used
+// to specify a cluster or namespace-scoped resource.
type IngressClassParametersReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
- Scope *string `json:"scope,omitempty"`
+ // apiGroup is the group for the resource being referenced. If APIGroup is
+ // not specified, the specified Kind must be in the core API group. For any
+ // other third-party types, APIGroup is required.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // kind is the type of resource being referenced.
+ Kind *string `json:"kind,omitempty"`
+ // name is the name of resource being referenced.
+ Name *string `json:"name,omitempty"`
+ // scope represents if this refers to a cluster or namespace scoped resource.
+ // This may be set to "Cluster" (default) or "Namespace".
+ Scope *string `json:"scope,omitempty"`
+ // namespace is the namespace of the resource being referenced. This field is
+ // required when scope is set to "Namespace" and must be unset when scope is set to
+ // "Cluster".
Namespace *string `json:"namespace,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
index eefbf62b..b17f3e10 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclassspec.go
@@ -20,8 +20,19 @@ package v1beta1
// IngressClassSpecApplyConfiguration represents a declarative configuration of the IngressClassSpec type for use
// with apply.
+//
+// IngressClassSpec provides information about the class of an Ingress.
type IngressClassSpecApplyConfiguration struct {
- Controller *string `json:"controller,omitempty"`
+ // controller refers to the name of the controller that should handle this
+ // class. This allows for different "flavors" that are controlled by the
+ // same controller. For example, you may have different parameters for the
+ // same implementing controller. This should be specified as a
+ // domain-prefixed path no more than 250 characters in length, e.g.
+ // "acme.io/ingress-controller". This field is immutable.
+ Controller *string `json:"controller,omitempty"`
+ // parameters is a link to a custom resource containing additional
+ // configuration for the controller. This is optional if the controller does
+ // not require extra parameters.
Parameters *IngressClassParametersReferenceApplyConfiguration `json:"parameters,omitempty"`
}
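A minimal builder sketch exercising the fields documented above; the acme.io controller name, the parameters kind/name and the namespace are illustrative assumptions.

package main

import (
	netapply "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func exampleIngressClass() *netapply.IngressClassApplyConfiguration {
	// Scope "Namespace" requires Namespace to be set; with scope "Cluster" it must be left unset.
	params := netapply.IngressClassParametersReference().
		WithAPIGroup("acme.io").
		WithKind("IngressParameters").
		WithName("ingress-params").
		WithScope("Namespace").
		WithNamespace("ingress-system")

	return netapply.IngressClass("acme").
		WithSpec(netapply.IngressClassSpec().
			// Domain-prefixed controller name, immutable once set.
			WithController("acme.io/ingress-controller").
			WithParameters(params))
}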
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
index 12dbc359..b831be67 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalanceringress.go
@@ -20,10 +20,15 @@ package v1beta1
// IngressLoadBalancerIngressApplyConfiguration represents a declarative configuration of the IngressLoadBalancerIngress type for use
// with apply.
+//
+// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
type IngressLoadBalancerIngressApplyConfiguration struct {
- IP *string `json:"ip,omitempty"`
- Hostname *string `json:"hostname,omitempty"`
- Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
+ // ip is set for load-balancer ingress points that are IP based.
+ IP *string `json:"ip,omitempty"`
+ // hostname is set for load-balancer ingress points that are DNS based.
+ Hostname *string `json:"hostname,omitempty"`
+ // ports provides information about the ports exposed by this LoadBalancer.
+ Ports []IngressPortStatusApplyConfiguration `json:"ports,omitempty"`
}
// IngressLoadBalancerIngressApplyConfiguration constructs a declarative configuration of the IngressLoadBalancerIngress type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
index e896ab34..322935e3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressloadbalancerstatus.go
@@ -20,7 +20,10 @@ package v1beta1
// IngressLoadBalancerStatusApplyConfiguration represents a declarative configuration of the IngressLoadBalancerStatus type for use
// with apply.
+//
+// LoadBalancerStatus represents the status of a load-balancer.
type IngressLoadBalancerStatusApplyConfiguration struct {
+ // ingress is a list containing ingress points for the load-balancer.
Ingress []IngressLoadBalancerIngressApplyConfiguration `json:"ingress,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
index 4ee3f016..5de062db 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressportstatus.go
@@ -24,10 +24,23 @@ import (
// IngressPortStatusApplyConfiguration represents a declarative configuration of the IngressPortStatus type for use
// with apply.
+//
+// IngressPortStatus represents the error condition of a service port
type IngressPortStatusApplyConfiguration struct {
- Port *int32 `json:"port,omitempty"`
+ // port is the port number of the ingress port.
+ Port *int32 `json:"port,omitempty"`
+ // protocol is the protocol of the ingress port.
+ // The supported values are: "TCP", "UDP", "SCTP"
Protocol *v1.Protocol `json:"protocol,omitempty"`
- Error *string `json:"error,omitempty"`
+ // error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ Error *string `json:"error,omitempty"`
}
// IngressPortStatusApplyConfiguration constructs a declarative configuration of the IngressPortStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
index 809fada9..6c8b973e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrule.go
@@ -20,8 +20,39 @@ package v1beta1
// IngressRuleApplyConfiguration represents a declarative configuration of the IngressRule type for use
// with apply.
+//
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
type IngressRuleApplyConfiguration struct {
- Host *string `json:"host,omitempty"`
+ // host is the fully qualified domain name of a network host, as defined by RFC 3986.
+ // Note the following deviations from the "host" part of the
+ // URI as defined in RFC 3986:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+ // the IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the
+ // IngressRuleValue. If the host is unspecified, the Ingress routes all
+ // traffic based on the specified IngressRuleValue.
+ //
+ // host can be "precise" which is a domain name without the terminating dot of
+ // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+ // prefixed with a single wildcard label (e.g. "*.foo.com").
+ // The wildcard character '*' must appear by itself as the first DNS label and
+ // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+ // Requests will be matched against the Host field in the following way:
+ // 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+ // 2. If Host is a wildcard, then the request matches this rule if the http host header
+// is equal to the suffix (removing the first label) of the wildcard rule.
+ Host *string `json:"host,omitempty"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to a http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
IngressRuleValueApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
index 4a641247..502384fa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressrulevalue.go
@@ -20,6 +20,11 @@ package v1beta1
// IngressRuleValueApplyConfiguration represents a declarative configuration of the IngressRuleValue type for use
// with apply.
+//
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
type IngressRuleValueApplyConfiguration struct {
HTTP *HTTPIngressRuleValueApplyConfiguration `json:"http,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
index 58fbde8b..fd616d68 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressspec.go
@@ -20,11 +20,34 @@ package v1beta1
// IngressSpecApplyConfiguration represents a declarative configuration of the IngressSpec type for use
// with apply.
+//
+// IngressSpec describes the Ingress the user wishes to exist.
type IngressSpecApplyConfiguration struct {
- IngressClassName *string `json:"ingressClassName,omitempty"`
- Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
- TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
- Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
+ // ingressClassName is the name of the IngressClass cluster resource. The
+ // associated IngressClass defines which controller will implement the
+ // resource. This replaces the deprecated `kubernetes.io/ingress.class`
+ // annotation. For backwards compatibility, when that annotation is set, it
+ // must be given precedence over this field. The controller may emit a
+ // warning if the field and annotation have different values.
+ // Implementations of this API should ignore Ingresses without a class
+ // specified. An IngressClass resource may be marked as default, which can
+ // be used to set a default value for this field. For more information,
+ // refer to the IngressClass documentation.
+ IngressClassName *string `json:"ingressClassName,omitempty"`
+ // backend is the default backend capable of servicing requests that don't match any
+ // rule. At least one of 'backend' or 'rules' must be specified. This field
+ // is optional to allow the loadbalancer controller or defaulting logic to
+ // specify a global default.
+ Backend *IngressBackendApplyConfiguration `json:"backend,omitempty"`
+ // tls represents the TLS configuration. Currently the Ingress only supports a
+ // single TLS port, 443. If multiple members of this list specify different hosts,
+ // they will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ TLS []IngressTLSApplyConfiguration `json:"tls,omitempty"`
+ // rules is a list of host rules used to configure the Ingress. If unspecified, or
+ // no rule matches, all traffic is sent to the default backend.
+ Rules []IngressRuleApplyConfiguration `json:"rules,omitempty"`
}
// IngressSpecApplyConfiguration constructs a declarative configuration of the IngressSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
index 3aed6168..2c7d01c9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressstatus.go
@@ -20,7 +20,10 @@ package v1beta1
// IngressStatusApplyConfiguration represents a declarative configuration of the IngressStatus type for use
// with apply.
+//
+// IngressStatus describes the current state of the Ingress.
type IngressStatusApplyConfiguration struct {
+ // loadBalancer contains the current status of the load-balancer.
LoadBalancer *IngressLoadBalancerStatusApplyConfiguration `json:"loadBalancer,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
index 63648cd4..cad7f0ed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingresstls.go
@@ -20,9 +20,20 @@ package v1beta1
// IngressTLSApplyConfiguration represents a declarative configuration of the IngressTLS type for use
// with apply.
+//
+// IngressTLS describes the transport layer security associated with an Ingress.
type IngressTLSApplyConfiguration struct {
- Hosts []string `json:"hosts,omitempty"`
- SecretName *string `json:"secretName,omitempty"`
+ // hosts is a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ Hosts []string `json:"hosts,omitempty"`
+ // secretName is the name of the secret used to terminate TLS traffic on
+ // port 443. Field is left optional to allow TLS routing based on SNI
+ // hostname alone. If the SNI host in a listener conflicts with the "Host"
+ // header field used by an IngressRule, the SNI host is used for termination
+ // and value of the Host header is used for routing.
+ SecretName *string `json:"secretName,omitempty"`
}
// IngressTLSApplyConfiguration constructs a declarative configuration of the IngressTLS type for use with
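Tying the spec, rule and TLS comments above together, a minimal sketch; the host, secret and service names are illustrative, and current clusters expose Ingress via networking.k8s.io/v1, so the v1beta1 builders are used only to match the vendored package in this hunk.

package main

import (
	apinetv1beta1 "k8s.io/api/networking/v1beta1"
	"k8s.io/apimachinery/pkg/util/intstr"
	netapply "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func exampleIngressSpec() *netapply.IngressSpecApplyConfiguration {
	return netapply.IngressSpec().
		WithIngressClassName("acme").
		// TLS terminates on port 443; hosts must match the names in the secret.
		WithTLS(netapply.IngressTLS().
			WithHosts("foo.bar.com").
			WithSecretName("foo-bar-tls")).
		// Precise host match; requests matching no rule fall through to the default backend.
		WithRules(netapply.IngressRule().
			WithHost("foo.bar.com").
			WithHTTP(netapply.HTTPIngressRuleValue().
				WithPaths(netapply.HTTPIngressPath().
					WithPath("/").
					WithPathType(apinetv1beta1.PathTypePrefix).
					WithBackend(netapply.IngressBackend().
						WithServiceName("web").
						WithServicePort(intstr.FromInt32(80))))))
}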
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
index f1f9680d..28167f1f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go
@@ -29,10 +29,22 @@ import (
// IPAddressApplyConfiguration represents a declarative configuration of the IPAddress type for use
// with apply.
+//
+// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
+// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
+// An IP address can be represented in different formats; to guarantee the uniqueness of the IP,
+// the name of the object is the IP address in canonical format, four decimal digits separated
+// by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6.
+// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
+// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddressApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the desired state of the IPAddress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *IPAddressSpecApplyConfiguration `json:"spec,omitempty"`
}
// IPAddress constructs a declarative configuration of the IPAddress type for use with
@@ -45,29 +57,14 @@ func IPAddress(name string) *IPAddressApplyConfiguration {
return b
}
-// ExtractIPAddress extracts the applied configuration owned by fieldManager from
-// iPAddress. If no managedFields are found in iPAddress for fieldManager, a
-// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractIPAddressFrom extracts the applied configuration owned by fieldManager from
+// iPAddress for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// iPAddress must be an unmodified IPAddress API object that was retrieved from the Kubernetes API.
-// ExtractIPAddress provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractIPAddressFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
- return extractIPAddress(iPAddress, fieldManager, "")
-}
-
-// ExtractIPAddressStatus is the same as ExtractIPAddress except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractIPAddressStatus(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
- return extractIPAddress(iPAddress, fieldManager, "status")
-}
-
-func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) {
+func ExtractIPAddressFrom(iPAddress *networkingv1beta1.IPAddress, fieldManager string, subresource string) (*IPAddressApplyConfiguration, error) {
b := &IPAddressApplyConfiguration{}
err := managedfields.ExtractInto(iPAddress, internal.Parser().Type("io.k8s.api.networking.v1beta1.IPAddress"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +76,21 @@ func extractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager strin
b.WithAPIVersion("networking.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractIPAddress extracts the applied configuration owned by fieldManager from
+// iPAddress. If no managedFields are found in iPAddress for fieldManager, an
+// IPAddressApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// iPAddress must be an unmodified IPAddress API object that was retrieved from the Kubernetes API.
+// ExtractIPAddress provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractIPAddress(iPAddress *networkingv1beta1.IPAddress, fieldManager string) (*IPAddressApplyConfiguration, error) {
+ return ExtractIPAddressFrom(iPAddress, fieldManager, "")
+}
+
func (b IPAddressApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go
index 76b02137..c0c012aa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go
@@ -20,7 +20,11 @@ package v1beta1
// IPAddressSpecApplyConfiguration represents a declarative configuration of the IPAddressSpec type for use
// with apply.
+//
+// IPAddressSpec describes the attributes in an IP Address.
type IPAddressSpecApplyConfiguration struct {
+ // ParentRef references the resource that an IPAddress is attached to.
+ // An IPAddress must reference a parent object.
ParentRef *ParentReferenceApplyConfiguration `json:"parentRef,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go
index 1863938f..edcc3b95 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/parentreference.go
@@ -20,11 +20,17 @@ package v1beta1
// ParentReferenceApplyConfiguration represents a declarative configuration of the ParentReference type for use
// with apply.
+//
+// ParentReference describes a reference to a parent object.
type ParentReferenceApplyConfiguration struct {
- Group *string `json:"group,omitempty"`
- Resource *string `json:"resource,omitempty"`
+ // Group is the group of the object being referenced.
+ Group *string `json:"group,omitempty"`
+ // Resource is the resource of the object being referenced.
+ Resource *string `json:"resource,omitempty"`
+ // Namespace is the namespace of the object being referenced.
Namespace *string `json:"namespace,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Name is the name of the object being referenced.
+ Name *string `json:"name,omitempty"`
}
// ParentReferenceApplyConfiguration constructs a declarative configuration of the ParentReference type for use with
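A minimal sketch combining the IPAddress, IPAddressSpec and ParentReference fields documented above; the address and the Service it points at are illustrative assumptions.

package main

import (
	netapply "k8s.io/client-go/applyconfigurations/networking/v1beta1"
)

func exampleIPAddress() *netapply.IPAddressApplyConfiguration {
	// The object name is the IP in canonical form (no leading zeros; RFC 5952 for IPv6).
	return netapply.IPAddress("192.168.1.5").
		WithSpec(netapply.IPAddressSpec().
			// ParentRef is required and points at the resource that owns this IP.
			WithParentRef(netapply.ParentReference().
				// Empty group means the core API group.
				WithGroup("").
				WithResource("services").
				WithNamespace("default").
				WithName("kubernetes")))
}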
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
index 37a6ee6f..07281de6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidr.go
@@ -29,11 +29,20 @@ import (
// ServiceCIDRApplyConfiguration represents a declarative configuration of the ServiceCIDR type for use
// with apply.
+//
+// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
+// This range is used to allocate ClusterIPs to Service objects.
type ServiceCIDRApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
+ // spec is the desired state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents the current state of the ServiceCIDR.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
}
// ServiceCIDR constructs a declarative configuration of the ServiceCIDR type for use with
@@ -46,6 +55,26 @@ func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
return b
}
+// ExtractServiceCIDRFrom extracts the applied configuration owned by fieldManager from
+// serviceCIDR for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// serviceCIDR must be an unmodified ServiceCIDR API object that was retrieved from the Kubernetes API.
+// ExtractServiceCIDRFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractServiceCIDRFrom(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
+ b := &ServiceCIDRApplyConfiguration{}
+ err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(serviceCIDR.Name)
+
+ b.WithKind("ServiceCIDR")
+ b.WithAPIVersion("networking.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from
// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a
// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +85,16 @@ func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
// ExtractServiceCIDR provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
- return extractServiceCIDR(serviceCIDR, fieldManager, "")
+ return ExtractServiceCIDRFrom(serviceCIDR, fieldManager, "")
}
-// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractServiceCIDRStatus extracts the applied configuration owned by fieldManager from
+// serviceCIDR for the status subresource.
func ExtractServiceCIDRStatus(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
- return extractServiceCIDR(serviceCIDR, fieldManager, "status")
+ return ExtractServiceCIDRFrom(serviceCIDR, fieldManager, "status")
}
-func extractServiceCIDR(serviceCIDR *networkingv1beta1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
- b := &ServiceCIDRApplyConfiguration{}
- err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1beta1.ServiceCIDR"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(serviceCIDR.Name)
-
- b.WithKind("ServiceCIDR")
- b.WithAPIVersion("networking.k8s.io/v1beta1")
- return b, nil
-}
func (b ServiceCIDRApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
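The status variant follows the same pattern: ExtractServiceCIDRStatus is now a thin wrapper over ExtractServiceCIDRFrom with the "status" subresource. A sketch follows; the clientset, object name, field manager and condition values are illustrative assumptions.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metaapply "k8s.io/client-go/applyconfigurations/meta/v1"
	netapply "k8s.io/client-go/applyconfigurations/networking/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func markServiceCIDRReady(ctx context.Context, cs kubernetes.Interface) error {
	live, err := cs.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "kubernetes", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Equivalent to netapply.ExtractServiceCIDRFrom(live, "example-manager", "status").
	ac, err := netapply.ExtractServiceCIDRStatus(live, "example-manager")
	if err != nil {
		return err
	}

	// Set a condition we own and apply it back to the status subresource.
	ac.WithStatus(netapply.ServiceCIDRStatus().
		WithConditions(metaapply.Condition().
			WithType("Ready").
			WithStatus(metav1.ConditionTrue).
			WithReason("Example").
			WithMessage("CIDR is usable").
			WithLastTransitionTime(metav1.Now())))

	_, err = cs.NetworkingV1beta1().ServiceCIDRs().ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}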
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go
index 1f283532..7652700e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go
@@ -20,7 +20,12 @@ package v1beta1
// ServiceCIDRSpecApplyConfiguration represents a declarative configuration of the ServiceCIDRSpec type for use
// with apply.
+//
+// ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.
type ServiceCIDRSpecApplyConfiguration struct {
+ // CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
+ // from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
+ // This field is immutable.
CIDRs []string `json:"cidrs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go
index f2dd9240..307043d9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go
@@ -24,7 +24,11 @@ import (
// ServiceCIDRStatusApplyConfiguration represents a declarative configuration of the ServiceCIDRStatus type for use
// with apply.
+//
+// ServiceCIDRStatus describes the current state of the ServiceCIDR.
type ServiceCIDRStatusApplyConfiguration struct {
+ // conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
+ // Current service state
Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
index 30ce9fb4..5584ddfd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/overhead.go
@@ -24,7 +24,10 @@ import (
// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
// with apply.
+//
+// Overhead structure represents the resource overhead associated with running a pod.
type OverheadApplyConfiguration struct {
+ // podFixed represents the fixed resource overhead associated with running a pod.
PodFixed *corev1.ResourceList `json:"podFixed,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
index 0c855cfd..6b33701c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
@@ -29,12 +29,38 @@ import (
// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
// with apply.
+//
+// RuntimeClass defines a class of container runtime supported in the cluster.
+// The RuntimeClass is used to determine which container runtime is used to run
+// all containers in a pod. RuntimeClasses are manually defined by a
+// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
+// responsible for resolving the RuntimeClassName reference before running the
+// pod. For more details, see
+// https://kubernetes.io/docs/concepts/containers/runtime-class/
type RuntimeClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Handler *string `json:"handler,omitempty"`
- Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
- Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
+ // handler specifies the underlying runtime and configuration that the CRI
+ // implementation will use to handle pods of this class. The possible values
+ // are specific to the node & CRI configuration. It is assumed that all
+ // handlers are available on every node, and handlers of the same name are
+ // equivalent on every node.
+ // For example, a handler called "runc" might specify that the runc OCI
+ // runtime (using native Linux containers) will be used to run the containers
+ // in a pod.
+ // The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements,
+ // and is immutable.
+ Handler *string `json:"handler,omitempty"`
+ // overhead represents the resource overhead associated with running a pod for a
+ // given RuntimeClass. For more details, see
+ // https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/
+ Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
+ // scheduling holds the scheduling constraints to ensure that pods running
+ // with this RuntimeClass are scheduled to nodes that support it.
+ // If scheduling is nil, this RuntimeClass is assumed to be supported by all
+ // nodes.
+ Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
}
// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
@@ -47,29 +73,14 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
return b
}
-// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
-// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
-// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRuntimeClassFrom extracts the applied configuration owned by fieldManager from
+// runtimeClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
-// ExtractRuntimeClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRuntimeClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "")
-}
-
-// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRuntimeClassStatus(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "status")
-}
-
-func extractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
+func ExtractRuntimeClassFrom(runtimeClass *nodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +92,21 @@ func extractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string,
b.WithAPIVersion("node.k8s.io/v1")
return b, nil
}
+
+// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
+// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
+// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
+// ExtractRuntimeClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRuntimeClass(runtimeClass *nodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return ExtractRuntimeClassFrom(runtimeClass, fieldManager, "")
+}
+
func (b RuntimeClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
index b45400fb..849a9a4e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1/scheduling.go
@@ -24,9 +24,20 @@ import (
// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
// with apply.
+//
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
type SchedulingApplyConfiguration struct {
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- Tolerations []corev1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // nodeSelector lists labels that must be present on nodes that support this
+ // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+ // node matched by this selector. The RuntimeClass nodeSelector is merged
+ // with a pod's existing nodeSelector. Any conflicts will cause the pod to
+ // be rejected in admission.
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // tolerations are appended (excluding duplicates) to pods running with this
+ // RuntimeClass during admission, effectively unioning the set of nodes
+ // tolerated by the pod and the RuntimeClass.
+ Tolerations []corev1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
}
// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
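Pulling the handler, overhead and scheduling comments above together, a minimal builder sketch; the gvisor/runsc handler, label key and resource values are illustrative assumptions.

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	nodeapply "k8s.io/client-go/applyconfigurations/node/v1"
)

func exampleRuntimeClass() *nodeapply.RuntimeClassApplyConfiguration {
	return nodeapply.RuntimeClass("gvisor").
		// Lowercase, DNS-label compliant, immutable.
		WithHandler("runsc").
		// Fixed per-pod cost added on top of the containers' own requests.
		WithOverhead(nodeapply.Overhead().
			WithPodFixed(corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			})).
		// Keep pods using this class on nodes that actually provide the runtime.
		WithScheduling(nodeapply.Scheduling().
			WithNodeSelector(map[string]string{"runtime.example.com/gvisor": "true"}))
}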
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
index 84770a09..ce66afc6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/overhead.go
@@ -24,7 +24,10 @@ import (
// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
// with apply.
+//
+// Overhead structure represents the resource overhead associated with running a pod.
type OverheadApplyConfiguration struct {
+ // podFixed represents the fixed resource overhead associated with running a pod.
PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
index f185c316..5fbf9a64 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
@@ -29,10 +29,21 @@ import (
// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
// with apply.
+//
+// RuntimeClass defines a class of container runtime supported in the cluster.
+// The RuntimeClass is used to determine which container runtime is used to run
+// all containers in a pod. RuntimeClasses are (currently) manually defined by a
+// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
+// responsible for resolving the RuntimeClassName reference before running the
+// pod. For more details, see
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
type RuntimeClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec represents specification of the RuntimeClass
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ Spec *RuntimeClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
@@ -45,29 +56,14 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
return b
}
-// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
-// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
-// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRuntimeClassFrom extracts the applied configuration owned by fieldManager from
+// runtimeClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
-// ExtractRuntimeClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRuntimeClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "")
-}
-
-// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRuntimeClassStatus(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "status")
-}
-
-func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
+func ExtractRuntimeClassFrom(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1alpha1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +75,21 @@ func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager s
b.WithAPIVersion("node.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
+// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
+// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
+// ExtractRuntimeClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return ExtractRuntimeClassFrom(runtimeClass, fieldManager, "")
+}
+
func (b RuntimeClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
index 1aa43eb1..0ad3269b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclassspec.go
@@ -20,10 +20,32 @@ package v1alpha1
// RuntimeClassSpecApplyConfiguration represents a declarative configuration of the RuntimeClassSpec type for use
// with apply.
+//
+// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters
+// that are required to describe the RuntimeClass to the Container Runtime
+// Interface (CRI) implementation, as well as any other components that need to
+// understand how the pod will be run. The RuntimeClassSpec is immutable.
type RuntimeClassSpecApplyConfiguration struct {
- RuntimeHandler *string `json:"runtimeHandler,omitempty"`
- Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
- Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
+ // runtimeHandler specifies the underlying runtime and configuration that the
+ // CRI implementation will use to handle pods of this class. The possible
+ // values are specific to the node & CRI configuration. It is assumed that
+ // all handlers are available on every node, and handlers of the same name are
+ // equivalent on every node.
+ // For example, a handler called "runc" might specify that the runc OCI
+ // runtime (using native Linux containers) will be used to run the containers
+ // in a pod.
+ // The runtimeHandler must be lowercase, conform to the DNS Label (RFC 1123)
+ // requirements, and is immutable.
+ RuntimeHandler *string `json:"runtimeHandler,omitempty"`
+ // overhead represents the resource overhead associated with running a pod for a
+ // given RuntimeClass. For more details, see
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
+ // scheduling holds the scheduling constraints to ensure that pods running
+ // with this RuntimeClass are scheduled to nodes that support it.
+ // If scheduling is nil, this RuntimeClass is assumed to be supported by all
+ // nodes.
+ Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
}
// RuntimeClassSpecApplyConfiguration constructs a declarative configuration of the RuntimeClassSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
index 6ce49ad8..b79301de 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/scheduling.go
@@ -24,9 +24,20 @@ import (
// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
// with apply.
+//
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
type SchedulingApplyConfiguration struct {
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // nodeSelector lists labels that must be present on nodes that support this
+ // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+ // node matched by this selector. The RuntimeClass nodeSelector is merged
+ // with a pod's existing nodeSelector. Any conflicts will cause the pod to
+ // be rejected in admission.
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // tolerations are appended (excluding duplicates) to pods running with this
+ // RuntimeClass during admission, effectively unioning the set of nodes
+ // tolerated by the pod and the RuntimeClass.
+ Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
}
// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
index cf767e70..2c3d3425 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/overhead.go
@@ -24,7 +24,10 @@ import (
// OverheadApplyConfiguration represents a declarative configuration of the Overhead type for use
// with apply.
+//
+// Overhead structure represents the resource overhead associated with running a pod.
type OverheadApplyConfiguration struct {
+ // podFixed represents the fixed resource overhead associated with running a pod.
PodFixed *v1.ResourceList `json:"podFixed,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
index f6cbcf8f..fc75967f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
@@ -29,12 +29,38 @@ import (
// RuntimeClassApplyConfiguration represents a declarative configuration of the RuntimeClass type for use
// with apply.
+//
+// RuntimeClass defines a class of container runtime supported in the cluster.
+// The RuntimeClass is used to determine which container runtime is used to run
+// all containers in a pod. RuntimeClasses are (currently) manually defined by a
+// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
+// responsible for resolving the RuntimeClassName reference before running the
+// pod. For more details, see
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
type RuntimeClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Handler *string `json:"handler,omitempty"`
- Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
- Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
+ // handler specifies the underlying runtime and configuration that the CRI
+ // implementation will use to handle pods of this class. The possible values
+ // are specific to the node & CRI configuration. It is assumed that all
+ // handlers are available on every node, and handlers of the same name are
+ // equivalent on every node.
+ // For example, a handler called "runc" might specify that the runc OCI
+ // runtime (using native Linux containers) will be used to run the containers
+ // in a pod.
+ // The handler must be lowercase, conform to the DNS Label (RFC 1123) requirements,
+ // and is immutable.
+ Handler *string `json:"handler,omitempty"`
+ // overhead represents the resource overhead associated with running a pod for a
+ // given RuntimeClass. For more details, see
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ Overhead *OverheadApplyConfiguration `json:"overhead,omitempty"`
+ // scheduling holds the scheduling constraints to ensure that pods running
+ // with this RuntimeClass are scheduled to nodes that support it.
+ // If scheduling is nil, this RuntimeClass is assumed to be supported by all
+ // nodes.
+ Scheduling *SchedulingApplyConfiguration `json:"scheduling,omitempty"`
}
// RuntimeClass constructs a declarative configuration of the RuntimeClass type for use with
@@ -47,29 +73,14 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
return b
}
-// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
-// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
-// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRuntimeClassFrom extracts the applied configuration owned by fieldManager from
+// runtimeClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
-// ExtractRuntimeClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRuntimeClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "")
-}
-
-// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRuntimeClassStatus(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
- return extractRuntimeClass(runtimeClass, fieldManager, "status")
-}
-
-func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
+func ExtractRuntimeClassFrom(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1beta1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +92,21 @@ func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager st
b.WithAPIVersion("node.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
+// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
+// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// runtimeClass must be an unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
+// ExtractRuntimeClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return ExtractRuntimeClassFrom(runtimeClass, fieldManager, "")
+}
+
func (b RuntimeClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
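The pattern above repeats throughout this vendor update: the unexported extract helpers become exported ExtractXxxFrom(obj, fieldManager, subresource) functions, and the old ExtractXxx entry points remain as wrappers that pass an empty subresource. Below is a minimal sketch of the extract/modify-in-place/apply workflow those comments describe. It is illustrative only; the clientset wiring, the "skyhook-operator" field manager, and the handler value are assumptions, not part of this diff.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodev1beta1ac "k8s.io/client-go/applyconfigurations/node/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// updateRuntimeClassHandler extracts only the fields owned by our field manager,
// modifies them in place, and re-applies them with server-side apply.
func updateRuntimeClassHandler(ctx context.Context, cs kubernetes.Interface, name string) error {
	rc, err := cs.NodeV1beta1().RuntimeClasses().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Passing "" as the subresource is equivalent to calling ExtractRuntimeClass.
	ac, err := nodev1beta1ac.ExtractRuntimeClassFrom(rc, "skyhook-operator", "")
	if err != nil {
		return err
	}
	ac.WithHandler("runc") // illustrative change to a field owned by this manager
	_, err = cs.NodeV1beta1().RuntimeClasses().Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "skyhook-operator",
		Force:        true,
	})
	return err
}
```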
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
index 23d0b975..9eab351b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/scheduling.go
@@ -24,9 +24,20 @@ import (
// SchedulingApplyConfiguration represents a declarative configuration of the Scheduling type for use
// with apply.
+//
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
type SchedulingApplyConfiguration struct {
- NodeSelector map[string]string `json:"nodeSelector,omitempty"`
- Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // nodeSelector lists labels that must be present on nodes that support this
+ // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+ // node matched by this selector. The RuntimeClass nodeSelector is merged
+ // with a pod's existing nodeSelector. Any conflicts will cause the pod to
+ // be rejected in admission.
+ NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+ // tolerations are appended (excluding duplicates) to pods running with this
+ // RuntimeClass during admission, effectively unioning the set of nodes
+ // tolerated by the pod and the RuntimeClass.
+ Tolerations []v1.TolerationApplyConfiguration `json:"tolerations,omitempty"`
}
// SchedulingApplyConfiguration constructs a declarative configuration of the Scheduling type for use with
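For reference, a small sketch of how the Scheduling fields documented above might be populated through the generated builders. As the comments note, the nodeSelector is merged into pods that use the RuntimeClass and the tolerations are appended (deduplicated) at admission. The label key, taint key, and values here are made up for illustration.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	nodev1beta1ac "k8s.io/client-go/applyconfigurations/node/v1beta1"
)

// exampleScheduling builds a Scheduling apply configuration for a RuntimeClass
// that constrains pods to labeled nodes and tolerates a matching taint.
func exampleScheduling() *nodev1beta1ac.SchedulingApplyConfiguration {
	return nodev1beta1ac.Scheduling().
		WithNodeSelector(map[string]string{"example.com/runtime": "special"}).
		WithTolerations(corev1ac.Toleration().
			WithKey("example.com/runtime").
			WithOperator(corev1.TolerationOpExists).
			WithEffect(corev1.TaintEffectNoSchedule))
}
```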
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
index da18b73a..bd944a5e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
@@ -29,10 +29,16 @@ import (
// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use
// with apply.
+//
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod. A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
type EvictionApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // ObjectMeta describes the pod that is being evicted.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DeleteOptions *metav1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
+ // DeleteOptions may be provided
+ DeleteOptions *metav1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
}
// Eviction constructs a declarative configuration of the Eviction type for use with
@@ -46,29 +52,14 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration {
return b
}
-// ExtractEviction extracts the applied configuration owned by fieldManager from
-// eviction. If no managedFields are found in eviction for fieldManager, a
-// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEvictionFrom extracts the applied configuration owned by fieldManager from
+// eviction for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API.
-// ExtractEviction provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEvictionFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEviction(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
- return extractEviction(eviction, fieldManager, "")
-}
-
-// ExtractEvictionStatus is the same as ExtractEviction except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEvictionStatus(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
- return extractEviction(eviction, fieldManager, "status")
-}
-
-func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
+func ExtractEvictionFrom(eviction *policyv1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
b := &EvictionApplyConfiguration{}
err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1.Eviction"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresour
b.WithAPIVersion("policy/v1")
return b, nil
}
+
+// ExtractEviction extracts the applied configuration owned by fieldManager from
+// eviction. If no managedFields are found in eviction for fieldManager, a
+// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// eviction must be an unmodified Eviction API object that was retrieved from the Kubernetes API.
+// ExtractEviction provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEviction(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return ExtractEvictionFrom(eviction, fieldManager, "")
+}
+
func (b EvictionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
index 995a4f66..c9d4fd1a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
@@ -29,11 +29,17 @@ import (
// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use
// with apply.
+//
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
type PodDisruptionBudgetApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the PodDisruptionBudget.
+ Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the PodDisruptionBudget.
+ Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
}
// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with
@@ -47,6 +53,27 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
return b
}
+// ExtractPodDisruptionBudgetFrom extracts the applied configuration owned by fieldManager from
+// podDisruptionBudget for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// podDisruptionBudget must be an unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API.
+// ExtractPodDisruptionBudgetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPodDisruptionBudgetFrom(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ b := &PodDisruptionBudgetApplyConfiguration{}
+ err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(podDisruptionBudget.Name)
+ b.WithNamespace(podDisruptionBudget.Namespace)
+
+ b.WithKind("PodDisruptionBudget")
+ b.WithAPIVersion("policy/v1")
+ return b, nil
+}
+
// ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from
// podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a
// PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +84,16 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// ExtractPodDisruptionBudget provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
- return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "")
+ return ExtractPodDisruptionBudgetFrom(podDisruptionBudget, fieldManager, "")
}
-// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPodDisruptionBudgetStatus extracts the applied configuration owned by fieldManager from
+// podDisruptionBudget for the status subresource.
func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
- return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status")
+ return ExtractPodDisruptionBudgetFrom(podDisruptionBudget, fieldManager, "status")
}
-func extractPodDisruptionBudget(podDisruptionBudget *policyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
- b := &PodDisruptionBudgetApplyConfiguration{}
- err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(podDisruptionBudget.Name)
- b.WithNamespace(podDisruptionBudget.Namespace)
-
- b.WithKind("PodDisruptionBudget")
- b.WithAPIVersion("policy/v1")
- return b, nil
-}
func (b PodDisruptionBudgetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
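As with the other types in this update, the status variant is now a thin wrapper over the new ...From function rather than a separate unexported helper. A short sketch of that equivalence follows; it is illustrative only, and the fieldManager value and surrounding wiring are assumed.

```go
package example

import (
	policyv1 "k8s.io/api/policy/v1"
	policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
)

// extractOwned shows the relationship between the regenerated helpers:
// ExtractPodDisruptionBudgetFrom takes an explicit subresource, while the older
// ExtractPodDisruptionBudget and ExtractPodDisruptionBudgetStatus entry points
// now pass "" and "status" respectively.
func extractOwned(pdb *policyv1.PodDisruptionBudget, fieldManager string) (*policyv1ac.PodDisruptionBudgetApplyConfiguration, *policyv1ac.PodDisruptionBudgetApplyConfiguration, error) {
	obj, err := policyv1ac.ExtractPodDisruptionBudgetFrom(pdb, fieldManager, "") // same result as ExtractPodDisruptionBudget
	if err != nil {
		return nil, nil, err
	}
	status, err := policyv1ac.ExtractPodDisruptionBudgetFrom(pdb, fieldManager, "status") // same result as ExtractPodDisruptionBudgetStatus
	if err != nil {
		return nil, nil, err
	}
	return obj, status, nil
}
```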
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
index 3c66739b..0d230733 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetspec.go
@@ -26,10 +26,46 @@ import (
// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
// with apply.
+//
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
type PodDisruptionBudgetSpecApplyConfiguration struct {
- MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
- Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+ // An eviction is allowed if at least "minAvailable" pods selected by
+ // "selector" will still be available after the eviction, i.e. even in the
+ // absence of the evicted pod. So for example you can prevent all voluntary
+ // evictions by specifying "100%".
+ MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
+ // Label query over pods whose evictions are managed by the disruption
+ // budget.
+ // A null selector will match no pods, while an empty ({}) selector will select
+ // all pods within the namespace.
+ Selector *metav1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // An eviction is allowed if at most "maxUnavailable" pods selected by
+ // "selector" are unavailable after the eviction, i.e. even in absence of
+ // the evicted pod. For example, one can prevent all voluntary evictions
+ // by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+ // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
+ // should be considered for eviction. Current implementation considers healthy pods,
+ // as pods that have status.conditions item with type="Ready",status="True".
+ //
+ // Valid policies are IfHealthyBudget and AlwaysAllow.
+ // If no policy is specified, the default behavior will be used,
+ // which corresponds to the IfHealthyBudget policy.
+ //
+ // IfHealthyBudget policy means that running pods (status.phase="Running"),
+ // but not yet healthy can be evicted only if the guarded application is not
+ // disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
+ // Healthy pods will be subject to the PDB for eviction.
+ //
+ // AlwaysAllow policy means that all running pods (status.phase="Running"),
+ // but not yet healthy are considered disrupted and can be evicted regardless
+ // of whether the criteria in a PDB are met. This means prospective running
+ // pods of a disrupted application might not get a chance to become healthy.
+ // Healthy pods will be subject to the PDB for eviction.
+ //
+ // Additional policies may be added in the future.
+ // Clients making eviction decisions should disallow eviction of unhealthy pods
+ // if they encounter an unrecognized policy in this field.
UnhealthyPodEvictionPolicy *policyv1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
}
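A small sketch of how the spec fields documented above might be set via the generated builders and applied server-side. It is illustrative only; the object and label names, the 50% budget, and the field manager are assumptions.

```go
package example

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
	"k8s.io/client-go/kubernetes"
)

// applyExamplePDB builds a PodDisruptionBudget apply configuration exercising the
// documented fields: minAvailable (mutually exclusive with maxUnavailable) and
// unhealthyPodEvictionPolicy, then applies it with server-side apply.
func applyExamplePDB(ctx context.Context, cs kubernetes.Interface, ns string) error {
	pdb := policyv1ac.PodDisruptionBudget("example-pdb", ns).
		WithSpec(policyv1ac.PodDisruptionBudgetSpec().
			WithSelector(metav1ac.LabelSelector().WithMatchLabels(map[string]string{"app": "example"})).
			WithMinAvailable(intstr.FromString("50%")). // do not also set maxUnavailable
			WithUnhealthyPodEvictionPolicy(policyv1.AlwaysAllow))

	_, err := cs.PolicyV1().PodDisruptionBudgets(ns).Apply(ctx, pdb, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
```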
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
index d3c44d90..7e05f150 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudgetstatus.go
@@ -25,14 +25,46 @@ import (
// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use
// with apply.
+//
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty"`
- DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
- CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
- DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
- ExpectedPods *int32 `json:"expectedPods,omitempty"`
- Conditions []applyconfigurationsmetav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
+ // status information is valid only if observedGeneration equals the PDB's object generation.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // DisruptedPods contains information about pods whose eviction was
+ // processed by the API server eviction subresource handler but has not
+ // yet been observed by the PodDisruptionBudget controller.
+ // A pod will be in this map from the time when the API server processed the
+ // eviction request to the time when the pod is seen by PDB controller
+ // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+ // and the value is the time when the API server processed the eviction request. If
+ // the deletion didn't occur and a pod is still there it will be removed from
+ // the list automatically by PodDisruptionBudget controller after some time.
+ // If everything goes smoothly, this map should be empty most of the time.
+ // A large number of entries in the map may indicate problems with pod deletions.
+ DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty"`
+ // Number of pod disruptions that are currently allowed.
+ DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
+ // current number of healthy pods
+ CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
+ // minimum desired number of healthy pods
+ DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
+ // total number of pods counted by this disruption budget
+ ExpectedPods *int32 `json:"expectedPods,omitempty"`
+ // Conditions contain conditions for PDB. The disruption controller sets the
+ // DisruptionAllowed condition. The following are known values for the reason field
+ // (additional reasons could be added in the future):
+ // - SyncFailed: The controller encountered an error and wasn't able to compute
+ // the number of allowed disruptions. Therefore no disruptions are
+ // allowed and the status of the condition will be False.
+ // - InsufficientPods: The number of pods is either at or below the number
+ // required by the PodDisruptionBudget. No disruptions are
+ // allowed and the status of the condition will be False.
+ // - SufficientPods: There are more pods than required by the PodDisruptionBudget.
+ // The condition will be True, and the number of allowed
+ // disruptions are provided by the disruptionsAllowed property.
+ Conditions []applyconfigurationsmetav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
index be0f1c1c..eeadbbcb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
@@ -29,10 +29,16 @@ import (
// EvictionApplyConfiguration represents a declarative configuration of the Eviction type for use
// with apply.
+//
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod. A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
type EvictionApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // ObjectMeta describes the pod that is being evicted.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
+ // DeleteOptions may be provided
+ DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
}
// Eviction constructs a declarative configuration of the Eviction type for use with
@@ -46,29 +52,14 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration {
return b
}
-// ExtractEviction extracts the applied configuration owned by fieldManager from
-// eviction. If no managedFields are found in eviction for fieldManager, a
-// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractEvictionFrom extracts the applied configuration owned by fieldManager from
+// eviction for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API.
-// ExtractEviction provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractEvictionFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractEviction(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
- return extractEviction(eviction, fieldManager, "")
-}
-
-// ExtractEvictionStatus is the same as ExtractEviction except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractEvictionStatus(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
- return extractEviction(eviction, fieldManager, "status")
-}
-
-func extractEviction(eviction *policyv1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
+func ExtractEvictionFrom(eviction *policyv1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
b := &EvictionApplyConfiguration{}
err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +72,21 @@ func extractEviction(eviction *policyv1beta1.Eviction, fieldManager string, subr
b.WithAPIVersion("policy/v1beta1")
return b, nil
}
+
+// ExtractEviction extracts the applied configuration owned by fieldManager from
+// eviction. If no managedFields are found in eviction for fieldManager, a
+// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// eviction must be an unmodified Eviction API object that was retrieved from the Kubernetes API.
+// ExtractEviction provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractEviction(eviction *policyv1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return ExtractEvictionFrom(eviction, fieldManager, "")
+}
+
func (b EvictionApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
index 159f19ea..ca2d839d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
@@ -29,11 +29,17 @@ import (
// PodDisruptionBudgetApplyConfiguration represents a declarative configuration of the PodDisruptionBudget type for use
// with apply.
+//
+// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
type PodDisruptionBudgetApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
- Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the desired behavior of the PodDisruptionBudget.
+ Spec *PodDisruptionBudgetSpecApplyConfiguration `json:"spec,omitempty"`
+ // Most recently observed status of the PodDisruptionBudget.
+ Status *PodDisruptionBudgetStatusApplyConfiguration `json:"status,omitempty"`
}
// PodDisruptionBudget constructs a declarative configuration of the PodDisruptionBudget type for use with
@@ -47,6 +53,27 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
return b
}
+// ExtractPodDisruptionBudgetFrom extracts the applied configuration owned by fieldManager from
+// podDisruptionBudget for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// podDisruptionBudget must be an unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API.
+// ExtractPodDisruptionBudgetFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPodDisruptionBudgetFrom(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ b := &PodDisruptionBudgetApplyConfiguration{}
+ err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(podDisruptionBudget.Name)
+ b.WithNamespace(podDisruptionBudget.Namespace)
+
+ b.WithKind("PodDisruptionBudget")
+ b.WithAPIVersion("policy/v1beta1")
+ return b, nil
+}
+
// ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from
// podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a
// PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +84,16 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// ExtractPodDisruptionBudget provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
- return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "")
+ return ExtractPodDisruptionBudgetFrom(podDisruptionBudget, fieldManager, "")
}
-// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractPodDisruptionBudgetStatus extracts the applied configuration owned by fieldManager from
+// podDisruptionBudget for the status subresource.
func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
- return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status")
+ return ExtractPodDisruptionBudgetFrom(podDisruptionBudget, fieldManager, "status")
}
-func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
- b := &PodDisruptionBudgetApplyConfiguration{}
- err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(podDisruptionBudget.Name)
- b.WithNamespace(podDisruptionBudget.Namespace)
-
- b.WithKind("PodDisruptionBudget")
- b.WithAPIVersion("policy/v1beta1")
- return b, nil
-}
func (b PodDisruptionBudgetApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
index d8fecf7a..1fe247b4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetspec.go
@@ -26,10 +26,47 @@ import (
// PodDisruptionBudgetSpecApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetSpec type for use
// with apply.
+//
+// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
type PodDisruptionBudgetSpecApplyConfiguration struct {
- MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
- Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
- MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+ // An eviction is allowed if at least "minAvailable" pods selected by
+ // "selector" will still be available after the eviction, i.e. even in the
+ // absence of the evicted pod. So for example you can prevent all voluntary
+ // evictions by specifying "100%".
+ MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
+ // Label query over pods whose evictions are managed by the disruption
+ // budget.
+ // A null selector selects no pods.
+ // An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods.
+ // In policy/v1, an empty selector will select all pods in the namespace.
+ Selector *v1.LabelSelectorApplyConfiguration `json:"selector,omitempty"`
+ // An eviction is allowed if at most "maxUnavailable" pods selected by
+ // "selector" are unavailable after the eviction, i.e. even in absence of
+ // the evicted pod. For example, one can prevent all voluntary evictions
+ // by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+ // UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods
+ // should be considered for eviction. Current implementation considers healthy pods,
+ // as pods that have status.conditions item with type="Ready",status="True".
+ //
+ // Valid policies are IfHealthyBudget and AlwaysAllow.
+ // If no policy is specified, the default behavior will be used,
+ // which corresponds to the IfHealthyBudget policy.
+ //
+ // IfHealthyBudget policy means that running pods (status.phase="Running"),
+ // but not yet healthy can be evicted only if the guarded application is not
+ // disrupted (status.currentHealthy is at least equal to status.desiredHealthy).
+ // Healthy pods will be subject to the PDB for eviction.
+ //
+ // AlwaysAllow policy means that all running pods (status.phase="Running"),
+ // but not yet healthy are considered disrupted and can be evicted regardless
+ // of whether the criteria in a PDB are met. This means prospective running
+ // pods of a disrupted application might not get a chance to become healthy.
+ // Healthy pods will be subject to the PDB for eviction.
+ //
+ // Additional policies may be added in the future.
+ // Clients making eviction decisions should disallow eviction of unhealthy pods
+ // if they encounter an unrecognized policy in this field.
UnhealthyPodEvictionPolicy *policyv1beta1.UnhealthyPodEvictionPolicyType `json:"unhealthyPodEvictionPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
index e66a7fb3..e7faa861 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudgetstatus.go
@@ -25,14 +25,46 @@ import (
// PodDisruptionBudgetStatusApplyConfiguration represents a declarative configuration of the PodDisruptionBudgetStatus type for use
// with apply.
+//
+// PodDisruptionBudgetStatus represents information about the status of a
+// PodDisruptionBudget. Status may trail the actual state of a system.
type PodDisruptionBudgetStatusApplyConfiguration struct {
- ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
- DisruptedPods map[string]v1.Time `json:"disruptedPods,omitempty"`
- DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
- CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
- DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
- ExpectedPods *int32 `json:"expectedPods,omitempty"`
- Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
+ // status information is valid only if observedGeneration equals the PDB's object generation.
+ ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
+ // DisruptedPods contains information about pods whose eviction was
+ // processed by the API server eviction subresource handler but has not
+ // yet been observed by the PodDisruptionBudget controller.
+ // A pod will be in this map from the time when the API server processed the
+ // eviction request to the time when the pod is seen by PDB controller
+ // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+ // and the value is the time when the API server processed the eviction request. If
+ // the deletion didn't occur and a pod is still there it will be removed from
+ // the list automatically by PodDisruptionBudget controller after some time.
+ // If everything goes smoothly, this map should be empty most of the time.
+ // A large number of entries in the map may indicate problems with pod deletions.
+ DisruptedPods map[string]v1.Time `json:"disruptedPods,omitempty"`
+ // Number of pod disruptions that are currently allowed.
+ DisruptionsAllowed *int32 `json:"disruptionsAllowed,omitempty"`
+ // current number of healthy pods
+ CurrentHealthy *int32 `json:"currentHealthy,omitempty"`
+ // minimum desired number of healthy pods
+ DesiredHealthy *int32 `json:"desiredHealthy,omitempty"`
+ // total number of pods counted by this disruption budget
+ ExpectedPods *int32 `json:"expectedPods,omitempty"`
+ // Conditions contain conditions for PDB. The disruption controller sets the
+ // DisruptionAllowed condition. The following are known values for the reason field
+ // (additional reasons could be added in the future):
+ // - SyncFailed: The controller encountered an error and wasn't able to compute
+ // the number of allowed disruptions. Therefore no disruptions are
+ // allowed and the status of the condition will be False.
+ // - InsufficientPods: The number of pods is either at or below the number
+ // required by the PodDisruptionBudget. No disruptions are
+ // allowed and the status of the condition will be False.
+ // - SufficientPods: There are more pods than required by the PodDisruptionBudget.
+ // The condition will be True, and the number of allowed
+ // disruptions are provided by the disruptionsAllowed property.
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
}
// PodDisruptionBudgetStatusApplyConfiguration constructs a declarative configuration of the PodDisruptionBudgetStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
index b7049a8e..6f44e797 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/aggregationrule.go
@@ -24,7 +24,11 @@ import (
// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
// with apply.
+//
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
type AggregationRuleApplyConfiguration struct {
+ // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+ // If any of the selectors match, then the ClusterRole's permissions will be added
ClusterRoleSelectors []metav1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
index b8634870..d65387e2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
@@ -29,11 +29,18 @@ import (
// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
// with apply.
+//
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
type ClusterRoleApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
- AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
}
// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
@@ -46,29 +53,14 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
return b
}
-// ExtractClusterRole extracts the applied configuration owned by fieldManager from
-// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
-// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleFrom extracts the applied configuration owned by fieldManager from
+// clusterRole for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
-// ExtractClusterRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "")
-}
-
-// ExtractClusterRoleStatus is the same as ExtractClusterRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleStatus(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "status")
-}
-
-func extractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
+func ExtractClusterRoleFrom(clusterRole *rbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +72,21 @@ func extractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string, su
b.WithAPIVersion("rbac.authorization.k8s.io/v1")
return b, nil
}
+
+// ExtractClusterRole extracts the applied configuration owned by fieldManager from
+// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
+// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRole must be an unmodified ClusterRole API object that was retrieved from the Kubernetes API.
+// ExtractClusterRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRole(clusterRole *rbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return ExtractClusterRoleFrom(clusterRole, fieldManager, "")
+}
+
func (b ClusterRoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
index 0fd5a951..13d3d020 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
@@ -29,11 +29,19 @@ import (
// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
// with apply.
+//
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
type ClusterRoleBindingApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
@@ -46,29 +54,14 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
return b
}
-// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
-// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
-// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractClusterRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
-}
-
-// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
-}
-
-func extractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
+func ExtractClusterRoleBindingFrom(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +73,21 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fi
b.WithAPIVersion("rbac.authorization.k8s.io/v1")
return b, nil
}
+
+// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
+// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRoleBinding must be an unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractClusterRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return ExtractClusterRoleBindingFrom(clusterRoleBinding, fieldManager, "")
+}
+
func (b ClusterRoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
index a2e66d10..03556b01 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/policyrule.go
@@ -20,11 +20,26 @@ package v1
// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
// with apply.
+//
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
type PolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.
+ Verbs []string `json:"verbs,omitempty"`
+ // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+ // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
+ Resources []string `json:"resources,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
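A brief sketch of how these PolicyRule fields combine in a ClusterRole apply configuration: verbs, apiGroups, and resources for API resources, or nonResourceURLs for URL paths, but not both in one rule. The role name, resources, and field manager below are assumptions made for illustration.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

// applyExampleClusterRole builds a ClusterRole with one resource rule and one
// non-resource rule, then applies it with server-side apply.
func applyExampleClusterRole(ctx context.Context, cs kubernetes.Interface) error {
	cr := rbacv1ac.ClusterRole("example-reader").
		WithRules(
			// "" is the core API group; the verbs apply to every resource listed in the rule.
			rbacv1ac.PolicyRule().
				WithAPIGroups("").
				WithResources("pods", "nodes").
				WithVerbs("get", "list", "watch"),
			// Non-resource URLs only take effect on ClusterRoles referenced from a ClusterRoleBinding.
			rbacv1ac.PolicyRule().
				WithNonResourceURLs("/healthz").
				WithVerbs("get"),
		)

	_, err := cs.RbacV1().ClusterRoles().Apply(ctx, cr, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
```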
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
index 1a363eeb..9e2895b1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
@@ -29,10 +29,14 @@ import (
// RoleApplyConfiguration represents a declarative configuration of the Role type for use
// with apply.
+//
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
type RoleApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// Role constructs a declarative configuration of the Role type for use with
@@ -46,29 +50,14 @@ func Role(name, namespace string) *RoleApplyConfiguration {
return b
}
-// ExtractRole extracts the applied configuration owned by fieldManager from
-// role. If no managedFields are found in role for fieldManager, a
-// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleFrom extracts the applied configuration owned by fieldManager from
+// role for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
-// ExtractRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRole(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "")
-}
-
-// ExtractRoleStatus is the same as ExtractRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleStatus(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "status")
-}
-
-func extractRole(role *rbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
+func ExtractRoleFrom(role *rbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +70,21 @@ func extractRole(role *rbacv1.Role, fieldManager string, subresource string) (*R
b.WithAPIVersion("rbac.authorization.k8s.io/v1")
return b, nil
}
+
+// ExtractRole extracts the applied configuration owned by fieldManager from
+// role. If no managedFields are found in role for fieldManager, a
+// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// role must be an unmodified Role API object that was retrieved from the Kubernetes API.
+// ExtractRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRole(role *rbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return ExtractRoleFrom(role, fieldManager, "")
+}
+
func (b RoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
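
The regenerated doc comments above describe the extract/modify-in-place/apply workflow, with ExtractRoleFrom now exposing the subresource parameter directly. As a hedged illustration only (not part of the vendored patch), the sketch below shows how a caller might use ExtractRole together with a typed clientset; the clientset argument, namespace, role name, and the "skyhook-operator" field-manager name are assumptions for the example.

```go
// Illustrative sketch only — not part of the vendored diff. Assumes a configured
// *kubernetes.Clientset and a field manager named "skyhook-operator".
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

func addPodReadRule(ctx context.Context, cs *kubernetes.Clientset, namespace, name string) error {
	const fieldManager = "skyhook-operator" // assumed manager name

	// Extract: read the live object and pull out only the fields this manager owns.
	live, err := cs.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := rbacv1ac.ExtractRole(live, fieldManager) // equivalent to ExtractRoleFrom(live, fieldManager, "")
	if err != nil {
		return err
	}

	// Modify in place: append a read-only rule for pods in the core API group.
	ac.WithRules(rbacv1ac.PolicyRule().
		WithAPIGroups("").
		WithResources("pods").
		WithVerbs("get", "list", "watch"))

	// Apply: send the configuration back; the server merges it by field manager.
	applied, err := cs.RbacV1().Roles(namespace).Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager})
	if err != nil {
		return err
	}
	fmt.Printf("role %s now has %d rules\n", applied.Name, len(applied.Rules))
	return nil
}
```
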
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
index fcda064c..776fb3ed 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
@@ -29,11 +29,20 @@ import (
// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
// with apply.
+//
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
type RoleBindingApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ // This field is immutable.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
@@ -47,29 +56,14 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
return b
}
-// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
-// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
-// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// roleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "")
-}
-
-// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleBindingStatus(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "status")
-}
-
-func extractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
+func ExtractRoleBindingFrom(roleBinding *rbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -82,6 +76,21 @@ func extractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string, su
b.WithAPIVersion("rbac.authorization.k8s.io/v1")
return b, nil
}
+
+// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
+// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
+// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// roleBinding must be an unmodified RoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRoleBinding(roleBinding *rbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return ExtractRoleBindingFrom(roleBinding, fieldManager, "")
+}
+
func (b RoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
index 646a3bb1..231fe0dd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/roleref.go
@@ -20,10 +20,15 @@ package v1
// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
// with apply.
+//
+// RoleRef contains information that points to the role being used
type RoleRefApplyConfiguration struct {
+ // APIGroup is the group for the resource being referenced
APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind is the type of resource being referenced
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced
+ Name *string `json:"name,omitempty"`
}
// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
index e1d9c5cf..1914086c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/subject.go
@@ -20,10 +20,21 @@ package v1
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
type SubjectApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- APIGroup *string `json:"apiGroup,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ Kind *string `json:"kind,omitempty"`
+ // APIGroup holds the API group of the referenced subject.
+ // Defaults to "" for ServiceAccount subjects.
+ // Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Name of the object being referenced.
+ Name *string `json:"name,omitempty"`
+ // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+ // the Authorizer should report an error.
Namespace *string `json:"namespace,omitempty"`
}
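
The Subject and RoleRef comments above spell out the binding semantics: ServiceAccount subjects default to an empty APIGroup, and the RoleRef is immutable and must resolve or the Authorizer errors. A minimal, hypothetical sketch follows (not part of the patch) that builds and applies a RoleBinding apply configuration; the names, namespace, and field-manager string are illustrative assumptions.

```go
// Illustrative sketch only — not part of the vendored diff.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

func applyBinding(ctx context.Context, cs *kubernetes.Clientset) error {
	rb := rbacv1ac.RoleBinding("skyhook-view", "skyhook").
		WithSubjects(rbacv1ac.Subject().
			WithKind("ServiceAccount"). // APIGroup defaults to "" for ServiceAccount subjects
			WithName("skyhook-operator").
			WithNamespace("skyhook")).
		WithRoleRef(rbacv1ac.RoleRef(). // immutable once applied; must resolve or the Authorizer errors
			WithAPIGroup("rbac.authorization.k8s.io").
			WithKind("Role").
			WithName("skyhook-view"))

	_, err := cs.RbacV1().RoleBindings("skyhook").
		Apply(ctx, rb, metav1.ApplyOptions{FieldManager: "skyhook-operator", Force: true})
	return err
}
```
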
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
index ff4aeb59..397d14a1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/aggregationrule.go
@@ -24,7 +24,11 @@ import (
// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
// with apply.
+//
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
type AggregationRuleApplyConfiguration struct {
+ // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+ // If any of the selectors match, then the ClusterRole's permissions will be added
ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
}
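
ClusterRoleSelectors drive rule aggregation: any ClusterRole matching a selector contributes its permissions, and the controller then owns the aggregated Rules. As a hypothetical sketch (not part of the patch), written against the non-deprecated rbac/v1 apply configurations which share this field shape, the example below declares an aggregated ClusterRole; the label key and field-manager name are assumptions.

```go
// Illustrative sketch only — not part of the vendored diff.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

func applyAggregatedClusterRole(ctx context.Context, cs *kubernetes.Clientset) error {
	// Rules are intentionally left unset: once AggregationRule is set, the controller
	// manages Rules and direct edits to them are stomped.
	cr := rbacv1ac.ClusterRole("skyhook-aggregate").
		WithAggregationRule(rbacv1ac.AggregationRule().
			WithClusterRoleSelectors(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{"rbac.example.com/aggregate-to-skyhook": "true"})))

	_, err := cs.RbacV1().ClusterRoles().
		Apply(ctx, cr, metav1.ApplyOptions{FieldManager: "skyhook-operator", Force: true})
	return err
}
```
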
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
index e0ccc04b..678761e6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
@@ -29,11 +29,19 @@ import (
// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
// with apply.
+//
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22.
type ClusterRoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
- AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
}
// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
@@ -46,29 +54,14 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
return b
}
-// ExtractClusterRole extracts the applied configuration owned by fieldManager from
-// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
-// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleFrom extracts the applied configuration owned by fieldManager from
+// clusterRole for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
-// ExtractClusterRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "")
-}
-
-// ExtractClusterRoleStatus is the same as ExtractClusterRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleStatus(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "status")
-}
-
-func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
+func ExtractClusterRoleFrom(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +73,21 @@ func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager stri
b.WithAPIVersion("rbac.authorization.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractClusterRole extracts the applied configuration owned by fieldManager from
+// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
+// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRole must be an unmodified ClusterRole API object that was retrieved from the Kubernetes API.
+// ExtractClusterRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return ExtractClusterRoleFrom(clusterRole, fieldManager, "")
+}
+
func (b ClusterRoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
index d7085ae9..f4ee74f1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
@@ -29,11 +29,19 @@ import (
// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
// with apply.
+//
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22.
type ClusterRoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
@@ -46,29 +54,14 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
return b
}
-// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
-// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
-// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractClusterRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
-}
-
-// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
-}
-
-func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
+func ExtractClusterRoleBindingFrom(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +73,21 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBindi
b.WithAPIVersion("rbac.authorization.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
+// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRoleBinding must be an unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractClusterRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return ExtractClusterRoleBindingFrom(clusterRoleBinding, fieldManager, "")
+}
+
func (b ClusterRoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
index 89d7a291..f4a6d3ff 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/policyrule.go
@@ -20,11 +20,26 @@ package v1alpha1
// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
// with apply.
+//
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
type PolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.
+ Verbs []string `json:"verbs,omitempty"`
+ // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+ // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
+ Resources []string `json:"resources,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
index 0cefea4f..19b123b2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
@@ -29,10 +29,15 @@ import (
// RoleApplyConfiguration represents a declarative configuration of the Role type for use
// with apply.
+//
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22.
type RoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// Role constructs a declarative configuration of the Role type for use with
@@ -46,29 +51,14 @@ func Role(name, namespace string) *RoleApplyConfiguration {
return b
}
-// ExtractRole extracts the applied configuration owned by fieldManager from
-// role. If no managedFields are found in role for fieldManager, a
-// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleFrom extracts the applied configuration owned by fieldManager from
+// role for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
-// ExtractRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRole(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "")
-}
-
-// ExtractRoleStatus is the same as ExtractRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleStatus(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "status")
-}
-
-func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
+func ExtractRoleFrom(role *rbacv1alpha1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.Role"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +71,21 @@ func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource strin
b.WithAPIVersion("rbac.authorization.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractRole extracts the applied configuration owned by fieldManager from
+// role. If no managedFields are found in role for fieldManager, a
+// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// role must be an unmodified Role API object that was retrieved from the Kubernetes API.
+// ExtractRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRole(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return ExtractRoleFrom(role, fieldManager, "")
+}
+
func (b RoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
index d4078180..116f2937 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
@@ -29,11 +29,20 @@ import (
// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
// with apply.
+//
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22.
type RoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
@@ -47,29 +56,14 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
return b
}
-// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
-// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
-// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// roleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "")
-}
-
-// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleBindingStatus(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "status")
-}
-
-func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
+func ExtractRoleBindingFrom(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -82,6 +76,21 @@ func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager stri
b.WithAPIVersion("rbac.authorization.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
+// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
+// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// roleBinding must be an unmodified RoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return ExtractRoleBindingFrom(roleBinding, fieldManager, "")
+}
+
func (b RoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
index 4b255311..a02a36be 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/roleref.go
@@ -20,10 +20,15 @@ package v1alpha1
// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
// with apply.
+//
+// RoleRef contains information that points to the role being used
type RoleRefApplyConfiguration struct {
+ // APIGroup is the group for the resource being referenced
APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind is the type of resource being referenced
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced
+ Name *string `json:"name,omitempty"`
}
// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
index 665b42af..d24f865d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/subject.go
@@ -20,11 +20,22 @@ package v1alpha1
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
type SubjectApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ Kind *string `json:"kind,omitempty"`
+ // APIVersion holds the API group and version of the referenced subject.
+ // Defaults to "v1" for ServiceAccount subjects.
+ // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects.
APIVersion *string `json:"apiVersion,omitempty"`
- Name *string `json:"name,omitempty"`
- Namespace *string `json:"namespace,omitempty"`
+ // Name of the object being referenced.
+ Name *string `json:"name,omitempty"`
+ // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+ // the Authorizer should report an error.
+ Namespace *string `json:"namespace,omitempty"`
}
// SubjectApplyConfiguration constructs a declarative configuration of the Subject type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
index e9bb68dc..b4c6bd5a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/aggregationrule.go
@@ -24,7 +24,11 @@ import (
// AggregationRuleApplyConfiguration represents a declarative configuration of the AggregationRule type for use
// with apply.
+//
+// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole
type AggregationRuleApplyConfiguration struct {
+ // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules.
+ // If any of the selectors match, then the ClusterRole's permissions will be added
ClusterRoleSelectors []v1.LabelSelectorApplyConfiguration `json:"clusterRoleSelectors,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
index 6fe51e22..a105df5f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
@@ -29,11 +29,19 @@ import (
// ClusterRoleApplyConfiguration represents a declarative configuration of the ClusterRole type for use
// with apply.
+//
+// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.22.
type ClusterRoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
- AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
+ // Rules holds all the PolicyRules for this ClusterRole
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be
+ // stomped by the controller.
+ AggregationRule *AggregationRuleApplyConfiguration `json:"aggregationRule,omitempty"`
}
// ClusterRole constructs a declarative configuration of the ClusterRole type for use with
@@ -46,29 +54,14 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
return b
}
-// ExtractClusterRole extracts the applied configuration owned by fieldManager from
-// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
-// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleFrom extracts the applied configuration owned by fieldManager from
+// clusterRole for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
-// ExtractClusterRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "")
-}
-
-// ExtractClusterRoleStatus is the same as ExtractClusterRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleStatus(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
- return extractClusterRole(clusterRole, fieldManager, "status")
-}
-
-func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
+func ExtractClusterRoleFrom(clusterRole *rbacv1beta1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +73,21 @@ func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager strin
b.WithAPIVersion("rbac.authorization.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractClusterRole extracts the applied configuration owned by fieldManager from
+// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
+// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRole must be an unmodified ClusterRole API object that was retrieved from the Kubernetes API.
+// ExtractClusterRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return ExtractClusterRoleFrom(clusterRole, fieldManager, "")
+}
+
func (b ClusterRoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
index e75ab7a8..c519c6e0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
@@ -29,11 +29,19 @@ import (
// ClusterRoleBindingApplyConfiguration represents a declarative configuration of the ClusterRoleBinding type for use
// with apply.
+//
+// ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace,
+// and adds who information via Subject.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.22.
type ClusterRoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can only reference a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// ClusterRoleBinding constructs a declarative configuration of the ClusterRoleBinding type for use with
@@ -46,29 +54,14 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
return b
}
-// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
-// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
-// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractClusterRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractClusterRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractClusterRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
-}
-
-// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
- return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
-}
-
-func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
+func ExtractClusterRoleBindingFrom(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +73,21 @@ func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBindin
b.WithAPIVersion("rbac.authorization.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
+// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
+// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// clusterRoleBinding must be an unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractClusterRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return ExtractClusterRoleBindingFrom(clusterRoleBinding, fieldManager, "")
+}
+
func (b ClusterRoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
index dc630df2..bf3a5a50 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/policyrule.go
@@ -20,11 +20,27 @@ package v1beta1
// PolicyRuleApplyConfiguration represents a declarative configuration of the PolicyRule type for use
// with apply.
+//
+// Authorization is calculated against
+// 1. evaluation of ClusterRoleBindings - short circuit on match
+// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
+// 3. deny by default
+// PolicyRule holds information that describes a policy rule, but does not contain information
+// about who the rule applies to or which namespace the rule applies to.
type PolicyRuleApplyConfiguration struct {
- Verbs []string `json:"verbs,omitempty"`
- APIGroups []string `json:"apiGroups,omitempty"`
- Resources []string `json:"resources,omitempty"`
- ResourceNames []string `json:"resourceNames,omitempty"`
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.
+ Verbs []string `json:"verbs,omitempty"`
+ // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+ // the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups.
+ APIGroups []string `json:"apiGroups,omitempty"`
+ // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups.
+ // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.
+ Resources []string `json:"resources,omitempty"`
+ // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
+ ResourceNames []string `json:"resourceNames,omitempty"`
+ // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path
+ // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
+ // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both.
NonResourceURLs []string `json:"nonResourceURLs,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
index 7a628b95..d0a4c20c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
@@ -29,10 +29,15 @@ import (
// RoleApplyConfiguration represents a declarative configuration of the Role type for use
// with apply.
+//
+// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.22.
type RoleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
+ // Rules holds all the PolicyRules for this Role
+ Rules []PolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// Role constructs a declarative configuration of the Role type for use with
@@ -46,29 +51,14 @@ func Role(name, namespace string) *RoleApplyConfiguration {
return b
}
-// ExtractRole extracts the applied configuration owned by fieldManager from
-// role. If no managedFields are found in role for fieldManager, a
-// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleFrom extracts the applied configuration owned by fieldManager from
+// role for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
-// ExtractRole provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRole(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "")
-}
-
-// ExtractRoleStatus is the same as ExtractRole except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleStatus(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
- return extractRole(role, fieldManager, "status")
-}
-
-func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
+func ExtractRoleFrom(role *rbacv1beta1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1beta1.Role"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +71,21 @@ func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string
b.WithAPIVersion("rbac.authorization.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractRole extracts the applied configuration owned by fieldManager from
+// role. If no managedFields are found in role for fieldManager, a
+// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// role must be an unmodified Role API object that was retrieved from the Kubernetes API.
+// ExtractRole provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRole(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return ExtractRoleFrom(role, fieldManager, "")
+}
+
func (b RoleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
index be180c3f..64669c10 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
@@ -29,11 +29,20 @@ import (
// RoleBindingApplyConfiguration represents a declarative configuration of the RoleBinding type for use
// with apply.
+//
+// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace.
+// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given
+// namespace only have effect in that namespace.
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.22.
type RoleBindingApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
- RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
+ // Subjects holds references to the objects the role applies to.
+ Subjects []SubjectApplyConfiguration `json:"subjects,omitempty"`
+ // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace.
+ // If the RoleRef cannot be resolved, the Authorizer must return an error.
+ RoleRef *RoleRefApplyConfiguration `json:"roleRef,omitempty"`
}
// RoleBinding constructs a declarative configuration of the RoleBinding type for use with
@@ -47,29 +56,14 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
return b
}
-// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
-// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
-// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractRoleBindingFrom extracts the applied configuration owned by fieldManager from
+// roleBinding for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
-// ExtractRoleBinding provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractRoleBindingFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "")
-}
-
-// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractRoleBindingStatus(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
- return extractRoleBinding(roleBinding, fieldManager, "status")
-}
-
-func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
+func ExtractRoleBindingFrom(roleBinding *rbacv1beta1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
@@ -82,6 +76,21 @@ func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager strin
b.WithAPIVersion("rbac.authorization.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
+// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
+// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// roleBinding must be an unmodified RoleBinding API object that was retrieved from the Kubernetes API.
+// ExtractRoleBinding provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return ExtractRoleBindingFrom(roleBinding, fieldManager, "")
+}
+
func (b RoleBindingApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
index 19d0420a..350930af 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/roleref.go
@@ -20,10 +20,15 @@ package v1beta1
// RoleRefApplyConfiguration represents a declarative configuration of the RoleRef type for use
// with apply.
+//
+// RoleRef contains information that points to the role being used
type RoleRefApplyConfiguration struct {
+ // APIGroup is the group for the resource being referenced
APIGroup *string `json:"apiGroup,omitempty"`
- Kind *string `json:"kind,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind is the type of resource being referenced
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced
+ Name *string `json:"name,omitempty"`
}
// RoleRefApplyConfiguration constructs a declarative configuration of the RoleRef type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
index f7c1a21a..8abbaa9f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/subject.go
@@ -20,10 +20,21 @@ package v1beta1
// SubjectApplyConfiguration represents a declarative configuration of the Subject type for use
// with apply.
+//
+// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
+// or a value for non-objects such as user and group names.
type SubjectApplyConfiguration struct {
- Kind *string `json:"kind,omitempty"`
- APIGroup *string `json:"apiGroup,omitempty"`
- Name *string `json:"name,omitempty"`
+ // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
+ // If the Authorizer does not recognize the kind value, the Authorizer should report an error.
+ Kind *string `json:"kind,omitempty"`
+ // APIGroup holds the API group of the referenced subject.
+ // Defaults to "" for ServiceAccount subjects.
+ // Defaults to "rbac.authorization.k8s.io" for User and Group subjects.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Name of the object being referenced.
+ Name *string `json:"name,omitempty"`
+ // Namespace of the referenced object. If the object kind is non-namespaced, such as "User" or "Group", and this value is not empty,
+ // the Authorizer should report an error.
Namespace *string `json:"namespace,omitempty"`
}
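Putting the RoleBinding, RoleRef, and Subject pieces above together, a sketch of building a binding with the generated apply configurations; the binding name "reader-binding", namespace "demo", Role "reader", and ServiceAccount "skyhook" are hypothetical:

package example

import (
	rbacv1beta1ac "k8s.io/client-go/applyconfigurations/rbac/v1beta1"
)

// readerBinding binds the hypothetical Role "reader" to a ServiceAccount.
// APIGroup is "" for ServiceAccount subjects and "rbac.authorization.k8s.io"
// for the RoleRef, matching the defaults described in the comments above.
func readerBinding() *rbacv1beta1ac.RoleBindingApplyConfiguration {
	return rbacv1beta1ac.RoleBinding("reader-binding", "demo").
		WithRoleRef(rbacv1beta1ac.RoleRef().
			WithAPIGroup("rbac.authorization.k8s.io").
			WithKind("Role").
			WithName("reader")).
		WithSubjects(rbacv1beta1ac.Subject().
			WithKind("ServiceAccount").
			WithName("skyhook").
			WithNamespace("demo"))
}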
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocateddevicestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocateddevicestatus.go
index 2c2c4156..3b04727e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocateddevicestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocateddevicestatus.go
@@ -25,13 +25,42 @@ import (
// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use
// with apply.
+//
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
type AllocatedDeviceStatusApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- ShareID *string `json:"shareID,omitempty"`
- Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device.
+ ShareID *string `json:"shareID,omitempty"`
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // Must not contain more than 8 entries.
+ Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // NetworkData contains network-related information specific to the device.
NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocationresult.go
index b536e49d..83f0b968 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/allocationresult.go
@@ -25,10 +25,20 @@ import (
// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use
// with apply.
+//
+// AllocationResult contains attributes of an allocated resource.
type AllocationResultApplyConfiguration struct {
- Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
- NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
+ // Devices is the result of allocating devices.
+ Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllocationTimestamp stores the time when the resources were allocated.
+ // This field is not guaranteed to be set, in which case that time is unknown.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gate.
+ AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
}
// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go
index 2c016efa..43bd7e01 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicy.go
@@ -24,10 +24,39 @@ import (
// CapacityRequestPolicyApplyConfiguration represents a declarative configuration of the CapacityRequestPolicy type for use
// with apply.
+//
+// CapacityRequestPolicy defines how requests consume device capacity.
+//
+// Must not set more than one ValidRequestValues.
type CapacityRequestPolicyApplyConfiguration struct {
- Default *resource.Quantity `json:"default,omitempty"`
- ValidValues []resource.Quantity `json:"validValues,omitempty"`
- ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
+ // Default specifies how much of this capacity is consumed by a request
+ // that does not contain an entry for it in DeviceRequest's Capacity.
+ Default *resource.Quantity `json:"default,omitempty"`
+ // ValidValues defines a set of acceptable quantity values in consuming requests.
+ //
+ // Must not contain more than 10 entries.
+ // Must be sorted in ascending order.
+ //
+ // If this field is set,
+ // Default must be defined and it must be included in ValidValues list.
+ //
+ // If the requested amount does not match any valid value but smaller than some valid values,
+ // the scheduler calculates the smallest valid value that is greater than or equal to the request.
+ // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+ //
+ // If the requested amount exceeds all valid values, the request violates the policy,
+ // and this device cannot be allocated.
+ ValidValues []resource.Quantity `json:"validValues,omitempty"`
+ // ValidRange defines an acceptable quantity value range in consuming requests.
+ //
+ // If this field is set,
+ // Default must be defined and it must fall within the defined ValidRange.
+ //
+ // If the requested amount does not fall within the defined range, the request violates the policy,
+ // and this device cannot be allocated.
+ //
+ // If the request doesn't contain this capacity entry, Default value is used.
+ ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
}
// CapacityRequestPolicyApplyConfiguration constructs a declarative configuration of the CapacityRequestPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go
index 6f486b48..b3bade61 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequestpolicyrange.go
@@ -24,9 +24,31 @@ import (
// CapacityRequestPolicyRangeApplyConfiguration represents a declarative configuration of the CapacityRequestPolicyRange type for use
// with apply.
+//
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+// - If the requested amount is less than Min, it is rounded up to the Min value.
+// - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+// it will be rounded up to the next value equal to Min + (n * Step).
+// - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+// - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+// and the device cannot be allocated.
type CapacityRequestPolicyRangeApplyConfiguration struct {
- Min *resource.Quantity `json:"min,omitempty"`
- Max *resource.Quantity `json:"max,omitempty"`
+ // Min specifies the minimum capacity allowed for a consumption request.
+ //
+ // Min must be greater than or equal to zero,
+ // and less than or equal to the capacity value.
+ // requestPolicy.default must be more than or equal to the minimum.
+ Min *resource.Quantity `json:"min,omitempty"`
+ // Max defines the upper limit for capacity that can be requested.
+ //
+ // Max must be less than or equal to the capacity value.
+ // Min and requestPolicy.default must be less than or equal to the maximum.
+ Max *resource.Quantity `json:"max,omitempty"`
+ // Step defines the step size between valid capacity amounts within the range.
+ //
+ // Max (if set) and requestPolicy.default must be a multiple of Step.
+ // Min + Step must be less than or equal to the capacity value.
Step *resource.Quantity `json:"step,omitempty"`
}
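To make the rounding rules above concrete, here is a small worked sketch using plain int64 amounts instead of resource.Quantity; roundToRange is a hypothetical helper, not part of client-go:

package example

// roundToRange applies the CapacityRequestPolicyRange semantics described
// above: round up to min, then up to the next min + n*step, and fail if the
// result would exceed max (max <= 0 means no Max is set).
func roundToRange(requested, min, step, max int64) (int64, bool) {
	if requested < min {
		requested = min // round up to Min
	}
	if step > 0 {
		if over := (requested - min) % step; over != 0 {
			requested += step - over // round up to the next min + n*step
		}
	}
	if max > 0 && requested > max {
		return 0, false // request violates the policy; device cannot be allocated
	}
	return requested, true
}

// Example: with min=2, step=4, max=14 a request of 5 becomes 6 (= 2 + 1*4),
// while a request of 15 is rejected because it rounds to 18, past max.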
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go
index b6143efa..83d321cd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/capacityrequirements.go
@@ -25,7 +25,31 @@ import (
// CapacityRequirementsApplyConfiguration represents a declarative configuration of the CapacityRequirements type for use
// with apply.
+//
+// CapacityRequirements defines the capacity requirements for a specific device request.
type CapacityRequirementsApplyConfiguration struct {
+ // Requests represent individual device resource requests for distinct resources,
+ // all of which must be provided by the device.
+ //
+ // This value is used as an additional filtering condition against the available capacity on the device.
+ // This is semantically equivalent to a CEL selector with
+ // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+ // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+ //
+ // When a requestPolicy is defined, the requested amount is adjusted upward
+ // to the nearest valid value based on the policy.
+ // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+ // the device is considered ineligible for allocation.
+ //
+ // For any capacity that is not explicitly requested:
+ // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+ // (i.e., the whole device is claimed).
+ // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+ //
+ // If the device allows multiple allocation,
+ // the aggregated amount across all requests must not exceed the capacity value.
+ // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+ // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
Requests map[resourcev1.QualifiedName]resource.Quantity `json:"requests,omitempty"`
}
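As an illustration of the Requests map described above, a claim asking for at least 4Gi of a device's capacity could be expressed as below; the capacity name "memory" is hypothetical:

package example

import (
	resourceapi "k8s.io/api/resource/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// memoryRequirement filters devices to those able to provide at least 4Gi of
// a hypothetical "memory" capacity, per the semantics described above.
func memoryRequirement() *resourcev1ac.CapacityRequirementsApplyConfiguration {
	return &resourcev1ac.CapacityRequirementsApplyConfiguration{
		Requests: map[resourceapi.QualifiedName]resource.Quantity{
			"memory": resource.MustParse("4Gi"),
		},
	}
}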
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go
index 4d1e8ecb..5a77aac9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/celdeviceselector.go
@@ -20,7 +20,61 @@ package v1
// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
// with apply.
+//
+// CELDeviceSelector contains a CEL expression for selecting a device.
type CELDeviceSelectorApplyConfiguration struct {
+ // Expression is a CEL expression which evaluates a single device. It
+ // must evaluate to true when the device under consideration satisfies
+ // the desired criteria, and false when it does not. Any other result
+ // is an error and causes allocation of devices to abort.
+ //
+ // The expression's input is an object named "device", which carries
+ // the following properties:
+ // - driver (string): the name of the driver which defines this device.
+ // - attributes (map[string]object): the device's attributes, grouped by prefix
+ // (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+ // of the attributes which were prefixed by "dra.example.com".
+ // - capacity (map[string]object): the device's capacities, grouped by prefix.
+ // - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device
+ // (v1.34+ with the DRAConsumableCapacity feature enabled).
+ //
+ // Example: Consider a device with driver="dra.example.com", which exposes
+ // two attributes named "model" and "ext.example.com/family" and which
+ // exposes one capacity named "modules". This input to this expression
+ // would have the following fields:
+ //
+ // device.driver
+ // device.attributes["dra.example.com"].model
+ // device.attributes["ext.example.com"].family
+ // device.capacity["dra.example.com"].modules
+ //
+ // The device.driver field can be used to check for a specific driver,
+ // either as a high-level precondition (i.e. you only want to consider
+ // devices from this driver) or as part of a multi-clause expression
+ // that is meant to consider devices from different drivers.
+ //
+ // The value type of each attribute is defined by the device
+ // definition, and users who write these expressions must consult the
+ // documentation for their specific drivers. The value type of each
+ // capacity is Quantity.
+ //
+ // If an unknown prefix is used as a lookup in either device.attributes
+ // or device.capacity, an empty map will be returned. Any reference to
+ // an unknown field will cause an evaluation error and allocation to
+ // abort.
+ //
+ // A robust expression should check for the existence of attributes
+ // before referencing them.
+ //
+ // For ease of use, the cel.bind() function is enabled, and can be used
+ // to simplify expressions that access multiple attributes with the
+ // same domain. For example:
+ //
+ // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+ //
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
Expression *string `json:"expression,omitempty"`
}
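Putting the expression rules above into practice, a sketch of a selector; the driver domain "dra.example.com", attribute "model", and capacity "memory" are hypothetical:

package example

import (
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// gpuSelector matches devices from a hypothetical "dra.example.com" driver
// with a particular model and at least 40Gi of a "memory" capacity, using
// cel.bind to avoid repeating the attribute domain lookup.
func gpuSelector() *resourcev1ac.CELDeviceSelectorApplyConfiguration {
	const expr = `device.driver == "dra.example.com" &&
cel.bind(dra, device.attributes["dra.example.com"], dra.model == "a100") &&
device.capacity["dra.example.com"].memory.compareTo(quantity("40Gi")) >= 0`
	return resourcev1ac.CELDeviceSelector().WithExpression(expr)
}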
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counter.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counter.go
index 92ec63bb..161b8e16 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counter.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counter.go
@@ -24,7 +24,10 @@ import (
// CounterApplyConfiguration represents a declarative configuration of the Counter type for use
// with apply.
+//
+// Counter describes a quantity associated with a device.
type CounterApplyConfiguration struct {
+ // Value defines how much of a certain device counter is available.
Value *resource.Quantity `json:"value,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counterset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counterset.go
index 3a5d2863..08748990 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counterset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/counterset.go
@@ -20,8 +20,23 @@ package v1
// CounterSetApplyConfiguration represents a declarative configuration of the CounterSet type for use
// with apply.
+//
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourcePool.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
type CounterSetApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name defines the name of the counter set.
+ // It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Counters defines the set of counters for this CounterSet
+ // The name of each counter must be unique in that set and must be a DNS label.
+ //
+ // The maximum number of counters is 32.
Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go
index 2b6b5bfe..ca50f602 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/device.go
@@ -25,19 +25,94 @@ import (
// DeviceApplyConfiguration represents a declarative configuration of the Device type for use
// with apply.
+//
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
type DeviceApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Attributes map[resourcev1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
- Capacity map[resourcev1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
- ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
- BindsToNode *bool `json:"bindsToNode,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
+ // Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Attributes map[resourcev1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Capacity map[resourcev1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
+ // ConsumesCounters defines a list of references to sharedCounters
+ // and the set of counters that the device will
+ // consume from those counter sets.
+ //
+ // There can only be a single entry per counterSet.
+ //
+ // The maximum number of device counter consumptions per
+ // device is 2.
+ ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
+ // NodeName identifies the node where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines the nodes where the device is available.
+ //
+ // Must use exactly one term.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the device.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // If specified, these are the driver-defined taints.
+ //
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
+ // BindsToNode indicates if the usage of an allocation involving this device
+ // has to be limited to exactly the node that was chosen when allocating the claim.
+ // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector
+ // to match the node where the allocation was made.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindsToNode *bool `json:"bindsToNode,omitempty"`
+ // BindingConditions defines the conditions for proceeding with binding.
+ // All of these conditions must be set in the per-device status
+ // conditions with a value of True to proceed with binding the pod to the node
+ // while scheduling the pod.
+ //
+ // The maximum number of binding conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions defines the conditions for binding failure.
+ // They may be set in the per-device status conditions.
+ // If any is set to "True", a binding failure occurred.
+ //
+ // The maximum number of binding failure conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
+ //
+ // If AllowMultipleAllocations is set to true, the device can be allocated more than once,
+ // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not.
+ AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
}
// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with
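As a sketch of how a driver might describe one such device through the apply configurations; the device name "gpu-0" and the driver-scoped attribute and capacity names are hypothetical:

package example

import (
	resourceapi "k8s.io/api/resource/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// exampleDevice builds a device entry with one attribute and one capacity,
// staying well under the combined limit of 32 mentioned above.
func exampleDevice() *resourcev1ac.DeviceApplyConfiguration {
	dev := resourcev1ac.Device().WithName("gpu-0")
	dev.Attributes = map[resourceapi.QualifiedName]resourcev1ac.DeviceAttributeApplyConfiguration{
		"model": *resourcev1ac.DeviceAttribute().WithStringValue("a100"),
	}
	dev.Capacity = map[resourceapi.QualifiedName]resourcev1ac.DeviceCapacityApplyConfiguration{
		"memory": *resourcev1ac.DeviceCapacity().WithValue(resource.MustParse("40Gi")),
	}
	return dev
}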
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationconfiguration.go
index f1d009cc..29e1ba42 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationconfiguration.go
@@ -24,9 +24,20 @@ import (
// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
// with apply.
+//
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
type DeviceAllocationConfigurationApplyConfiguration struct {
- Source *resourcev1.AllocationConfigSource `json:"source,omitempty"`
- Requests []string `json:"requests,omitempty"`
+ // Source records whether the configuration comes from a class (and thus
+ // is not something that a normal user would have been able to set) or
+ // from a claim.
+ Source *resourcev1.AllocationConfigSource `json:"source,omitempty"`
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format `<main request>[/<subrequest>]`. If just
+ // the main request is given, the configuration applies to all subrequests.
+ Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationresult.go
index e95e45f2..911f3a63 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceallocationresult.go
@@ -20,9 +20,19 @@ package v1
// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use
// with apply.
+//
+// DeviceAllocationResult is the result of allocating devices.
type DeviceAllocationResultApplyConfiguration struct {
+ // Results lists all allocated devices.
Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"`
- Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceattribute.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceattribute.go
index c2e5829a..e41696ab 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceattribute.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceattribute.go
@@ -20,10 +20,17 @@ package v1
// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use
// with apply.
+//
+// DeviceAttribute must have exactly one field set.
type DeviceAttributeApplyConfiguration struct {
- IntValue *int64 `json:"int,omitempty"`
- BoolValue *bool `json:"bool,omitempty"`
- StringValue *string `json:"string,omitempty"`
+ // IntValue is a number.
+ IntValue *int64 `json:"int,omitempty"`
+ // BoolValue is a true/false value.
+ BoolValue *bool `json:"bool,omitempty"`
+ // StringValue is a string. Must not be longer than 64 characters.
+ StringValue *string `json:"string,omitempty"`
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
VersionValue *string `json:"version,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go
index 769b9cbc..322ca55a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecapacity.go
@@ -24,8 +24,24 @@ import (
// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use
// with apply.
+//
+// DeviceCapacity describes a quantity associated with a device.
type DeviceCapacityApplyConfiguration struct {
- Value *resource.Quantity `json:"value,omitempty"`
+ // Value defines how much of a certain capacity that device has.
+ //
+ // This field reflects the fixed total capacity and does not change.
+ // The consumed amount is tracked separately by scheduler
+ // and does not affect this value.
+ Value *resource.Quantity `json:"value,omitempty"`
+ // RequestPolicy defines how this DeviceCapacity must be consumed
+ // when the device is allowed to be shared by multiple allocations.
+ //
+ // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+ //
+ // If unset, capacity requests are unconstrained:
+ // requests can consume any amount of capacity, as long as the total consumed
+ // across all allocations does not exceed the device's defined capacity.
+ // If request is also unset, default is the full capacity value.
RequestPolicy *CapacityRequestPolicyApplyConfiguration `json:"requestPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaim.go
index 8297805f..1b85b4fa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaim.go
@@ -20,10 +20,19 @@ package v1
// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use
// with apply.
+//
+// DeviceClaim defines how to request devices with a ResourceClaim.
type DeviceClaimApplyConfiguration struct {
- Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
- Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
- Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaimconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaimconfiguration.go
index a5bae3bf..402cd877 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaimconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclaimconfiguration.go
@@ -20,7 +20,15 @@ package v1
// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use
// with apply.
+//
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
type DeviceClaimConfigurationApplyConfiguration struct {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format `<main request>[/<subrequest>]`. If just
+ // the main request is given, the configuration applies to all subrequests.
Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclass.go
index 2c9ed5bb..6d3bc7e7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclass.go
@@ -29,10 +29,27 @@ import (
// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use
// with apply.
+//
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type DeviceClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// DeviceClass constructs a declarative configuration of the DeviceClass type for use with
@@ -45,29 +62,14 @@ func DeviceClass(name string) *DeviceClassApplyConfiguration {
return b
}
-// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
-// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
-// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractDeviceClassFrom extracts the applied configuration owned by fieldManager from
+// deviceClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API.
-// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractDeviceClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractDeviceClass(deviceClass *resourcev1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "")
-}
-
-// ExtractDeviceClassStatus is the same as ExtractDeviceClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractDeviceClassStatus(deviceClass *resourcev1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "status")
-}
-
-func extractDeviceClass(deviceClass *resourcev1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
+func ExtractDeviceClassFrom(deviceClass *resourcev1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
b := &DeviceClassApplyConfiguration{}
err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1.DeviceClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +81,21 @@ func extractDeviceClass(deviceClass *resourcev1.DeviceClass, fieldManager string
b.WithAPIVersion("resource.k8s.io/v1")
return b, nil
}
+
+// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
+// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
+// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// deviceClass must be an unmodified DeviceClass API object that was retrieved from the Kubernetes API.
+// ExtractDeviceClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeviceClass(deviceClass *resourcev1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
+ return ExtractDeviceClassFrom(deviceClass, fieldManager, "")
+}
+
func (b DeviceClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassconfiguration.go
index 73d7e15a..c47fd9f8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassconfiguration.go
@@ -20,6 +20,8 @@ package v1
// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use
// with apply.
+//
+// DeviceClassConfiguration is used in DeviceClass.
type DeviceClassConfigurationApplyConfiguration struct {
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassspec.go
index 09500361..a304c7d8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceclassspec.go
@@ -20,10 +20,29 @@ package v1
// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
// with apply.
+//
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
type DeviceClassSpecApplyConfiguration struct {
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
- ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
+ // Each selector must be satisfied by a device which is claimed via this class.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+ // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
+ // ExtendedResourceName is the extended resource name for the devices of this class.
+ // The devices of this class can be used to satisfy a pod's extended resource requests.
+ // It has the same format as the name of a pod's extended resource.
+ // It should be unique among all the device classes in a cluster.
+ // If two device classes have the same name, then the class created later
+ // is picked to satisfy a pod's extended resource requests.
+ // If two classes are created at the same time, then the name of the class
+ // lexicographically sorted first is picked.
+ //
+ // This is an alpha field.
+ ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
}
// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconfiguration.go
index 7f4b88a3..c96749b0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconfiguration.go
@@ -20,7 +20,12 @@ package v1
// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
// with apply.
+//
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
type DeviceConfigurationApplyConfiguration struct {
+ // Opaque provides driver-specific configuration parameters.
Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconstraint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconstraint.go
index 1942f03f..cd2467e6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconstraint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceconstraint.go
@@ -24,9 +24,42 @@ import (
// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
// with apply.
+//
+// DeviceConstraint must have exactly one field set besides Requests.
type DeviceConstraintApplyConfiguration struct {
- Requests []string `json:"requests,omitempty"`
- MatchAttribute *resourcev1.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format `<main request>[/<subrequest>]`. If just
+ // the main request is given, the constraint applies to all subrequests.
+ Requests []string `json:"requests,omitempty"`
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ MatchAttribute *resourcev1.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // DistinctAttribute requires that all devices in question have this
+ // attribute and that its type and value are unique across those devices.
+ //
+ // This acts as the inverse of MatchAttribute.
+ //
+ // This constraint is used to avoid allocating multiple requests to the same device
+ // by ensuring attribute-level differentiation.
+ //
+ // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+ // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
DistinctAttribute *resourcev1.FullyQualifiedName `json:"distinctAttribute,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecounterconsumption.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecounterconsumption.go
index 6377d004..9bcd936d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecounterconsumption.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicecounterconsumption.go
@@ -20,9 +20,17 @@ package v1
// DeviceCounterConsumptionApplyConfiguration represents a declarative configuration of the DeviceCounterConsumption type for use
// with apply.
+//
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
type DeviceCounterConsumptionApplyConfiguration struct {
- CounterSet *string `json:"counterSet,omitempty"`
- Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
+ // CounterSet is the name of the set from which the
+ // counters defined will be consumed.
+ CounterSet *string `json:"counterSet,omitempty"`
+ // Counters defines the counters that will be consumed by the device.
+ //
+ // The maximum number of counters is 32.
+ Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
// DeviceCounterConsumptionApplyConfiguration constructs a declarative configuration of the DeviceCounterConsumption type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go
index a17ecfc6..57836ef3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequest.go
@@ -20,10 +20,42 @@ package v1
// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
// with apply.
+//
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices. With FirstAvailable it is also
+// possible to provide a prioritized list of requests.
type DeviceRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Exactly *ExactDeviceRequestApplyConfiguration `json:"exactly,omitempty"`
- FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // References using the name in the DeviceRequest will uniquely
+ // identify a request when the Exactly field is set. When the
+ // FirstAvailable field is set, a reference to the name of the
+ // DeviceRequest will match whatever subrequest is chosen by the
+ // scheduler.
+ //
+ // Must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Exactly specifies the details for a single request that must
+ // be met exactly for the request to be satisfied.
+ //
+ // One of Exactly or FirstAvailable must be set.
+ Exactly *ExactDeviceRequestApplyConfiguration `json:"exactly,omitempty"`
+ // FirstAvailable contains subrequests, of which exactly one will be
+ // selected by the scheduler. It tries to
+ // satisfy them in the order in which they are listed here. So if
+ // there are two entries in the list, the scheduler will only check
+ // the second one if it determines that the first one can not be used.
+ //
+ // DRA does not yet implement scoring, so the scheduler will
+ // select the first set of devices that satisfies all the
+ // requests in the claim. And if the requirements can
+ // be satisfied on more than one node, other scheduling features
+ // will determine which node is chosen. This means that the set of
+ // devices allocated to a claim might not be the optimal set
+ // available to the cluster. Scoring will be implemented later.
+ FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
}
// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
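A sketch of the Exactly form of a request built with these apply configurations; the request name "gpu" and DeviceClass name "example.com-gpu" are hypothetical:

package example

import (
	resourceapi "k8s.io/api/resource/v1"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// oneGPU requests exactly one device from a hypothetical "example.com-gpu"
// DeviceClass; the request name "gpu" is what pods reference in
// resources.claims and what claim constraints refer to.
func oneGPU() *resourcev1ac.DeviceRequestApplyConfiguration {
	return resourcev1ac.DeviceRequest().
		WithName("gpu").
		WithExactly(resourcev1ac.ExactDeviceRequest().
			WithDeviceClassName("example.com-gpu").
			WithAllocationMode(resourceapi.DeviceAllocationModeExactCount).
			WithCount(1))
}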
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequestallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequestallocationresult.go
index e9f49aa7..8b38fd28 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequestallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicerequestallocationresult.go
@@ -26,17 +26,75 @@ import (
// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
// with apply.
+//
+// DeviceRequestAllocationResult contains the allocation result for one request.
type DeviceRequestAllocationResultApplyConfiguration struct {
- Request *string `json:"request,omitempty"`
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- ShareID *types.UID `json:"shareID,omitempty"`
- ConsumedCapacity map[resourcev1.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. If it references a subrequest in the
+ // firstAvailable list on a DeviceRequest, this field must
+ // include both the name of the main request and the subrequest
+ // using the format <main request>/<subrequest>.
+ //
+ // Multiple devices may have been allocated per request.
+ Request *string `json:"request,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // A copy of all tolerations specified in the request at the time
+ // when the device got allocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // BindingConditions contains a copy of the BindingConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions contains a copy of the BindingFailureConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device,
+ // used when the device supports multiple simultaneous allocations.
+ // It serves as an additional map key to differentiate concurrent shares
+ // of the same device.
+ ShareID *types.UID `json:"shareID,omitempty"`
+ // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+ // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+ // value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount).
+ //
+ // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+ //
+ // This field is populated only for devices that allow multiple allocations.
+ // All capacity entries are included, even if the consumed amount is zero.
+ ConsumedCapacity map[resourcev1.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
}
// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go
index 0426206a..ba43ea22 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/deviceselector.go
@@ -20,7 +20,10 @@ package v1
// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
// with apply.
+//
+// DeviceSelector must have exactly one field set.
type DeviceSelectorApplyConfiguration struct {
+ // CEL contains a CEL expression for selecting a device.
CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
}
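
As a small illustration of the one-field rule above, a selector is built by wrapping a CEL expression; the driver domain and attribute below are made up and depend entirely on what a real driver publishes:

```go
package example

import (
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// modelSelector sketches a DeviceSelector that matches devices whose driver
// reports a "model" attribute equal to "A100". Both names are placeholders.
func modelSelector() *resourcev1ac.DeviceSelectorApplyConfiguration {
	return resourcev1ac.DeviceSelector().
		WithCEL(resourcev1ac.CELDeviceSelector().
			WithExpression(`device.attributes["gpu.example.com"].model == "A100"`))
}
```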
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicesubrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicesubrequest.go
index 4d5df312..f3ee27af 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicesubrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicesubrequest.go
@@ -24,14 +24,91 @@ import (
// DeviceSubRequestApplyConfiguration represents a declarative configuration of the DeviceSubRequest type for use
// with apply.
+//
+// DeviceSubRequest describes a request for device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the
+// AdminAccess field as that one is only supported when requesting a
+// specific device.
type DeviceSubRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // Name can be used to reference this subrequest in the list of constraints
+ // or the list of configurations for the claim. References must use the
+ // format <main request>/<subrequest>.
+ //
+ // Must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // subrequest.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // subrequest. All selectors must be satisfied for a device to be
+ // considered.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this subrequest. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This subrequest is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other subrequests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ Count *int64 `json:"count,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity define resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// DeviceSubRequestApplyConfiguration constructs a declarative configuration of the DeviceSubRequest type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go
index c4e7a221..db98da8b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetaint.go
@@ -25,11 +25,27 @@ import (
// DeviceTaintApplyConfiguration represents a declarative configuration of the DeviceTaint type for use
// with apply.
+//
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
type DeviceTaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1.DeviceTaintEffect `json:"effect,omitempty"`
- TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
+ // The taint key to be applied to a device.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // The taint value corresponding to the taint key.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // The effect of the taint on claims that do not tolerate the taint
+ // and through such claims on the pods using them.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
+ Effect *resourcev1.DeviceTaintEffect `json:"effect,omitempty"`
+ // TimeAdded represents the time at which the taint was added.
+ // Added automatically during create or update if not set.
+ TimeAdded *metav1.Time `json:"timeAdded,omitempty"`
}
// DeviceTaintApplyConfiguration constructs a declarative configuration of the DeviceTaint type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetoleration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetoleration.go
index de995b25..6e6df9e7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetoleration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/devicetoleration.go
@@ -24,12 +24,33 @@ import (
// DeviceTolerationApplyConfiguration represents a declarative configuration of the DeviceToleration type for use
// with apply.
+//
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
type DeviceTolerationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *resourcev1.DeviceTolerationOperator `json:"operator,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1.DeviceTaintEffect `json:"effect,omitempty"`
- TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+ // tolerate all taints of a particular category.
+ Operator *resourcev1.DeviceTolerationOperator `json:"operator,omitempty"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value must be empty, otherwise just a regular string.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and NoExecute.
+ Effect *resourcev1.DeviceTaintEffect `json:"effect,omitempty"`
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // If larger than zero, the time when the pod needs to be evicted is calculated as <time when taint was added> + <toleration seconds>.
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
}
// DeviceTolerationApplyConfiguration constructs a declarative configuration of the DeviceToleration type for use with
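
The toleration semantics above mirror core Pod tolerations. A hedged sketch with a made-up taint key, passing the string values documented in the field comments through the typed enums rather than relying on specific constant names:

```go
package example

import (
	resourcev1 "k8s.io/api/resource/v1"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// maintenanceToleration sketches a toleration that lets pods keep using a
// claim for one hour after any allocated device gets a NoExecute taint with
// the hypothetical key "example.com/maintenance".
func maintenanceToleration() *resourcev1ac.DeviceTolerationApplyConfiguration {
	return resourcev1ac.DeviceToleration().
		WithKey("example.com/maintenance").
		// "Exists" and "NoExecute" are the string values named in the
		// field documentation above.
		WithOperator(resourcev1.DeviceTolerationOperator("Exists")).
		WithEffect(resourcev1.DeviceTaintEffect("NoExecute")).
		WithTolerationSeconds(3600)
}
```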
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/exactdevicerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/exactdevicerequest.go
index 64d4f8d6..130893c6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/exactdevicerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/exactdevicerequest.go
@@ -24,14 +24,89 @@ import (
// ExactDeviceRequestApplyConfiguration represents a declarative configuration of the ExactDeviceRequest type for use
// with apply.
+//
+// ExactDeviceRequest is a request for one or more identical devices.
type ExactDeviceRequestApplyConfiguration struct {
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // request.
+ //
+ // A DeviceClassName is required.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // At least one device must exist on the node for the allocation to succeed.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ Count *int64 `json:"count,omitempty"`
+ // AdminAccess indicates that this is a claim for administrative access
+ // to the device(s). Claims with AdminAccess are expected to be used for
+ // monitoring or other management services for a device. They ignore
+ // all ordinary claims to the device with respect to access modes and
+ // any resource allocations.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity define resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// ExactDeviceRequestApplyConfiguration constructs a declarative configuration of the ExactDeviceRequest type for use with
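
For the common ExactCount path described above, a request for two devices of a single (placeholder) class could look roughly like this; the allocation mode is passed as its documented string value:

```go
package example

import (
	resourcev1 "k8s.io/api/resource/v1"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// twoDevices sketches an ExactDeviceRequest for exactly two devices of a
// placeholder "gpu.example.com" DeviceClass.
func twoDevices() *resourcev1ac.ExactDeviceRequestApplyConfiguration {
	return resourcev1ac.ExactDeviceRequest().
		WithDeviceClassName("gpu.example.com").
		WithAllocationMode(resourcev1.DeviceAllocationMode("ExactCount")).
		WithCount(2)
}
```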
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/networkdevicedata.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/networkdevicedata.go
index 37bf604a..5d0ee8fc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/networkdevicedata.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/networkdevicedata.go
@@ -20,10 +20,27 @@ package v1
// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use
// with apply.
+//
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
type NetworkDeviceDataApplyConfiguration struct {
- InterfaceName *string `json:"interfaceName,omitempty"`
- IPs []string `json:"ips,omitempty"`
- HardwareAddress *string `json:"hardwareAddress,omitempty"`
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ IPs []string `json:"ips,omitempty"`
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ HardwareAddress *string `json:"hardwareAddress,omitempty"`
}
// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/opaquedeviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/opaquedeviceconfiguration.go
index 5df44a9c..4e2210d7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/opaquedeviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/opaquedeviceconfiguration.go
@@ -24,8 +24,25 @@ import (
// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use
// with apply.
+//
+// OpaqueDeviceConfiguration contains configuration parameters for a driver
+// in a format defined by the driver vendor.
type OpaqueDeviceConfigurationApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
+ // Driver is used to determine which kubelet plugin needs
+ // to be passed these configuration parameters.
+ //
+ // An admission policy provided by the driver developer could use this
+ // to decide whether it needs to validate them.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // Parameters can contain arbitrary data. It is the responsibility of
+ // the driver developer to handle validation and versioning. Typically this
+ // includes self-identification and a version ("kind" + "apiVersion" for
+ // Kubernetes types), with conversion between different versions.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
Parameters *runtime.RawExtension `json:"parameters,omitempty"`
}
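
To make the driver/parameters pairing concrete, here is a hedged sketch with an invented driver name and payload; a real driver defines its own kind, apiVersion, and fields:

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// vendorConfig sketches an opaque, driver-defined configuration blob. The
// driver name and the JSON payload are placeholders.
func vendorConfig() *resourcev1ac.OpaqueDeviceConfigurationApplyConfiguration {
	return resourcev1ac.OpaqueDeviceConfiguration().
		WithDriver("gpu.example.com").
		WithParameters(runtime.RawExtension{
			Raw: []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GpuConfig","sharing":"timeSlicing"}`),
		})
}
```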
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaim.go
index dada5ca3..240e1818 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaim.go
@@ -29,11 +29,24 @@ import (
// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
// with apply.
+//
+// ResourceClaim describes a request for access to resources in the cluster,
+// for use by workloads. For example, if a workload needs an accelerator device
+// with specific properties, this is how that request is expressed. The status
+// stanza tracks whether this claim has been satisfied and what specific
+// resources have been allocated.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec describes what is being requested and how to configure it.
+ // The spec is immutable.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status describes whether the claim is ready to use and what has been allocated.
+ Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
}
// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
@@ -47,6 +60,27 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
return b
}
+// ExtractResourceClaimFrom extracts the applied configuration owned by fieldManager from
+// resourceClaim for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// resourceClaim must be a unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimFrom provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimFrom(resourceClaim *resourcev1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
+ b := &ResourceClaimApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1.ResourceClaim"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceClaim.Name)
+ b.WithNamespace(resourceClaim.Namespace)
+
+ b.WithKind("ResourceClaim")
+ b.WithAPIVersion("resource.k8s.io/v1")
+ return b, nil
+}
+
// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractResourceClaim(resourceClaim *resourcev1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "")
}
-// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractResourceClaimStatus extracts the applied configuration owned by fieldManager from
+// resourceClaim for the status subresource.
func ExtractResourceClaimStatus(resourceClaim *resourcev1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "status")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "status")
}
-func extractResourceClaim(resourceClaim *resourcev1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
- b := &ResourceClaimApplyConfiguration{}
- err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1.ResourceClaim"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(resourceClaim.Name)
- b.WithNamespace(resourceClaim.Namespace)
-
- b.WithKind("ResourceClaim")
- b.WithAPIVersion("resource.k8s.io/v1")
- return b, nil
-}
func (b ResourceClaimApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
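
The new ExtractResourceClaimFrom helper supports the extract/modify-in-place/apply workflow the comments describe. A rough sketch, assuming a client-go build new enough to expose the resource.k8s.io/v1 typed client and using a made-up field manager name and label:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
	"k8s.io/client-go/kubernetes"
)

// relabelClaim sketches extract -> modify -> apply for a ResourceClaim. Only
// fields owned by the given field manager end up in the extracted config.
func relabelClaim(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	claim, err := cs.ResourceV1().ResourceClaims(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// An empty subresource string extracts the main resource.
	ac, err := resourcev1ac.ExtractResourceClaimFrom(claim, "example-manager", "")
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"example.com/touched": "true"})
	_, err = cs.ResourceV1().ResourceClaims(ns).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
```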
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimconsumerreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimconsumerreference.go
index 7c761a44..c0cf7d87 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimconsumerreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimconsumerreference.go
@@ -24,11 +24,21 @@ import (
// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use
// with apply.
+//
+// ResourceClaimConsumerReference contains enough information to let you
+// locate the consumer of a ResourceClaim. The user must be a resource in the same
+// namespace as the ResourceClaim.
type ResourceClaimConsumerReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Resource *string `json:"resource,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
+ // APIGroup is the group for the resource being referenced. It is
+ // empty for the core API. This matches the group in the APIVersion
+ // that is used when creating the resources.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Resource is the type of resource being referenced, for example "pods".
+ Resource *string `json:"resource,omitempty"`
+ // Name is the name of resource being referenced.
+ Name *string `json:"name,omitempty"`
+ // UID identifies exactly one incarnation of the resource.
+ UID *types.UID `json:"uid,omitempty"`
}
// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimspec.go
index 7f05f4f9..7585c00c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimspec.go
@@ -20,7 +20,10 @@ package v1
// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
// with apply.
+//
+// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
type ResourceClaimSpecApplyConfiguration struct {
+ // Devices defines how to request devices.
Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimstatus.go
index 75865ba4..15b05a56 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimstatus.go
@@ -20,10 +20,36 @@ package v1
// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
// with apply.
+//
+// ResourceClaimStatus tracks whether the resource has been allocated and what
+// the result of that was.
type ResourceClaimStatusApplyConfiguration struct {
- Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // Allocation is set once the claim has been allocated successfully.
+ Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // ReservedFor indicates which entities are currently allowed to use
+ // the claim. A Pod which references a ResourceClaim which is not
+ // reserved for that Pod will not be started. A claim that is in
+ // use or might be in use because it has been reserved must not get
+ // deallocated.
+ //
+ // In a cluster with multiple scheduler instances, two pods might get
+ // scheduled concurrently by different schedulers. When they reference
+ // the same ResourceClaim which already has reached its maximum number
+ // of consumers, only one pod can be scheduled.
+ //
+ // Both schedulers try to add their pod to the claim.status.reservedFor
+ // field, but only the update that reaches the API server first gets
+ // stored. The other one fails with an error and the scheduler
+ // which issued it knows that it must put the pod back into the queue,
+ // waiting for the ResourceClaim to become usable again.
+ //
+ // There can be at most 256 such reservations. This may get increased in
+ // the future, but not reduced.
ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
- Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
+ Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
}
// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplate.go
index 69424661..7bbadb70 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplate.go
@@ -29,10 +29,21 @@ import (
// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
// with apply.
+//
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimTemplateApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
+ // Describes the ResourceClaim that is to be generated.
+ //
+ // This field is immutable. A ResourceClaim will get created by the
+ // control plane for a Pod when needed and then not get updated
+ // anymore.
+ Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
@@ -46,29 +57,14 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo
return b
}
-// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
-// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
-// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceClaimTemplateFrom extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
-// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceClaimTemplateFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
-}
-
-// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
-}
-
-func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
+func ExtractResourceClaimTemplateFrom(resourceClaimTemplate *resourcev1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
b := &ResourceClaimTemplateApplyConfiguration{}
err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1.ResourceClaimTemplate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +77,21 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1.ResourceClai
b.WithAPIVersion("resource.k8s.io/v1")
return b, nil
}
+
+// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
+// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ return ExtractResourceClaimTemplateFrom(resourceClaimTemplate, fieldManager, "")
+}
+
func (b ResourceClaimTemplateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplatespec.go
index af7d8772..0d2a2135 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceclaimtemplatespec.go
@@ -26,9 +26,17 @@ import (
// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use
// with apply.
+//
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpecApplyConfiguration struct {
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+ // when creating it. No other fields are allowed and will be rejected during
+ // validation.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec for the ResourceClaim. The entire content is copied unchanged
+ // into the ResourceClaim that gets created from this template. The
+ // same fields as in a ResourceClaim are also valid here.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with
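
Putting the template pieces together, a hedged sketch of a namespaced ResourceClaimTemplate whose generated claims ask for one device from a placeholder class:

```go
package example

import (
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// gpuClaimTemplate sketches a ResourceClaimTemplate. Labels set on the
// template spec's metadata are copied into every generated ResourceClaim.
func gpuClaimTemplate(namespace string) *resourcev1ac.ResourceClaimTemplateApplyConfiguration {
	return resourcev1ac.ResourceClaimTemplate("gpu-template", namespace).
		WithSpec(resourcev1ac.ResourceClaimTemplateSpec().
			WithLabels(map[string]string{"app.kubernetes.io/part-of": "example"}).
			WithSpec(resourcev1ac.ResourceClaimSpec().
				WithDevices(resourcev1ac.DeviceClaim().
					WithRequests(resourcev1ac.DeviceRequest().
						WithName("gpu").
						WithExactly(resourcev1ac.ExactDeviceRequest().
							WithDeviceClassName("gpu.example.com"))))))
}
```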
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourcepool.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourcepool.go
index 22e06076..2c779f3c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourcepool.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourcepool.go
@@ -20,10 +20,34 @@ package v1
// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use
// with apply.
+//
+// ResourcePool describes the pool that ResourceSlices belong to.
type ResourcePoolApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Generation *int64 `json:"generation,omitempty"`
- ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
+ // Name is used to identify the pool. For node-local devices, this
+ // is often the node name, but this is not required.
+ //
+ // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
+ // separated by slashes. This field is immutable.
+ Name *string `json:"name,omitempty"`
+ // Generation tracks the change in a pool over time. Whenever a driver
+ // changes something about one or more of the resources in a pool, it
+ // must change the generation in all ResourceSlices which are part of
+ // that pool. Consumers of ResourceSlices should only consider
+ // resources from the pool with the highest generation number. The
+ // generation may be reset by drivers, which should be fine for
+ // consumers, assuming that all ResourceSlices in a pool are updated to
+ // match or deleted.
+ //
+ // Combined with ResourceSliceCount, this mechanism enables consumers to
+ // detect pools which are comprised of multiple ResourceSlices and are
+ // in an incomplete state.
+ Generation *int64 `json:"generation,omitempty"`
+ // ResourceSliceCount is the total number of ResourceSlices in the pool at this
+ // generation number. Must be greater than zero.
+ //
+ // Consumers can use this to check whether they have seen all ResourceSlices
+ // belonging to the same pool.
+ ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
}
// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslice.go
index 6f9b2021..6c7ddf41 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslice.go
@@ -29,10 +29,39 @@ import (
// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
// with apply.
+//
+// ResourceSlice represents one or more resources in a pool of similar resources,
+// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
+// ResourceSlices comprise a pool is determined by the driver.
+//
+// At the moment, the only supported resources are devices with attributes and capacities.
+// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
+// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
+//
+// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
+// and updates all ResourceSlices with that new number and new resource definitions. A consumer
+// must only use ResourceSlices with the highest generation number and ignore all others.
+//
+// When allocating all resources in a pool matching certain criteria or when
+// looking for the best solution among several different alternatives, a
+// consumer should check the number of ResourceSlices in a pool (included in
+// each ResourceSlice) to determine whether its view of a pool is complete and
+// if not, should wait until the driver has completed updating the pool.
+//
+// For resources that are not local to a node, the node name is not set. Instead,
+// the driver may use a node selector to specify where the devices are available.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceSliceApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
+ // Contains the information published by the driver.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
@@ -45,29 +74,14 @@ func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
return b
}
-// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
-// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
-// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceSliceFrom extracts the applied configuration owned by fieldManager from
+// resourceSlice for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
-// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceSliceFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceSlice(resourceSlice *resourcev1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "")
-}
-
-// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceSliceStatus(resourceSlice *resourcev1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "status")
-}
-
-func extractResourceSlice(resourceSlice *resourcev1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
+func ExtractResourceSliceFrom(resourceSlice *resourcev1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
b := &ResourceSliceApplyConfiguration{}
err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1.ResourceSlice"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +93,21 @@ func extractResourceSlice(resourceSlice *resourcev1.ResourceSlice, fieldManager
b.WithAPIVersion("resource.k8s.io/v1")
return b, nil
}
+
+// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
+// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
+// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
+// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceSlice(resourceSlice *resourcev1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
+ return ExtractResourceSliceFrom(resourceSlice, fieldManager, "")
+}
+
func (b ResourceSliceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslicespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslicespec.go
index c629a092..b268fc8d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslicespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1/resourceslicespec.go
@@ -24,15 +24,64 @@ import (
// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use
// with apply.
+//
+// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
type ResourceSliceSpecApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
- PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
- SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
+ // Driver identifies the DRA driver providing the capacity information.
+ // A field selector can be used to list only ResourceSlice
+ // objects with a certain driver name.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
+ Driver *string `json:"driver,omitempty"`
+ // Pool describes the pool that this ResourceSlice belongs to.
+ Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
+ // NodeName identifies the node which provides the resources in this pool.
+ // A field selector can be used to list only ResourceSlice
+ // objects belonging to a certain node.
+ //
+ // This field can be used to limit access from nodes to ResourceSlices with
+ // the same node name. It also indicates to autoscalers that adding
+ // new nodes of the same type as some old node might also make new
+ // resources available.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ // This field is immutable.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines which nodes have access to the resources in the pool,
+ // when that pool is not limited to a single node.
+ //
+ // Must use exactly one term.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ NodeSelector *corev1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the resources in the pool.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // Devices lists some or all of the devices in this pool.
+ //
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
+ // PerDeviceNodeSelection defines whether the access from nodes to
+ // resources in the pool is set on the ResourceSlice level or on each
+ // device. If it is set to true, every device defined the ResourceSlice
+ // must specify this individually.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
+ // SharedCounters defines a list of counter sets, each of which
+ // has a name and a list of counters available.
+ //
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
+ SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
}
// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with
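
Tying the spec fields above together, a hedged sketch of a single-slice, node-local pool published by an invented driver; generation 1 with a slice count of 1 tells consumers that this one ResourceSlice is the whole pool:

```go
package example

import (
	resourcev1ac "k8s.io/client-go/applyconfigurations/resource/v1"
)

// nodeLocalSlice sketches a ResourceSlice for a pool that lives entirely on
// one node and contains a single device. All names are placeholders.
func nodeLocalSlice(nodeName string) *resourcev1ac.ResourceSliceApplyConfiguration {
	return resourcev1ac.ResourceSlice(nodeName + "-gpu.example.com").
		WithSpec(resourcev1ac.ResourceSliceSpec().
			WithDriver("gpu.example.com").
			WithNodeName(nodeName).
			WithPool(resourcev1ac.ResourcePool().
				WithName(nodeName).
				WithGeneration(1).
				WithResourceSliceCount(1)).
			WithDevices(resourcev1ac.Device().
				WithName("gpu-0")))
}
```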
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go
deleted file mode 100644
index c59b6a2e..00000000
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
-// with apply.
-type CELDeviceSelectorApplyConfiguration struct {
- Expression *string `json:"expression,omitempty"`
-}
-
-// CELDeviceSelectorApplyConfiguration constructs a declarative configuration of the CELDeviceSelector type for use with
-// apply.
-func CELDeviceSelector() *CELDeviceSelectorApplyConfiguration {
- return &CELDeviceSelectorApplyConfiguration{}
-}
-
-// WithExpression sets the Expression field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Expression field is set to the value of the last call.
-func (b *CELDeviceSelectorApplyConfiguration) WithExpression(value string) *CELDeviceSelectorApplyConfiguration {
- b.Expression = &value
- return b
-}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go
deleted file mode 100644
index 574299d1..00000000
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha3
-
-// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
-// with apply.
-type DeviceSelectorApplyConfiguration struct {
- CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
-}
-
-// DeviceSelectorApplyConfiguration constructs a declarative configuration of the DeviceSelector type for use with
-// apply.
-func DeviceSelector() *DeviceSelectorApplyConfiguration {
- return &DeviceSelectorApplyConfiguration{}
-}
-
-// WithCEL sets the CEL field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the CEL field is set to the value of the last call.
-func (b *DeviceSelectorApplyConfiguration) WithCEL(value *CELDeviceSelectorApplyConfiguration) *DeviceSelectorApplyConfiguration {
- b.CEL = value
- return b
-}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go
index 0dcd9a58..d9c2c6f4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaint.go
@@ -25,11 +25,27 @@ import (
// DeviceTaintApplyConfiguration represents a declarative configuration of the DeviceTaint type for use
// with apply.
+//
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
type DeviceTaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1alpha3.DeviceTaintEffect `json:"effect,omitempty"`
- TimeAdded *v1.Time `json:"timeAdded,omitempty"`
+ // The taint key to be applied to a device.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // The taint value corresponding to the taint key.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // The effect of the taint on claims that do not tolerate the taint
+ // and through such claims on the pods using them.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
+ Effect *resourcev1alpha3.DeviceTaintEffect `json:"effect,omitempty"`
+ // TimeAdded represents the time at which the taint was added.
+ // Added automatically during create or update if not set.
+ TimeAdded *v1.Time `json:"timeAdded,omitempty"`
}
// DeviceTaintApplyConfiguration constructs a declarative configuration of the DeviceTaint type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go
index f3327cf7..b28d015d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrule.go
@@ -29,10 +29,20 @@ import (
// DeviceTaintRuleApplyConfiguration represents a declarative configuration of the DeviceTaintRule type for use
// with apply.
+//
+// DeviceTaintRule adds one taint to all devices which match the selector.
+// This has the same effect as if the taint was specified directly
+// in the ResourceSlice by the DRA driver.
type DeviceTaintRuleApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeviceTaintRuleSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec specifies the selector and one taint.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *DeviceTaintRuleSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status provides information about what was requested in the spec.
+ Status *DeviceTaintRuleStatusApplyConfiguration `json:"status,omitempty"`
}
// DeviceTaintRule constructs a declarative configuration of the DeviceTaintRule type for use with
@@ -45,6 +55,26 @@ func DeviceTaintRule(name string) *DeviceTaintRuleApplyConfiguration {
return b
}
+// ExtractDeviceTaintRuleFrom extracts the applied configuration owned by fieldManager from
+// deviceTaintRule for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// deviceTaintRule must be an unmodified DeviceTaintRule API object that was retrieved from the Kubernetes API.
+// ExtractDeviceTaintRuleFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeviceTaintRuleFrom(deviceTaintRule *resourcev1alpha3.DeviceTaintRule, fieldManager string, subresource string) (*DeviceTaintRuleApplyConfiguration, error) {
+ b := &DeviceTaintRuleApplyConfiguration{}
+ err := managedfields.ExtractInto(deviceTaintRule, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceTaintRule"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(deviceTaintRule.Name)
+
+ b.WithKind("DeviceTaintRule")
+ b.WithAPIVersion("resource.k8s.io/v1alpha3")
+ return b, nil
+}
+
// ExtractDeviceTaintRule extracts the applied configuration owned by fieldManager from
// deviceTaintRule. If no managedFields are found in deviceTaintRule for fieldManager, a
// DeviceTaintRuleApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -55,30 +85,16 @@ func DeviceTaintRule(name string) *DeviceTaintRuleApplyConfiguration {
// ExtractDeviceTaintRule provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractDeviceTaintRule(deviceTaintRule *resourcev1alpha3.DeviceTaintRule, fieldManager string) (*DeviceTaintRuleApplyConfiguration, error) {
- return extractDeviceTaintRule(deviceTaintRule, fieldManager, "")
+ return ExtractDeviceTaintRuleFrom(deviceTaintRule, fieldManager, "")
}
-// ExtractDeviceTaintRuleStatus is the same as ExtractDeviceTaintRule except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractDeviceTaintRuleStatus extracts the applied configuration owned by fieldManager from
+// deviceTaintRule for the status subresource.
func ExtractDeviceTaintRuleStatus(deviceTaintRule *resourcev1alpha3.DeviceTaintRule, fieldManager string) (*DeviceTaintRuleApplyConfiguration, error) {
- return extractDeviceTaintRule(deviceTaintRule, fieldManager, "status")
+ return ExtractDeviceTaintRuleFrom(deviceTaintRule, fieldManager, "status")
}
-func extractDeviceTaintRule(deviceTaintRule *resourcev1alpha3.DeviceTaintRule, fieldManager string, subresource string) (*DeviceTaintRuleApplyConfiguration, error) {
- b := &DeviceTaintRuleApplyConfiguration{}
- err := managedfields.ExtractInto(deviceTaintRule, internal.Parser().Type("io.k8s.api.resource.v1alpha3.DeviceTaintRule"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(deviceTaintRule.Name)
-
- b.WithKind("DeviceTaintRule")
- b.WithAPIVersion("resource.k8s.io/v1alpha3")
- return b, nil
-}
func (b DeviceTaintRuleApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
@@ -247,6 +263,14 @@ func (b *DeviceTaintRuleApplyConfiguration) WithSpec(value *DeviceTaintRuleSpecA
return b
}
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *DeviceTaintRuleApplyConfiguration) WithStatus(value *DeviceTaintRuleStatusApplyConfiguration) *DeviceTaintRuleApplyConfiguration {
+ b.Status = value
+ return b
+}
+
// GetKind retrieves the value of the Kind field in the declarative configuration.
func (b *DeviceTaintRuleApplyConfiguration) GetKind() *string {
return b.TypeMetaApplyConfiguration.Kind
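
For reference, a minimal sketch of the extract/modify-in-place/apply workflow that the comments above describe. The field-manager name, driver name, and taint key are hypothetical, and it assumes the generated clientset exposes the resource.k8s.io/v1alpha3 group; this is illustrative only, not part of the vendored change.

package example

import (
	"context"

	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
)

// updateTaintEffect extracts the fields owned by fieldManager from an existing
// DeviceTaintRule, replaces the taint, and server-side applies the result.
func updateTaintEffect(ctx context.Context, client kubernetes.Interface, rule *resourceapi.DeviceTaintRule, fieldManager string) error {
	ac, err := resourceac.ExtractDeviceTaintRule(rule, fieldManager)
	if err != nil {
		return err
	}
	// Only fields previously owned by fieldManager are present in ac; set the
	// spec this manager should own going forward (values are hypothetical).
	ac.WithSpec(resourceac.DeviceTaintRuleSpec().
		WithDeviceSelector(resourceac.DeviceTaintSelector().
			WithDriver("dra.example.com")).
		WithTaint(resourceac.DeviceTaint().
			WithKey("example.com/unhealthy").
			WithEffect(resourceapi.DeviceTaintEffectNoExecute)))
	_, err = client.ResourceV1alpha3().DeviceTaintRules().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
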
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go
index a14ada3d..bbdee4ee 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulespec.go
@@ -20,9 +20,16 @@ package v1alpha3
// DeviceTaintRuleSpecApplyConfiguration represents a declarative configuration of the DeviceTaintRuleSpec type for use
// with apply.
+//
+// DeviceTaintRuleSpec specifies the selector and one taint.
type DeviceTaintRuleSpecApplyConfiguration struct {
+ // DeviceSelector defines which device(s) the taint is applied to.
+ // All selector criteria must be satisfied for a device to
+ // match. The empty selector matches all devices. Without
+ // a selector, no devices are matched.
DeviceSelector *DeviceTaintSelectorApplyConfiguration `json:"deviceSelector,omitempty"`
- Taint *DeviceTaintApplyConfiguration `json:"taint,omitempty"`
+ // The taint that gets applied to matching devices.
+ Taint *DeviceTaintApplyConfiguration `json:"taint,omitempty"`
}
// DeviceTaintRuleSpecApplyConfiguration constructs a declarative configuration of the DeviceTaintRuleSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulestatus.go
new file mode 100644
index 00000000..1cbc1433
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintrulestatus.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// DeviceTaintRuleStatusApplyConfiguration represents a declarative configuration of the DeviceTaintRuleStatus type for use
+// with apply.
+//
+// DeviceTaintRuleStatus provides information about an on-going pod eviction.
+type DeviceTaintRuleStatusApplyConfiguration struct {
+ // Conditions provide information about the state of the DeviceTaintRule
+ // and the cluster at some point in time,
+ // in a machine-readable and human-readable format.
+ //
+ // The following condition is currently defined as part of this API, more may
+ // get added:
+ // - Type: EvictionInProgress
+ // - Status: True if there are currently pods which need to be evicted, False otherwise
+ // (includes the effects which don't cause eviction).
+ // - Reason: not specified, may change
+ // - Message: includes information about number of pending pods and already evicted pods
+ // in a human-readable format, updated periodically, may change
+ //
+ // For `effect: None`, the condition above gets set once for each change to
+ // the spec, with the message containing information about what would happen
+ // if the effect was `NoExecute`. This feedback can be used to decide whether
+ // changing the effect to `NoExecute` will work as intended. It only gets
+ // set once to avoid having to constantly update the status.
+ //
+ // Must have 8 or fewer entries.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+}
+
+// DeviceTaintRuleStatusApplyConfiguration constructs a declarative configuration of the DeviceTaintRuleStatus type for use with
+// apply.
+func DeviceTaintRuleStatus() *DeviceTaintRuleStatusApplyConfiguration {
+ return &DeviceTaintRuleStatusApplyConfiguration{}
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *DeviceTaintRuleStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *DeviceTaintRuleStatusApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithConditions")
+ }
+ b.Conditions = append(b.Conditions, *values[i])
+ }
+ return b
+}
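
As a sketch of how a driver or controller might populate this status through the builders above, with hypothetical reason and message strings:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
)

// evictionInProgress builds a DeviceTaintRuleStatus apply configuration carrying
// the EvictionInProgress condition described in the field comment above.
func evictionInProgress() *resourceac.DeviceTaintRuleStatusApplyConfiguration {
	return resourceac.DeviceTaintRuleStatus().
		WithConditions(metav1ac.Condition().
			WithType("EvictionInProgress").
			WithStatus(metav1.ConditionTrue).
			WithReason("Evicting").                        // hypothetical reason
			WithMessage("2 pods pending, 3 pods evicted"). // hypothetical message
			WithLastTransitionTime(metav1.Now()))
}
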
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go
index aecb2aa2..a1a06fd3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicetaintselector.go
@@ -20,12 +20,29 @@ package v1alpha3
// DeviceTaintSelectorApplyConfiguration represents a declarative configuration of the DeviceTaintSelector type for use
// with apply.
+//
+// DeviceTaintSelector defines which device(s) a DeviceTaintRule applies to.
+// The empty selector matches all devices. Without a selector, no devices
+// are matched.
type DeviceTaintSelectorApplyConfiguration struct {
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // If driver is set, only devices from that driver are selected.
+ // This field corresponds to slice.spec.driver.
+ Driver *string `json:"driver,omitempty"`
+ // If pool is set, only devices in that pool are selected.
+ //
+ // Also setting the driver name may be useful to avoid
+ // ambiguity when different drivers use the same pool name,
+ // but this is not required because selecting pools from
+ // different drivers may also be useful, for example when
+ // drivers with node-local devices use the node name as
+ // their pool name.
+ Pool *string `json:"pool,omitempty"`
+ // If device is set, only devices with that name are selected.
+ // This field corresponds to slice.spec.devices[].name.
+ //
+ // Also setting driver and pool may be needed to avoid ambiguity,
+ // but is not required.
+ Device *string `json:"device,omitempty"`
}
// DeviceTaintSelectorApplyConfiguration constructs a declarative configuration of the DeviceTaintSelector type for use with
@@ -34,14 +51,6 @@ func DeviceTaintSelector() *DeviceTaintSelectorApplyConfiguration {
return &DeviceTaintSelectorApplyConfiguration{}
}
-// WithDeviceClassName sets the DeviceClassName field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the DeviceClassName field is set to the value of the last call.
-func (b *DeviceTaintSelectorApplyConfiguration) WithDeviceClassName(value string) *DeviceTaintSelectorApplyConfiguration {
- b.DeviceClassName = &value
- return b
-}
-
// WithDriver sets the Driver field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Driver field is set to the value of the last call.
@@ -65,16 +74,3 @@ func (b *DeviceTaintSelectorApplyConfiguration) WithDevice(value string) *Device
b.Device = &value
return b
}
-
-// WithSelectors adds the given value to the Selectors field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the Selectors field.
-func (b *DeviceTaintSelectorApplyConfiguration) WithSelectors(values ...*DeviceSelectorApplyConfiguration) *DeviceTaintSelectorApplyConfiguration {
- for i := range values {
- if values[i] == nil {
- panic("nil value passed to WithSelectors")
- }
- b.Selectors = append(b.Selectors, *values[i])
- }
- return b
-}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go
index 3fe28a39..45b08dfe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go
@@ -25,13 +25,42 @@ import (
// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use
// with apply.
+//
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
type AllocatedDeviceStatusApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- ShareID *string `json:"shareID,omitempty"`
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device.
+ ShareID *string `json:"shareID,omitempty"`
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // Must not contain more than 8 entries.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // NetworkData contains network-related information specific to the device.
NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go
index f031f974..f6fd7e20 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go
@@ -25,10 +25,20 @@ import (
// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use
// with apply.
+//
+// AllocationResult contains attributes of an allocated resource.
type AllocationResultApplyConfiguration struct {
- Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
+ // Devices is the result of allocating devices.
+ Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllocationTimestamp stores the time when the resources were allocated.
+ // This field is not guaranteed to be set, in which case that time is unknown.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gate.
+ AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
}
// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go
index e792ca24..8a0241a2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go
@@ -25,18 +25,90 @@ import (
// BasicDeviceApplyConfiguration represents a declarative configuration of the BasicDevice type for use
// with apply.
+//
+// BasicDevice defines one device instance.
type BasicDeviceApplyConfiguration struct {
- Attributes map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
- Capacity map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
- ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
- BindsToNode *bool `json:"bindsToNode,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Attributes map[resourcev1beta1.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Capacity map[resourcev1beta1.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
+ // ConsumesCounters defines a list of references to sharedCounters
+ // and the set of counters that the device will
+ // consume from those counter sets.
+ //
+ // There can only be a single entry per counterSet.
+ //
+ // The maximum number of device counter consumptions per
+ // device is 2.
+ ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
+ // NodeName identifies the node where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines the nodes where the device is available.
+ //
+ // Must use exactly one term.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the device.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // If specified, these are the driver-defined taints.
+ //
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
+ // BindsToNode indicates if the usage of an allocation involving this device
+ // has to be limited to exactly the node that was chosen when allocating the claim.
+ // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector
+ // to match the node where the allocation was made.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindsToNode *bool `json:"bindsToNode,omitempty"`
+ // BindingConditions defines the conditions for proceeding with binding.
+ // All of these conditions must be set in the per-device status
+ // conditions with a value of True to proceed with binding the pod to the node
+ // while scheduling the pod.
+ //
+ // The maximum number of binding conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions defines the conditions for binding failure.
+ // They may be set in the per-device status conditions.
+ // If any is true, a binding failure occurred.
+ //
+ // The maximum number of binding failure conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
+ //
+ // If AllowMultipleAllocations is set to true, the device can be allocated more than once,
+ // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not.
+ AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
}
// BasicDeviceApplyConfiguration constructs a declarative configuration of the BasicDevice type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go
index 2f76a55d..6c6aa2b8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicy.go
@@ -24,10 +24,39 @@ import (
// CapacityRequestPolicyApplyConfiguration represents a declarative configuration of the CapacityRequestPolicy type for use
// with apply.
+//
+// CapacityRequestPolicy defines how requests consume device capacity.
+//
+// Must not set more than one ValidRequestValues.
type CapacityRequestPolicyApplyConfiguration struct {
- Default *resource.Quantity `json:"default,omitempty"`
- ValidValues []resource.Quantity `json:"validValues,omitempty"`
- ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
+ // Default specifies how much of this capacity is consumed by a request
+ // that does not contain an entry for it in DeviceRequest's Capacity.
+ Default *resource.Quantity `json:"default,omitempty"`
+ // ValidValues defines a set of acceptable quantity values in consuming requests.
+ //
+ // Must not contain more than 10 entries.
+ // Must be sorted in ascending order.
+ //
+ // If this field is set,
+ // Default must be defined and it must be included in the ValidValues list.
+ //
+ // If the requested amount does not match any valid value but smaller than some valid values,
+ // the scheduler calculates the smallest valid value that is greater than or equal to the request.
+ // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+ //
+ // If the requested amount exceeds all valid values, the request violates the policy,
+ // and this device cannot be allocated.
+ ValidValues []resource.Quantity `json:"validValues,omitempty"`
+ // ValidRange defines an acceptable quantity value range in consuming requests.
+ //
+ // If this field is set,
+ // Default must be defined and it must fall within the defined ValidRange.
+ //
+ // If the requested amount does not fall within the defined range, the request violates the policy,
+ // and this device cannot be allocated.
+ //
+ // If the request doesn't contain this capacity entry, Default value is used.
+ ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
}
// CapacityRequestPolicyApplyConfiguration constructs a declarative configuration of the CapacityRequestPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go
index ec67e8df..cddfe6da 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequestpolicyrange.go
@@ -24,9 +24,31 @@ import (
// CapacityRequestPolicyRangeApplyConfiguration represents a declarative configuration of the CapacityRequestPolicyRange type for use
// with apply.
+//
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+// - If the requested amount is less than Min, it is rounded up to the Min value.
+// - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+// it will be rounded up to the next value equal to Min + (n * Step).
+// - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+// - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+// and the device cannot be allocated.
type CapacityRequestPolicyRangeApplyConfiguration struct {
- Min *resource.Quantity `json:"min,omitempty"`
- Max *resource.Quantity `json:"max,omitempty"`
+ // Min specifies the minimum capacity allowed for a consumption request.
+ //
+ // Min must be greater than or equal to zero,
+ // and less than or equal to the capacity value.
+ // requestPolicy.default must be greater than or equal to the minimum.
+ Min *resource.Quantity `json:"min,omitempty"`
+ // Max defines the upper limit for capacity that can be requested.
+ //
+ // Max must be less than or equal to the capacity value.
+ // Min and requestPolicy.default must be less than or equal to the maximum.
+ Max *resource.Quantity `json:"max,omitempty"`
+ // Step defines the step size between valid capacity amounts within the range.
+ //
+ // Max (if set) and requestPolicy.default must be a multiple of Step.
+ // Min + Step must be less than or equal to the capacity value.
Step *resource.Quantity `json:"step,omitempty"`
}
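
As a worked sketch of the Min/Step/Max rounding described above, using plain int64 amounts instead of resource.Quantity purely for illustration:

package example

// roundRequest applies the rounding rules documented for
// CapacityRequestPolicyRange to a requested amount: round up to Min, then up
// to the next Min + n*Step if Step is set, and reject anything above Max.
// Plain int64 units are used here purely for illustration; the real fields
// are resource.Quantity values.
func roundRequest(requested, minimum int64, step, maximum *int64) (consumed int64, ok bool) {
	v := requested
	if v < minimum {
		v = minimum // amounts below Min are rounded up to Min
	}
	if step != nil && (v-minimum)%*step != 0 {
		v = minimum + ((v-minimum)/(*step)+1)*(*step) // align to Min + n*Step
	}
	if maximum != nil && v > *maximum {
		return 0, false // exceeds Max: the policy is violated, device not allocatable
	}
	return v, true
}
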
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go
index c78618f4..03662a8c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/capacityrequirements.go
@@ -25,7 +25,31 @@ import (
// CapacityRequirementsApplyConfiguration represents a declarative configuration of the CapacityRequirements type for use
// with apply.
+//
+// CapacityRequirements defines the capacity requirements for a specific device request.
type CapacityRequirementsApplyConfiguration struct {
+ // Requests represent individual device resource requests for distinct resources,
+ // all of which must be provided by the device.
+ //
+ // This value is used as an additional filtering condition against the available capacity on the device.
+ // This is semantically equivalent to a CEL selector with
+ // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+ // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+ //
+ // When a requestPolicy is defined, the requested amount is adjusted upward
+ // to the nearest valid value based on the policy.
+ // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+ // the device is considered ineligible for allocation.
+ //
+ // For any capacity that is not explicitly requested:
+ // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+ // (i.e., the whole device is claimed).
+ // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+ //
+ // If the device allows multiple allocation,
+ // the aggregated amount across all requests must not exceed the capacity value.
+ // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+ // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
Requests map[resourcev1beta1.QualifiedName]resource.Quantity `json:"requests,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go
index c4a28bbf..42708c5c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go
@@ -20,7 +20,61 @@ package v1beta1
// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
// with apply.
+//
+// CELDeviceSelector contains a CEL expression for selecting a device.
type CELDeviceSelectorApplyConfiguration struct {
+ // Expression is a CEL expression which evaluates a single device. It
+ // must evaluate to true when the device under consideration satisfies
+ // the desired criteria, and false when it does not. Any other result
+ // is an error and causes allocation of devices to abort.
+ //
+ // The expression's input is an object named "device", which carries
+ // the following properties:
+ // - driver (string): the name of the driver which defines this device.
+ // - attributes (map[string]object): the device's attributes, grouped by prefix
+ // (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+ // of the attributes which were prefixed by "dra.example.com").
+ // - capacity (map[string]object): the device's capacities, grouped by prefix.
+ // - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device
+ // (v1.34+ with the DRAConsumableCapacity feature enabled).
+ //
+ // Example: Consider a device with driver="dra.example.com", which exposes
+ // two attributes named "model" and "ext.example.com/family" and which
+ // exposes one capacity named "modules". This input to this expression
+ // would have the following fields:
+ //
+ // device.driver
+ // device.attributes["dra.example.com"].model
+ // device.attributes["ext.example.com"].family
+ // device.capacity["dra.example.com"].modules
+ //
+ // The device.driver field can be used to check for a specific driver,
+ // either as a high-level precondition (i.e. you only want to consider
+ // devices from this driver) or as part of a multi-clause expression
+ // that is meant to consider devices from different drivers.
+ //
+ // The value type of each attribute is defined by the device
+ // definition, and users who write these expressions must consult the
+ // documentation for their specific drivers. The value type of each
+ // capacity is Quantity.
+ //
+ // If an unknown prefix is used as a lookup in either device.attributes
+ // or device.capacity, an empty map will be returned. Any reference to
+ // an unknown field will cause an evaluation error and allocation to
+ // abort.
+ //
+ // A robust expression should check for the existence of attributes
+ // before referencing them.
+ //
+ // For ease of use, the cel.bind() function is enabled, and can be used
+ // to simplify expressions that access multiple attributes with the
+ // same domain. For example:
+ //
+ // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+ //
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
Expression *string `json:"expression,omitempty"`
}
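
For instance, a claim or class could attach a selector along the lines of the documented example; the driver and attribute names below are taken from the comment above and are purely illustrative:

package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

// exampleSelector builds a CELDeviceSelector whose expression mirrors the
// cel.bind example from the field documentation above.
func exampleSelector() *resourceac.CELDeviceSelectorApplyConfiguration {
	return resourceac.CELDeviceSelector().WithExpression(
		`device.driver == "dra.example.com" && ` +
			`cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)`)
}
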
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go
index b33ed99a..951f3859 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counter.go
@@ -24,7 +24,10 @@ import (
// CounterApplyConfiguration represents a declarative configuration of the Counter type for use
// with apply.
+//
+// Counter describes a quantity associated with a device.
type CounterApplyConfiguration struct {
+ // Value defines how much of a certain device counter is available.
Value *resource.Quantity `json:"value,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go
index 7592fa4d..d5fad0dd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/counterset.go
@@ -20,8 +20,23 @@ package v1beta1
// CounterSetApplyConfiguration represents a declarative configuration of the CounterSet type for use
// with apply.
+//
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourcePool.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
type CounterSetApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name defines the name of the counter set.
+ // It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Counters defines the set of counters for this CounterSet.
+ // The name of each counter must be unique in that set and must be a DNS label.
+ //
+ // The maximum number of counters is 32.
Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go
index f635267e..76956989 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go
@@ -20,8 +20,14 @@ package v1beta1
// DeviceApplyConfiguration represents a declarative configuration of the Device type for use
// with apply.
+//
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
type DeviceApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Basic defines one device instance.
Basic *BasicDeviceApplyConfiguration `json:"basic,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go
index b5218ba4..314ff5fa 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go
@@ -24,9 +24,20 @@ import (
// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
// with apply.
+//
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
type DeviceAllocationConfigurationApplyConfiguration struct {
- Source *resourcev1beta1.AllocationConfigSource `json:"source,omitempty"`
- Requests []string `json:"requests,omitempty"`
+ // Source records whether the configuration comes from a class (and thus
+ // is not something that a normal user would have been able to set)
+ // or from a claim.
+ Source *resourcev1beta1.AllocationConfigSource `json:"source,omitempty"`
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format [/]. If just
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go
index bf309cf2..ee17ee19 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go
@@ -20,9 +20,19 @@ package v1beta1
// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use
// with apply.
+//
+// DeviceAllocationResult is the result of allocating devices.
type DeviceAllocationResultApplyConfiguration struct {
+ // Results lists all allocated devices.
Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"`
- Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go
index 6e88ae38..a4e9fe85 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go
@@ -20,10 +20,17 @@ package v1beta1
// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use
// with apply.
+//
+// DeviceAttribute must have exactly one field set.
type DeviceAttributeApplyConfiguration struct {
- IntValue *int64 `json:"int,omitempty"`
- BoolValue *bool `json:"bool,omitempty"`
- StringValue *string `json:"string,omitempty"`
+ // IntValue is a number.
+ IntValue *int64 `json:"int,omitempty"`
+ // BoolValue is a true/false value.
+ BoolValue *bool `json:"bool,omitempty"`
+ // StringValue is a string. Must not be longer than 64 characters.
+ StringValue *string `json:"string,omitempty"`
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
VersionValue *string `json:"version,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go
index 43a112b2..4e4a5b66 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go
@@ -24,8 +24,24 @@ import (
// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use
// with apply.
+//
+// DeviceCapacity describes a quantity associated with a device.
type DeviceCapacityApplyConfiguration struct {
- Value *resource.Quantity `json:"value,omitempty"`
+ // Value defines how much of a certain capacity that device has.
+ //
+ // This field reflects the fixed total capacity and does not change.
+ // The consumed amount is tracked separately by scheduler
+ // and does not affect this value.
+ Value *resource.Quantity `json:"value,omitempty"`
+ // RequestPolicy defines how this DeviceCapacity must be consumed
+ // when the device is allowed to be shared by multiple allocations.
+ //
+ // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+ //
+ // If unset, capacity requests are unconstrained:
+ // requests can consume any amount of capacity, as long as the total consumed
+ // across all allocations does not exceed the device's defined capacity.
+ // If request is also unset, default is the full capacity value.
RequestPolicy *CapacityRequestPolicyApplyConfiguration `json:"requestPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go
index 95c1c2e6..b5176ba0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go
@@ -20,10 +20,19 @@ package v1beta1
// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use
// with apply.
+//
+// DeviceClaim defines how to request devices with a ResourceClaim.
type DeviceClaimApplyConfiguration struct {
- Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
- Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
- Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go
index beac5e9d..2c7faab5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go
@@ -20,7 +20,15 @@ package v1beta1
// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use
// with apply.
+//
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
type DeviceClaimConfigurationApplyConfiguration struct {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go
index 894580cd..b0fcbac6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go
@@ -29,10 +29,27 @@ import (
// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use
// with apply.
+//
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type DeviceClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// DeviceClass constructs a declarative configuration of the DeviceClass type for use with
@@ -45,29 +62,14 @@ func DeviceClass(name string) *DeviceClassApplyConfiguration {
return b
}
-// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
-// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
-// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractDeviceClassFrom extracts the applied configuration owned by fieldManager from
+// deviceClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API.
-// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractDeviceClassFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "")
-}
-
-// ExtractDeviceClassStatus is the same as ExtractDeviceClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractDeviceClassStatus(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "status")
-}
-
-func extractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
+func ExtractDeviceClassFrom(deviceClass *resourcev1beta1.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
b := &DeviceClassApplyConfiguration{}
err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1beta1.DeviceClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +81,21 @@ func extractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager s
b.WithAPIVersion("resource.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
+// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
+// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// deviceClass must be an unmodified DeviceClass API object that was retrieved from the Kubernetes API.
+// ExtractDeviceClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeviceClass(deviceClass *resourcev1beta1.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
+ return ExtractDeviceClassFrom(deviceClass, fieldManager, "")
+}
+
func (b DeviceClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
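
To illustrate the extract/modify-in-place/apply workflow that the new ExtractDeviceClassFrom / ExtractDeviceClass comments describe, here is a minimal sketch (not part of the patch). It assumes a typed clientset exposing the ResourceV1beta1 group; the field manager name and label are invented.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func updateDeviceClass(ctx context.Context, cs kubernetes.Interface, name string) error {
	const fieldManager = "example-controller" // assumed manager name

	// Read the live object exactly as stored in the API server.
	dc, err := cs.ResourceV1beta1().DeviceClasses().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Extract only the fields this manager owns; "" selects the main resource,
	// which is what the ExtractDeviceClass shortcut does as well.
	ac, err := resourceac.ExtractDeviceClassFrom(dc, fieldManager, "")
	if err != nil {
		return err
	}

	// Modify in place, then re-apply under the same field manager.
	ac.WithLabels(map[string]string{"example.com/tier": "gpu"})
	_, err = cs.ResourceV1beta1().DeviceClasses().Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	})
	return err
}
```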
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go
index 3ce90eab..d521f01e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go
@@ -20,6 +20,8 @@ package v1beta1
// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use
// with apply.
+//
+// DeviceClassConfiguration is used in DeviceClass.
type DeviceClassConfigurationApplyConfiguration struct {
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go
index 171149eb..942da3ef 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go
@@ -20,10 +20,29 @@ package v1beta1
// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
// with apply.
+//
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
type DeviceClassSpecApplyConfiguration struct {
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
- ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
+ // Each selector must be satisfied by a device which is claimed via this class.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+ // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
+ // ExtendedResourceName is the extended resource name for the devices of this class.
+ // The devices of this class can be used to satisfy a pod's extended resource requests.
+ // It has the same format as the name of a pod's extended resource.
+ // It should be unique among all the device classes in a cluster.
+ // If two device classes have the same name, then the class created later
+ // is picked to satisfy a pod's extended resource requests.
+ // If two classes are created at the same time, then the name of the class
+ // lexicographically sorted first is picked.
+ //
+ // This is an alpha field.
+ ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
}
// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
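
A hedged sketch of how the documented DeviceClassSpec fields might be populated with the generated With* builders; the class name, CEL expression and extended resource name are illustrative assumptions, not taken from the patch.

```go
package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleDeviceClass() *resourceac.DeviceClassApplyConfiguration {
	return resourceac.DeviceClass("gpu.example.com").
		WithSpec(resourceac.DeviceClassSpec().
			// Every device claimed via this class must satisfy the selector.
			WithSelectors(resourceac.DeviceSelector().
				WithCEL(resourceac.CELDeviceSelector().
					WithExpression(`device.driver == "gpu.example.com"`))).
			// Alpha: lets pods request these devices as an extended resource.
			WithExtendedResourceName("example.com/gpu"))
}
```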
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go
index b0f41f5a..98861a3b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go
@@ -20,7 +20,12 @@ package v1beta1
// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
// with apply.
+//
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
type DeviceConfigurationApplyConfiguration struct {
+ // Opaque provides driver-specific configuration parameters.
Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go
index 624d9885..1716c923 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go
@@ -24,9 +24,42 @@ import (
// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
// with apply.
+//
+// DeviceConstraint must have exactly one field set besides Requests.
type DeviceConstraintApplyConfiguration struct {
- Requests []string `json:"requests,omitempty"`
- MatchAttribute *resourcev1beta1.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the constraint applies to all subrequests.
+ Requests []string `json:"requests,omitempty"`
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ MatchAttribute *resourcev1beta1.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // DistinctAttribute requires that all devices in question have this
+ // attribute and that its type and value are unique across those devices.
+ //
+ // This acts as the inverse of MatchAttribute.
+ //
+ // This constraint is used to avoid allocating multiple requests to the same device
+ // by ensuring attribute-level differentiation.
+ //
+ // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+ // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
DistinctAttribute *resourcev1beta1.FullyQualifiedName `json:"distinctAttribute,omitempty"`
}
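
The MatchAttribute / DistinctAttribute semantics above are easier to see in a small sketch; the request names and attribute names are invented, echoing the hypothetical "dra.example.com/numa" example in the comment.

```go
package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleConstraints() *resourceac.DeviceClaimApplyConfiguration {
	return resourceac.DeviceClaim().
		WithRequests(
			resourceac.DeviceRequest().WithName("gpu").WithDeviceClassName("gpu.example.com"),
			resourceac.DeviceRequest().WithName("nic").WithDeviceClassName("nic.example.com"),
		).
		WithConstraints(
			// Both devices must report the same value for this attribute.
			resourceac.DeviceConstraint().
				WithRequests("gpu", "nic").
				WithMatchAttribute(resourceapi.FullyQualifiedName("dra.example.com/numa")),
			// Inverse: devices fulfilling request "nic" must differ in this attribute.
			resourceac.DeviceConstraint().
				WithRequests("nic").
				WithDistinctAttribute(resourceapi.FullyQualifiedName("dra.example.com/serial")),
		)
}
```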
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go
index a8a8a5f5..1b1956c5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecounterconsumption.go
@@ -20,9 +20,17 @@ package v1beta1
// DeviceCounterConsumptionApplyConfiguration represents a declarative configuration of the DeviceCounterConsumption type for use
// with apply.
+//
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
type DeviceCounterConsumptionApplyConfiguration struct {
- CounterSet *string `json:"counterSet,omitempty"`
- Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
+ // CounterSet is the name of the set from which the
+ // counters defined will be consumed.
+ CounterSet *string `json:"counterSet,omitempty"`
+ // Counters defines the counters that will be consumed by the device.
+ //
+ // The maximum number of counters is 32.
+ Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
// DeviceCounterConsumptionApplyConfiguration constructs a declarative configuration of the DeviceCounterConsumption type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go
index 1d3f604e..eab54ba0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go
@@ -24,16 +24,131 @@ import (
// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
// with apply.
+//
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
type DeviceRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // Must be a DNS label and unique among all DeviceRequests in a
+ // ResourceClaim.
+ Name *string `json:"name,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // request.
+ //
+ // A class is required if no subrequests are specified in the
+ // firstAvailable list and no class can be set if subrequests
+ // are specified in the firstAvailable list.
+ // Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // At least one device must exist on the node for the allocation to succeed.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ Count *int64 `json:"count,omitempty"`
+ // AdminAccess indicates that this is a claim for administrative access
+ // to the device(s). Claims with AdminAccess are expected to be used for
+ // monitoring or other management services for a device. They ignore
+ // all ordinary claims to the device with respect to access modes and
+ // any resource allocations.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // FirstAvailable contains subrequests, of which exactly one will be
+ // satisfied by the scheduler to satisfy this request. It tries to
+ // satisfy them in the order in which they are listed here. So if
+ // there are two entries in the list, the scheduler will only check
+ // the second one if it determines that the first one cannot be used.
+ //
+ // This field may only be set in the entries of DeviceClaim.Requests.
+ //
+ // DRA does not yet implement scoring, so the scheduler will
+ // select the first set of devices that satisfies all the
+ // requests in the claim. And if the requirements can
+ // be satisfied on more than one node, other scheduling features
+ // will determine which node is chosen. This means that the set of
+ // devices allocated to a claim might not be the optimal set
+ // available to the cluster. Scoring will be implemented later.
+ FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This field can only be set when deviceClassName is set and no subrequests
+ // are specified in the firstAvailable list.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity defines resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
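
A minimal sketch of a DeviceRequest in the default ExactCount mode, assuming an invented device class and an illustrative CEL selector; the allocation mode is written as a string conversion rather than a named constant.

```go
package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleRequest() *resourceac.DeviceRequestApplyConfiguration {
	return resourceac.DeviceRequest().
		WithName("gpus"). // referenced from pod.spec.containers[].resources.claims
		WithDeviceClassName("gpu.example.com").
		// ExactCount is the default; spelled out here only for clarity.
		WithAllocationMode(resourceapi.DeviceAllocationMode("ExactCount")).
		WithCount(2).
		WithSelectors(resourceac.DeviceSelector().
			WithCEL(resourceac.CELDeviceSelector().
				WithExpression(`device.attributes["gpu.example.com"].model == "a100"`)))
}
```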
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go
index b61c4252..2171522a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go
@@ -26,17 +26,75 @@ import (
// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
// with apply.
+//
+// DeviceRequestAllocationResult contains the allocation result for one request.
type DeviceRequestAllocationResultApplyConfiguration struct {
- Request *string `json:"request,omitempty"`
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- ShareID *types.UID `json:"shareID,omitempty"`
- ConsumedCapacity map[resourcev1beta1.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. If it references a subrequest in the
+ // firstAvailable list on a DeviceRequest, this field must
+ // include both the name of the main request and the subrequest
+ // using the format <main request>/<subrequest>.
+ //
+ // Multiple devices may have been allocated per request.
+ Request *string `json:"request,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // A copy of all tolerations specified in the request at the time
+ // when the device got allocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // BindingConditions contains a copy of the BindingConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions contains a copy of the BindingFailureConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device,
+ // used when the device supports multiple simultaneous allocations.
+ // It serves as an additional map key to differentiate concurrent shares
+ // of the same device.
+ ShareID *types.UID `json:"shareID,omitempty"`
+ // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+ // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+ // value based on the device’s requestPolicy if applicable (i.e., may not be less than the requested amount).
+ //
+ // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+ //
+ // This field is populated only for devices that allow multiple allocations.
+ // All capacity entries are included, even if the consumed amount is zero.
+ ConsumedCapacity map[resourcev1beta1.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
}
// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
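
On the consuming side, these allocation results land in the claim's status. A short sketch (using the typed API object rather than the apply configuration) of walking those entries:

```go
package example

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

func printAllocations(claim *resourceapi.ResourceClaim) {
	if claim.Status.Allocation == nil {
		fmt.Println("claim not allocated yet")
		return
	}
	for _, r := range claim.Status.Allocation.Devices.Results {
		// Driver, pool and device together identify the allocated device.
		fmt.Printf("request %q -> %s/%s/%s (adminAccess=%v)\n",
			r.Request, r.Driver, r.Pool, r.Device,
			r.AdminAccess != nil && *r.AdminAccess)
	}
}
```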
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go
index bf60bf43..2a40d6e1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go
@@ -20,7 +20,10 @@ package v1beta1
// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
// with apply.
+//
+// DeviceSelector must have exactly one field set.
type DeviceSelectorApplyConfiguration struct {
+ // CEL contains a CEL expression for selecting a device.
CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go
index ef207925..2701074e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicesubrequest.go
@@ -24,14 +24,92 @@ import (
// DeviceSubRequestApplyConfiguration represents a declarative configuration of the DeviceSubRequest type for use
// with apply.
+//
+// DeviceSubRequest describes a request for device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to Request, but doesn't expose the AdminAccess
+// or FirstAvailable fields, as those can only be set on the top-level request.
+// AdminAccess is not supported for requests with a prioritized list, and
+// recursive FirstAvailable fields are not supported.
type DeviceSubRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // Name can be used to reference this subrequest in the list of constraints
+ // or the list of configurations for the claim. References must use the
+ // format <main request>/<subrequest>.
+ //
+ // Must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // subrequest.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // subrequest. All selectors must be satisfied for a device to be
+ // considered.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this subrequest. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This subrequest is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other subrequests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1beta1.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ Count *int64 `json:"count,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity defines resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// DeviceSubRequestApplyConfiguration constructs a declarative configuration of the DeviceSubRequest type for use with
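
A sketch of the FirstAvailable mechanism described above: two invented subrequest classes tried in order. Note that the top-level request sets only a name when subrequests are used.

```go
package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func examplePrioritizedRequest() *resourceac.DeviceRequestApplyConfiguration {
	return resourceac.DeviceRequest().
		WithName("accelerator").
		WithFirstAvailable(
			// Tried first; only if it cannot be satisfied is the next entry checked.
			resourceac.DeviceSubRequest().
				WithName("large").
				WithDeviceClassName("large-gpu.example.com").
				WithCount(1),
			resourceac.DeviceSubRequest().
				WithName("small").
				WithDeviceClassName("small-gpu.example.com").
				WithCount(2),
		)
}
```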
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go
index bfa826f0..55c7d58a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetaint.go
@@ -25,11 +25,27 @@ import (
// DeviceTaintApplyConfiguration represents a declarative configuration of the DeviceTaint type for use
// with apply.
+//
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
type DeviceTaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1beta1.DeviceTaintEffect `json:"effect,omitempty"`
- TimeAdded *v1.Time `json:"timeAdded,omitempty"`
+ // The taint key to be applied to a device.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // The taint value corresponding to the taint key.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // The effect of the taint on claims that do not tolerate the taint
+ // and through such claims on the pods using them.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
+ Effect *resourcev1beta1.DeviceTaintEffect `json:"effect,omitempty"`
+ // TimeAdded represents the time at which the taint was added.
+ // Added automatically during create or update if not set.
+ TimeAdded *v1.Time `json:"timeAdded,omitempty"`
}
// DeviceTaintApplyConfiguration constructs a declarative configuration of the DeviceTaint type for use with
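
A small, assumption-laden sketch of a DeviceTaint apply configuration; the key and value are invented, and the effect is written as a string conversion rather than a named constant.

```go
package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleTaint() *resourceac.DeviceTaintApplyConfiguration {
	// TimeAdded is left unset; it is filled in automatically on create or update.
	return resourceac.DeviceTaint().
		WithKey("example.com/unhealthy").
		WithValue("ecc-errors").
		// Claims that do not tolerate the taint cannot be scheduled onto the device.
		WithEffect(resourceapi.DeviceTaintEffect("NoSchedule"))
}
```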
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go
index 977af670..26a74452 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicetoleration.go
@@ -24,12 +24,33 @@ import (
// DeviceTolerationApplyConfiguration represents a declarative configuration of the DeviceToleration type for use
// with apply.
+//
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
type DeviceTolerationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *resourcev1beta1.DeviceTolerationOperator `json:"operator,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1beta1.DeviceTaintEffect `json:"effect,omitempty"`
- TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+ // tolerate all taints of a particular category.
+ Operator *resourcev1beta1.DeviceTolerationOperator `json:"operator,omitempty"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value must be empty, otherwise just a regular string.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and NoExecute.
+ Effect *resourcev1beta1.DeviceTaintEffect `json:"effect,omitempty"`
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // If larger than zero, the time when the pod needs to be evicted is calculated as <time when taint was added> + <tolerationSeconds>.
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
}
// DeviceTolerationApplyConfiguration constructs a declarative configuration of the DeviceToleration type for use with
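
The matching companion sketch: a toleration that keeps a claim's pods running on a device carrying the invented taint above for up to an hour; the operator and effect are string conversions rather than named constants.

```go
package example

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleToleration() *resourceac.DeviceTolerationApplyConfiguration {
	return resourceac.DeviceToleration().
		WithKey("example.com/unhealthy").
		// Exists matches any value of the key; Equal would also need WithValue.
		WithOperator(resourceapi.DeviceTolerationOperator("Exists")).
		WithEffect(resourceapi.DeviceTaintEffect("NoExecute")).
		// Evict consuming pods one hour after the NoExecute taint appears.
		WithTolerationSeconds(3600)
}
```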
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go
index c9d48801..5c3edcfe 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go
@@ -20,10 +20,29 @@ package v1beta1
// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use
// with apply.
+//
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
type NetworkDeviceDataApplyConfiguration struct {
- InterfaceName *string `json:"interfaceName,omitempty"`
- IPs []string `json:"ips,omitempty"`
- HardwareAddress *string `json:"hardwareAddress,omitempty"`
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ //
+ // Must not contain more than 16 entries.
+ IPs []string `json:"ips,omitempty"`
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ HardwareAddress *string `json:"hardwareAddress,omitempty"`
}
// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with
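
A sketch of how a network-oriented DRA driver might populate NetworkDeviceData when reporting device status; the interface name, addresses and MAC are invented values.

```go
package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleNetworkData() *resourceac.NetworkDeviceDataApplyConfiguration {
	return resourceac.NetworkDeviceData().
		WithInterfaceName("eth1").
		// CIDR notation, both address families allowed, at most 16 entries.
		WithIPs("192.0.2.5/24", "2001:db8::5/64").
		WithHardwareAddress("0a:1b:2c:3d:4e:5f")
}
```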
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go
index 0b52fa93..ae8231f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go
@@ -24,8 +24,25 @@ import (
// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use
// with apply.
+//
+// OpaqueDeviceConfiguration contains configuration parameters for a driver
+// in a format defined by the driver vendor.
type OpaqueDeviceConfigurationApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
+ // Driver is used to determine which kubelet plugin needs
+ // to be passed these configuration parameters.
+ //
+ // An admission policy provided by the driver developer could use this
+ // to decide whether it needs to validate them.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // Parameters can contain arbitrary data. It is the responsibility of
+ // the driver developer to handle validation and versioning. Typically this
+ // includes self-identification and a version ("kind" + "apiVersion" for
+ // Kubernetes types), with conversion between different versions.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
Parameters *runtime.RawExtension `json:"parameters,omitempty"`
}
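
A sketch of opaque, vendor-defined configuration attached to a DeviceClass, assuming the WithOpaque builder promoted from the inline DeviceConfiguration; the driver name and JSON payload are invented, and only the 10 Ki size limit constrains the payload.

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleOpaqueConfig() *resourceac.DeviceClassConfigurationApplyConfiguration {
	// Driver-defined document; the driver is responsible for validation and versioning.
	raw := []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GPUConfig","sharing":"time-slicing"}`)
	return resourceac.DeviceClassConfiguration().
		WithOpaque(resourceac.OpaqueDeviceConfiguration().
			WithDriver("gpu.example.com").
			WithParameters(runtime.RawExtension{Raw: raw}))
}
```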
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
index 82055340..aaeaf687 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go
@@ -29,11 +29,24 @@ import (
// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
// with apply.
+//
+// ResourceClaim describes a request for access to resources in the cluster,
+// for use by workloads. For example, if a workload needs an accelerator device
+// with specific properties, this is how that request is expressed. The status
+// stanza tracks whether this claim has been satisfied and what specific
+// resources have been allocated.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec describes what is being requested and how to configure it.
+ // The spec is immutable.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status describes whether the claim is ready to use and what has been allocated.
+ Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
}
// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
@@ -47,6 +60,27 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
return b
}
+// ExtractResourceClaimFrom extracts the applied configuration owned by fieldManager from
+// resourceClaim for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// resourceClaim must be an unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimFrom(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
+ b := &ResourceClaimApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaim"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceClaim.Name)
+ b.WithNamespace(resourceClaim.Namespace)
+
+ b.WithKind("ResourceClaim")
+ b.WithAPIVersion("resource.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "")
}
-// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractResourceClaimStatus extracts the applied configuration owned by fieldManager from
+// resourceClaim for the status subresource.
func ExtractResourceClaimStatus(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "status")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "status")
}
-func extractResourceClaim(resourceClaim *resourcev1beta1.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
- b := &ResourceClaimApplyConfiguration{}
- err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaim"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(resourceClaim.Name)
- b.WithNamespace(resourceClaim.Namespace)
-
- b.WithKind("ResourceClaim")
- b.WithAPIVersion("resource.k8s.io/v1beta1")
- return b, nil
-}
func (b ResourceClaimApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
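
For the status subresource the same extract workflow applies. A hedged sketch of a driver publishing per-device status, assuming the generated ApplyStatus helper and an invented field manager, driver, pool and device name:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
)

func reportDeviceStatus(ctx context.Context, cs kubernetes.Interface, ns, claimName string) error {
	const fieldManager = "gpu-example-driver" // assumed

	claim, err := cs.ResourceV1beta1().ResourceClaims(ns).Get(ctx, claimName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Equivalent to ExtractResourceClaimFrom(claim, fieldManager, "status").
	ac, err := resourceac.ExtractResourceClaimStatus(claim, fieldManager)
	if err != nil {
		return err
	}

	// Add the driver-owned device status entry and apply it to the status subresource.
	ac.WithStatus(resourceac.ResourceClaimStatus().
		WithDevices(resourceac.AllocatedDeviceStatus().
			WithDriver("gpu.example.com").
			WithPool("node-a").
			WithDevice("gpu-0")))

	_, err = cs.ResourceV1beta1().ResourceClaims(ns).ApplyStatus(ctx, ac, metav1.ApplyOptions{
		FieldManager: fieldManager,
		Force:        true,
	})
	return err
}
```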
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go
index f6eefdda..4891123b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go
@@ -24,11 +24,21 @@ import (
// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use
// with apply.
+//
+// ResourceClaimConsumerReference contains enough information to let you
+// locate the consumer of a ResourceClaim. The user must be a resource in the same
+// namespace as the ResourceClaim.
type ResourceClaimConsumerReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Resource *string `json:"resource,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
+ // APIGroup is the group for the resource being referenced. It is
+ // empty for the core API. This matches the group in the APIVersion
+ // that is used when creating the resources.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Resource is the type of resource being referenced, for example "pods".
+ Resource *string `json:"resource,omitempty"`
+ // Name is the name of resource being referenced.
+ Name *string `json:"name,omitempty"`
+ // UID identifies exactly one incarnation of the resource.
+ UID *types.UID `json:"uid,omitempty"`
}
// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go
index c6b1b0b4..2e3a41f0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go
@@ -20,7 +20,10 @@ package v1beta1
// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
// with apply.
+//
+// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
type ResourceClaimSpecApplyConfiguration struct {
+ // Devices defines how to request devices.
Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go
index bb3db18b..899660dc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go
@@ -20,10 +20,36 @@ package v1beta1
// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
// with apply.
+//
+// ResourceClaimStatus tracks whether the resource has been allocated and what
+// the result of that was.
type ResourceClaimStatusApplyConfiguration struct {
- Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // Allocation is set once the claim has been allocated successfully.
+ Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // ReservedFor indicates which entities are currently allowed to use
+ // the claim. A Pod which references a ResourceClaim which is not
+ // reserved for that Pod will not be started. A claim that is in
+ // use or might be in use because it has been reserved must not get
+ // deallocated.
+ //
+ // In a cluster with multiple scheduler instances, two pods might get
+ // scheduled concurrently by different schedulers. When they reference
+ // the same ResourceClaim which already has reached its maximum number
+ // of consumers, only one pod can be scheduled.
+ //
+ // Both schedulers try to add their pod to the claim.status.reservedFor
+ // field, but only the update that reaches the API server first gets
+ // stored. The other one fails with an error and the scheduler
+ // which issued it knows that it must put the pod back into the queue,
+ // waiting for the ResourceClaim to become usable again.
+ //
+ // There can be at most 256 such reservations. This may get increased in
+ // the future, but not reduced.
ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
- Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
+ Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
}
// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go
index deb46a25..fe420032 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go
@@ -29,10 +29,21 @@ import (
// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
// with apply.
+//
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimTemplateApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
+ // Describes the ResourceClaim that is to be generated.
+ //
+ // This field is immutable. A ResourceClaim will get created by the
+ // control plane for a Pod when needed and then not get updated
+ // anymore.
+ Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
@@ -46,29 +57,14 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo
return b
}
-// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
-// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
-// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceClaimTemplateFrom extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
-// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceClaimTemplateFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
-}
-
-// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
-}
-
-func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
+func ExtractResourceClaimTemplateFrom(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
b := &ResourceClaimTemplateApplyConfiguration{}
err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceClaimTemplate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +77,21 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.Resourc
b.WithAPIVersion("resource.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
+// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceClaimTemplate must be an unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimTemplate provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta1.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ return ExtractResourceClaimTemplateFrom(resourceClaimTemplate, fieldManager, "")
+}
+
func (b ResourceClaimTemplateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
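
A sketch of a ResourceClaimTemplate apply configuration matching the comments above: labels on the inner metadata are copied into every generated claim, and the inner spec is copied verbatim. The names and device class are assumptions, and WithLabels is assumed to be the builder promoted from the embedded ObjectMeta.

```go
package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

func exampleTemplate() *resourceac.ResourceClaimTemplateApplyConfiguration {
	return resourceac.ResourceClaimTemplate("gpu-template", "ml-team").
		WithSpec(resourceac.ResourceClaimTemplateSpec().
			// Copied into the metadata of every claim generated from this template.
			WithLabels(map[string]string{"app": "trainer"}).
			WithSpec(resourceac.ResourceClaimSpec().
				WithDevices(resourceac.DeviceClaim().
					WithRequests(resourceac.DeviceRequest().
						WithName("gpu").
						WithDeviceClassName("gpu.example.com")))))
}
```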
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go
index 4c17b756..765dcc52 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplatespec.go
@@ -26,9 +26,17 @@ import (
// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use
// with apply.
+//
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpecApplyConfiguration struct {
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+ // when creating it. No other fields are allowed and will be rejected during
+ // validation.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec for the ResourceClaim. The entire content is copied unchanged
+ // into the ResourceClaim that gets created from this template. The
+ // same fields as in a ResourceClaim are also valid here.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go
index 33c155b5..eff9e042 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go
@@ -20,10 +20,34 @@ package v1beta1
// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use
// with apply.
+//
+// ResourcePool describes the pool that ResourceSlices belong to.
type ResourcePoolApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Generation *int64 `json:"generation,omitempty"`
- ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
+ // Name is used to identify the pool. For node-local devices, this
+ // is often the node name, but this is not required.
+ //
+ // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
+ // separated by slashes. This field is immutable.
+ Name *string `json:"name,omitempty"`
+ // Generation tracks the change in a pool over time. Whenever a driver
+ // changes something about one or more of the resources in a pool, it
+ // must change the generation in all ResourceSlices which are part of
+ // that pool. Consumers of ResourceSlices should only consider
+ // resources from the pool with the highest generation number. The
+ // generation may be reset by drivers, which should be fine for
+ // consumers, assuming that all ResourceSlices in a pool are updated to
+ // match or deleted.
+ //
+ // Combined with ResourceSliceCount, this mechanism enables consumers to
+ // detect pools which are comprised of multiple ResourceSlices and are
+ // in an incomplete state.
+ Generation *int64 `json:"generation,omitempty"`
+ // ResourceSliceCount is the total number of ResourceSlices in the pool at this
+ // generation number. Must be greater than zero.
+ //
+ // Consumers can use this to check whether they have seen all ResourceSlices
+ // belonging to the same pool.
+ ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
}
// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with
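
A sketch of the completeness check the Generation / ResourceSliceCount comments describe: keep only the slices of an assumed pool at the highest generation and compare how many were seen against ResourceSliceCount. The driver and pool names are parameters supplied by the caller.

```go
package example

import (
	"context"
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func poolComplete(ctx context.Context, cs kubernetes.Interface, driver, pool string) (bool, error) {
	slices, err := cs.ResourceV1beta1().ResourceSlices().List(ctx, metav1.ListOptions{})
	if err != nil {
		return false, err
	}

	// Keep only the slices of this pool that carry the highest generation.
	var latest []resourceapi.ResourceSlice
	gen := int64(-1)
	for _, s := range slices.Items {
		if s.Spec.Driver != driver || s.Spec.Pool.Name != pool {
			continue
		}
		switch {
		case s.Spec.Pool.Generation > gen:
			gen, latest = s.Spec.Pool.Generation, []resourceapi.ResourceSlice{s}
		case s.Spec.Pool.Generation == gen:
			latest = append(latest, s)
		}
	}
	if len(latest) == 0 {
		return false, fmt.Errorf("no ResourceSlices found for pool %q of driver %q", pool, driver)
	}

	// ResourceSliceCount is the number of slices in the pool at this generation.
	return int64(len(latest)) == latest[0].Spec.Pool.ResourceSliceCount, nil
}
```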
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go
index d4d78a71..c9ec6a7b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go
@@ -29,10 +29,39 @@ import (
// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
// with apply.
+//
+// ResourceSlice represents one or more resources in a pool of similar resources,
+// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
+// ResourceSlices comprise a pool is determined by the driver.
+//
+// At the moment, the only supported resources are devices with attributes and capacities.
+// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
+// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
+//
+// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
+// and updates all ResourceSlices with that new number and new resource definitions. A consumer
+// must only use ResourceSlices with the highest generation number and ignore all others.
+//
+// When allocating all resources in a pool matching certain criteria or when
+// looking for the best solution among several different alternatives, a
+// consumer should check the number of ResourceSlices in a pool (included in
+// each ResourceSlice) to determine whether its view of a pool is complete and
+// if not, should wait until the driver has completed updating the pool.
+//
+// For resources that are not local to a node, the node name is not set. Instead,
+// the driver may use a node selector to specify where the devices are available.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceSliceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
+ // Contains the information published by the driver.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
@@ -45,29 +74,14 @@ func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
return b
}
-// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
-// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
-// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceSliceFrom extracts the applied configuration owned by fieldManager from
+// resourceSlice for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
-// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceSliceFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "")
-}
-
-// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceSliceStatus(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "status")
-}
-
-func extractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
+func ExtractResourceSliceFrom(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
b := &ResourceSliceApplyConfiguration{}
err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1beta1.ResourceSlice"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +93,21 @@ func extractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldMan
b.WithAPIVersion("resource.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
+// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
+// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// resourceSlice must be an unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
+// ExtractResourceSlice provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceSlice(resourceSlice *resourcev1beta1.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
+ return ExtractResourceSliceFrom(resourceSlice, fieldManager, "")
+}
+
func (b ResourceSliceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
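
The hunk above renames the three-argument helper to ExtractResourceSliceFrom and re-adds ExtractResourceSlice as a thin wrapper around it. A minimal sketch of the extract/modify-in-place/apply round trip these helpers enable; the field manager name and the idea of bumping the pool generation are assumptions for illustration.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// bumpPoolGeneration extracts only the fields owned by fieldManager, modifies
// them in place, and applies the result back, so fields owned by other
// managers are left untouched.
func bumpPoolGeneration(ctx context.Context, cs kubernetes.Interface, name, fieldManager string) error {
	live, err := cs.ResourceV1beta1().ResourceSlices().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// "" selects the main resource; a subresource such as "status" could be
	// passed here instead.
	ac, err := resourceac.ExtractResourceSliceFrom(live, fieldManager, "")
	if err != nil {
		return err
	}
	if ac.Spec == nil || ac.Spec.Pool == nil || ac.Spec.Pool.Generation == nil {
		return nil // this field manager does not own the pool generation
	}
	ac.Spec.Pool.WithGeneration(*ac.Spec.Pool.Generation + 1)
	_, err = cs.ResourceV1beta1().ResourceSlices().Apply(ctx, ac, metav1.ApplyOptions{FieldManager: fieldManager, Force: true})
	return err
}
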
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go
index 6eaae7da..23024864 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go
@@ -24,15 +24,64 @@ import (
// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use
// with apply.
+//
+// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
type ResourceSliceSpecApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
- PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
- SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
+ // Driver identifies the DRA driver providing the capacity information.
+ // A field selector can be used to list only ResourceSlice
+ // objects with a certain driver name.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
+ Driver *string `json:"driver,omitempty"`
+ // Pool describes the pool that this ResourceSlice belongs to.
+ Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
+ // NodeName identifies the node which provides the resources in this pool.
+ // A field selector can be used to list only ResourceSlice
+ // objects belonging to a certain node.
+ //
+ // This field can be used to limit access from nodes to ResourceSlices with
+ // the same node name. It also indicates to autoscalers that adding
+ // new nodes of the same type as some old node might also make new
+ // resources available.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ // This field is immutable.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines which nodes have access to the resources in the pool,
+ // when that pool is not limited to a single node.
+ //
+ // Must use exactly one term.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the resources in the pool.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // Devices lists some or all of the devices in this pool.
+ //
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
+ // PerDeviceNodeSelection defines whether the access from nodes to
+ // resources in the pool is set on the ResourceSlice level or on each
+ // device. If it is set to true, every device defined in the ResourceSlice
+ // must specify this individually.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
+ // SharedCounters defines a list of counter sets, each of which
+ // has a name and a list of counters available.
+ //
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
+ SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
}
// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with
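
The node-scoping rule above (exactly one of NodeName, NodeSelector, AllNodes and PerDeviceNodeSelection) in builder form; a sketch for a pool visible from every node, with hypothetical driver, pool, and device names.

package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta1"
)

// clusterWideSpec publishes a small pool that every node can access, so
// AllNodes is the single node-scoping field that gets set.
func clusterWideSpec() *resourceac.ResourceSliceSpecApplyConfiguration {
	return resourceac.ResourceSliceSpec().
		WithDriver("dra.example.com").
		WithPool(resourceac.ResourcePool().
			WithName("shared-pool").
			WithGeneration(1).
			WithResourceSliceCount(1)).
		WithAllNodes(true).
		WithDevices(
			resourceac.Device().WithName("dev-0"),
			resourceac.Device().WithName("dev-1"),
		)
}
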
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go
index 5e408c9c..4d7ba35c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocateddevicestatus.go
@@ -25,13 +25,42 @@ import (
// AllocatedDeviceStatusApplyConfiguration represents a declarative configuration of the AllocatedDeviceStatus type for use
// with apply.
+//
+// AllocatedDeviceStatus contains the status of an allocated device, if the
+// driver chooses to report it. This may include driver-specific information.
+//
+// The combination of Driver, Pool, Device, and ShareID must match the corresponding key
+// in Status.Allocation.Devices.
type AllocatedDeviceStatusApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- ShareID *string `json:"shareID,omitempty"`
- Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
- Data *runtime.RawExtension `json:"data,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device.
+ ShareID *string `json:"shareID,omitempty"`
+ // Conditions contains the latest observation of the device's state.
+ // If the device has been configured according to the class and claim
+ // config references, the `Ready` condition should be True.
+ //
+ // Must not contain more than 8 entries.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // Data contains arbitrary driver-specific data.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
+ Data *runtime.RawExtension `json:"data,omitempty"`
+ // NetworkData contains network-related information specific to the device.
NetworkData *NetworkDeviceDataApplyConfiguration `json:"networkData,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go
index fb2a78ec..91db54f9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/allocationresult.go
@@ -25,10 +25,20 @@ import (
// AllocationResultApplyConfiguration represents a declarative configuration of the AllocationResult type for use
// with apply.
+//
+// AllocationResult contains attributes of an allocated resource.
type AllocationResultApplyConfiguration struct {
- Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
+ // Devices is the result of allocating devices.
+ Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"`
+ // NodeSelector defines where the allocated resources are available. If
+ // unset, they are available everywhere.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllocationTimestamp stores the time when the resources were allocated.
+ // This field is not guaranteed to be set, in which case that time is unknown.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gate.
+ AllocationTimestamp *metav1.Time `json:"allocationTimestamp,omitempty"`
}
// AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go
index 6d0ed27d..2f83af4a 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicy.go
@@ -24,10 +24,39 @@ import (
// CapacityRequestPolicyApplyConfiguration represents a declarative configuration of the CapacityRequestPolicy type for use
// with apply.
+//
+// CapacityRequestPolicy defines how requests consume device capacity.
+//
+// Must not set more than one ValidRequestValues.
type CapacityRequestPolicyApplyConfiguration struct {
- Default *resource.Quantity `json:"default,omitempty"`
- ValidValues []resource.Quantity `json:"validValues,omitempty"`
- ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
+ // Default specifies how much of this capacity is consumed by a request
+ // that does not contain an entry for it in DeviceRequest's Capacity.
+ Default *resource.Quantity `json:"default,omitempty"`
+ // ValidValues defines a set of acceptable quantity values in consuming requests.
+ //
+ // Must not contain more than 10 entries.
+ // Must be sorted in ascending order.
+ //
+ // If this field is set,
+ // Default must be defined and it must be included in ValidValues list.
+ //
+ // If the requested amount does not match any valid value but smaller than some valid values,
+ // the scheduler calculates the smallest valid value that is greater than or equal to the request.
+ // That is: min(ceil(requestedValue) ∈ validValues), where requestedValue ≤ max(validValues).
+ //
+ // If the requested amount exceeds all valid values, the request violates the policy,
+ // and this device cannot be allocated.
+ ValidValues []resource.Quantity `json:"validValues,omitempty"`
+ // ValidRange defines an acceptable quantity value range in consuming requests.
+ //
+ // If this field is set,
+ // Default must be defined and it must fall within the defined ValidRange.
+ //
+ // If the requested amount does not fall within the defined range, the request violates the policy,
+ // and this device cannot be allocated.
+ //
+ // If the request doesn't contain this capacity entry, Default value is used.
+ ValidRange *CapacityRequestPolicyRangeApplyConfiguration `json:"validRange,omitempty"`
}
// CapacityRequestPolicyApplyConfiguration constructs a declarative configuration of the CapacityRequestPolicy type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go
index c3728db1..bd9a7342 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequestpolicyrange.go
@@ -24,9 +24,31 @@ import (
// CapacityRequestPolicyRangeApplyConfiguration represents a declarative configuration of the CapacityRequestPolicyRange type for use
// with apply.
+//
+// CapacityRequestPolicyRange defines a valid range for consumable capacity values.
+//
+// - If the requested amount is less than Min, it is rounded up to the Min value.
+// - If Step is set and the requested amount is between Min and Max but not aligned with Step,
+// it will be rounded up to the next value equal to Min + (n * Step).
+// - If Step is not set, the requested amount is used as-is if it falls within the range Min to Max (if set).
+// - If the requested or rounded amount exceeds Max (if set), the request does not satisfy the policy,
+// and the device cannot be allocated.
type CapacityRequestPolicyRangeApplyConfiguration struct {
- Min *resource.Quantity `json:"min,omitempty"`
- Max *resource.Quantity `json:"max,omitempty"`
+ // Min specifies the minimum capacity allowed for a consumption request.
+ //
+ // Min must be greater than or equal to zero,
+ // and less than or equal to the capacity value.
+ // requestPolicy.default must be more than or equal to the minimum.
+ Min *resource.Quantity `json:"min,omitempty"`
+ // Max defines the upper limit for capacity that can be requested.
+ //
+ // Max must be less than or equal to the capacity value.
+ // Min and requestPolicy.default must be less than or equal to the maximum.
+ Max *resource.Quantity `json:"max,omitempty"`
+ // Step defines the step size between valid capacity amounts within the range.
+ //
+ // Max (if set) and requestPolicy.default must be a multiple of Step.
+ // Min + Step must be less than or equal to the capacity value.
Step *resource.Quantity `json:"step,omitempty"`
}
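
The Min/Step/Max rules above amount to a small rounding function. A sketch of that logic using plain integers instead of resource.Quantity, purely to illustrate the documented behaviour; this helper is not part of client-go.

package example

import "errors"

// roundRequest rounds a requested amount per the documented policy: values
// below min are raised to min; with a step, values are rounded up to the next
// min + n*step; a value above max (when max is set) violates the policy.
func roundRequest(requested, min, step int64, max *int64) (int64, error) {
	v := requested
	if v < min {
		v = min
	}
	if step > 0 && (v-min)%step != 0 {
		v = min + ((v-min)/step+1)*step
	}
	if max != nil && v > *max {
		return 0, errors.New("request exceeds max: device cannot be allocated")
	}
	return v, nil
}
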
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go
index 57b6f1e2..4b36a08d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/capacityrequirements.go
@@ -25,7 +25,31 @@ import (
// CapacityRequirementsApplyConfiguration represents a declarative configuration of the CapacityRequirements type for use
// with apply.
+//
+// CapacityRequirements defines the capacity requirements for a specific device request.
type CapacityRequirementsApplyConfiguration struct {
+ // Requests represent individual device resource requests for distinct resources,
+ // all of which must be provided by the device.
+ //
+ // This value is used as an additional filtering condition against the available capacity on the device.
+ // This is semantically equivalent to a CEL selector with
+ // `device.capacity[<domain>].<name>.compareTo(quantity(<request quantity>)) >= 0`.
+ // For example, device.capacity['test-driver.cdi.k8s.io'].counters.compareTo(quantity('2')) >= 0.
+ //
+ // When a requestPolicy is defined, the requested amount is adjusted upward
+ // to the nearest valid value based on the policy.
+ // If the requested amount cannot be adjusted to a valid value—because it exceeds what the requestPolicy allows—
+ // the device is considered ineligible for allocation.
+ //
+ // For any capacity that is not explicitly requested:
+ // - If no requestPolicy is set, the default consumed capacity is equal to the full device capacity
+ // (i.e., the whole device is claimed).
+ // - If a requestPolicy is set, the default consumed capacity is determined according to that policy.
+ //
+ // If the device allows multiple allocation,
+ // the aggregated amount across all requests must not exceed the capacity value.
+ // The consumed capacity, which may be adjusted based on the requestPolicy if defined,
+ // is recorded in the resource claim’s status.devices[*].consumedCapacity field.
Requests map[resourcev1beta2.QualifiedName]resource.Quantity `json:"requests,omitempty"`
}
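
A sketch of the Requests map in builder form, asking for 10Gi of a hypothetical "dra.example.com/memory" capacity rather than the whole device; the resulting configuration would be set as the capacity of a device request, and any adjustment is governed by the device's requestPolicy as described above.

package example

import (
	resourcev1beta2 "k8s.io/api/resource/v1beta2"
	"k8s.io/apimachinery/pkg/api/resource"

	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

// memoryRequirements builds the capacity requirements for a request that
// consumes part of a shareable device instead of claiming all of it.
func memoryRequirements() *resourceac.CapacityRequirementsApplyConfiguration {
	return resourceac.CapacityRequirements().
		WithRequests(map[resourcev1beta2.QualifiedName]resource.Quantity{
			"dra.example.com/memory": resource.MustParse("10Gi"),
		})
}
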
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go
index c2c3e52f..861168b9 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/celdeviceselector.go
@@ -20,7 +20,61 @@ package v1beta2
// CELDeviceSelectorApplyConfiguration represents a declarative configuration of the CELDeviceSelector type for use
// with apply.
+//
+// CELDeviceSelector contains a CEL expression for selecting a device.
type CELDeviceSelectorApplyConfiguration struct {
+ // Expression is a CEL expression which evaluates a single device. It
+ // must evaluate to true when the device under consideration satisfies
+ // the desired criteria, and false when it does not. Any other result
+ // is an error and causes allocation of devices to abort.
+ //
+ // The expression's input is an object named "device", which carries
+ // the following properties:
+ // - driver (string): the name of the driver which defines this device.
+ // - attributes (map[string]object): the device's attributes, grouped by prefix
+ // (e.g. device.attributes["dra.example.com"] evaluates to an object with all
+ // of the attributes which were prefixed by "dra.example.com").
+ // - capacity (map[string]object): the device's capacities, grouped by prefix.
+ // - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device
+ // (v1.34+ with the DRAConsumableCapacity feature enabled).
+ //
+ // Example: Consider a device with driver="dra.example.com", which exposes
+ // two attributes named "model" and "ext.example.com/family" and which
+ // exposes one capacity named "modules". This input to this expression
+ // would have the following fields:
+ //
+ // device.driver
+ // device.attributes["dra.example.com"].model
+ // device.attributes["ext.example.com"].family
+ // device.capacity["dra.example.com"].modules
+ //
+ // The device.driver field can be used to check for a specific driver,
+ // either as a high-level precondition (i.e. you only want to consider
+ // devices from this driver) or as part of a multi-clause expression
+ // that is meant to consider devices from different drivers.
+ //
+ // The value type of each attribute is defined by the device
+ // definition, and users who write these expressions must consult the
+ // documentation for their specific drivers. The value type of each
+ // capacity is Quantity.
+ //
+ // If an unknown prefix is used as a lookup in either device.attributes
+ // or device.capacity, an empty map will be returned. Any reference to
+ // an unknown field will cause an evaluation error and allocation to
+ // abort.
+ //
+ // A robust expression should check for the existence of attributes
+ // before referencing them.
+ //
+ // For ease of use, the cel.bind() function is enabled, and can be used
+ // to simplify expressions that access multiple attributes with the
+ // same domain. For example:
+ //
+ // cel.bind(dra, device.attributes["dra.example.com"], dra.someBool && dra.anotherBool)
+ //
+ // The length of the expression must be smaller or equal to 10 Ki. The
+ // cost of evaluating it is also limited based on the estimated number
+ // of logical steps.
Expression *string `json:"expression,omitempty"`
}
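
A sketch of a selector that follows the expression shape documented above, wired up with the builders from this package; the driver domain, attribute name, and capacity name are illustrative assumptions.

package example

import (
	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

// gpuSelector matches devices from one driver whose "model" attribute and
// "memory" capacity satisfy the given criteria, using cel.bind to avoid
// repeating the domain lookup.
func gpuSelector() *resourceac.DeviceSelectorApplyConfiguration {
	expr := `device.driver == "dra.example.com" &&
cel.bind(dra, device.attributes["dra.example.com"],
  dra.model == "a100" &&
  device.capacity["dra.example.com"].memory.compareTo(quantity("40Gi")) >= 0)`
	return resourceac.DeviceSelector().
		WithCEL(resourceac.CELDeviceSelector().WithExpression(expr))
}
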
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go
index 4afdb9a3..01578164 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counter.go
@@ -24,7 +24,10 @@ import (
// CounterApplyConfiguration represents a declarative configuration of the Counter type for use
// with apply.
+//
+// Counter describes a quantity associated with a device.
type CounterApplyConfiguration struct {
+ // Value defines how much of a certain device counter is available.
Value *resource.Quantity `json:"value,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go
index 2882b4ef..dfee5b72 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/counterset.go
@@ -20,8 +20,23 @@ package v1beta2
// CounterSetApplyConfiguration represents a declarative configuration of the CounterSet type for use
// with apply.
+//
+// CounterSet defines a named set of counters
+// that are available to be used by devices defined in the
+// ResourcePool.
+//
+// The counters are not allocatable by themselves, but
+// can be referenced by devices. When a device is allocated,
+// the portion of counters it uses will no longer be available for use
+// by other devices.
type CounterSetApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
+ // Name defines the name of the counter set.
+ // It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Counters defines the set of counters for this CounterSet
+ // The name of each counter must be unique in that set and must be a DNS label.
+ //
+ // The maximum number of counters is 32.
Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go
index 7896a383..607c9f5e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/device.go
@@ -25,19 +25,94 @@ import (
// DeviceApplyConfiguration represents a declarative configuration of the Device type for use
// with apply.
+//
+// Device represents one individual hardware instance that can be selected based
+// on its attributes. Besides the name, exactly one field must be set.
type DeviceApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Attributes map[resourcev1beta2.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
- Capacity map[resourcev1beta2.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
- ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
- BindsToNode *bool `json:"bindsToNode,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
+ // Name is a unique identifier among all devices managed by
+ // the driver in the pool. It must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Attributes defines the set of attributes for this device.
+ // The name of each attribute must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Attributes map[resourcev1beta2.QualifiedName]DeviceAttributeApplyConfiguration `json:"attributes,omitempty"`
+ // Capacity defines the set of capacities for this device.
+ // The name of each capacity must be unique in that set.
+ //
+ // The maximum number of attributes and capacities combined is 32.
+ Capacity map[resourcev1beta2.QualifiedName]DeviceCapacityApplyConfiguration `json:"capacity,omitempty"`
+ // ConsumesCounters defines a list of references to sharedCounters
+ // and the set of counters that the device will
+ // consume from those counter sets.
+ //
+ // There can only be a single entry per counterSet.
+ //
+ // The maximum number of device counter consumptions per
+ // device is 2.
+ ConsumesCounters []DeviceCounterConsumptionApplyConfiguration `json:"consumesCounters,omitempty"`
+ // NodeName identifies the node where the device is available.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines the nodes where the device is available.
+ //
+ // Must use exactly one term.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the device.
+ //
+ // Must only be set if Spec.PerDeviceNodeSelection is set to true.
+ // At most one of NodeName, NodeSelector and AllNodes can be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // If specified, these are the driver-defined taints.
+ //
+ // The maximum number of taints is 16. If taints are set for
+ // any device in a ResourceSlice, then the maximum number of
+ // allowed devices per ResourceSlice is 64 instead of 128.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Taints []DeviceTaintApplyConfiguration `json:"taints,omitempty"`
+ // BindsToNode indicates if the usage of an allocation involving this device
+ // has to be limited to exactly the node that was chosen when allocating the claim.
+ // If set to true, the scheduler will set the ResourceClaim.Status.Allocation.NodeSelector
+ // to match the node where the allocation was made.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindsToNode *bool `json:"bindsToNode,omitempty"`
+ // BindingConditions defines the conditions for proceeding with binding.
+ // All of these conditions must be set in the per-device status
+ // conditions with a value of True to proceed with binding the pod to the node
+ // while scheduling the pod.
+ //
+ // The maximum number of binding conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions defines the conditions for binding failure.
+ // They may be set in the per-device status conditions.
+ // If any is set to "True", a binding failure occurred.
+ //
+ // The maximum number of binding failure conditions is 4.
+ //
+ // The conditions must be a valid condition type string.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // AllowMultipleAllocations marks whether the device is allowed to be allocated to multiple DeviceRequests.
+ //
+ // If AllowMultipleAllocations is set to true, the device can be allocated more than once,
+ // and all of its capacity is consumable, regardless of whether the requestPolicy is defined or not.
+ AllowMultipleAllocations *bool `json:"allowMultipleAllocations,omitempty"`
}
// DeviceApplyConfiguration constructs a declarative configuration of the Device type for use with
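
A sketch of a single shareable device built with the generated helpers; the driver domain, attribute and capacity names, and their values are assumptions for illustration.

package example

import (
	resourcev1beta2 "k8s.io/api/resource/v1beta2"
	"k8s.io/apimachinery/pkg/api/resource"

	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

// exampleDevice describes one device with two attributes and one capacity
// entry; AllowMultipleAllocations lets several claims share its capacity.
func exampleDevice() *resourceac.DeviceApplyConfiguration {
	return resourceac.Device().
		WithName("gpu-0").
		WithAttributes(map[resourcev1beta2.QualifiedName]resourceac.DeviceAttributeApplyConfiguration{
			"dra.example.com/model": *resourceac.DeviceAttribute().WithStringValue("a100"),
			"dra.example.com/numa":  *resourceac.DeviceAttribute().WithIntValue(0),
		}).
		WithCapacity(map[resourcev1beta2.QualifiedName]resourceac.DeviceCapacityApplyConfiguration{
			"dra.example.com/memory": *resourceac.DeviceCapacity().WithValue(resource.MustParse("40Gi")),
		}).
		WithAllowMultipleAllocations(true)
}
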
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go
index 971fe807..cbfb0c1f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationconfiguration.go
@@ -24,9 +24,20 @@ import (
// DeviceAllocationConfigurationApplyConfiguration represents a declarative configuration of the DeviceAllocationConfiguration type for use
// with apply.
+//
+// DeviceAllocationConfiguration gets embedded in an AllocationResult.
type DeviceAllocationConfigurationApplyConfiguration struct {
- Source *resourcev1beta2.AllocationConfigSource `json:"source,omitempty"`
- Requests []string `json:"requests,omitempty"`
+ // Source records whether the configuration comes from a class (and thus
+ // is not something that a normal user would have been able to set) or
+ // from a claim.
+ Source *resourcev1beta2.AllocationConfigSource `json:"source,omitempty"`
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
+ Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go
index 5d9f0130..8907ac8d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceallocationresult.go
@@ -20,9 +20,19 @@ package v1beta2
// DeviceAllocationResultApplyConfiguration represents a declarative configuration of the DeviceAllocationResult type for use
// with apply.
+//
+// DeviceAllocationResult is the result of allocating devices.
type DeviceAllocationResultApplyConfiguration struct {
+ // Results lists all allocated devices.
Results []DeviceRequestAllocationResultApplyConfiguration `json:"results,omitempty"`
- Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
+ // This field is a combination of all the claim and class configuration parameters.
+ // Drivers can distinguish between those based on a flag.
+ //
+ // This includes configuration parameters for drivers which have no allocated
+ // devices in the result because it is up to the drivers which configuration
+ // parameters they support. They can silently ignore unknown configuration
+ // parameters.
+ Config []DeviceAllocationConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceAllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go
index c5f88c3f..63c7f205 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceattribute.go
@@ -20,10 +20,17 @@ package v1beta2
// DeviceAttributeApplyConfiguration represents a declarative configuration of the DeviceAttribute type for use
// with apply.
+//
+// DeviceAttribute must have exactly one field set.
type DeviceAttributeApplyConfiguration struct {
- IntValue *int64 `json:"int,omitempty"`
- BoolValue *bool `json:"bool,omitempty"`
- StringValue *string `json:"string,omitempty"`
+ // IntValue is a number.
+ IntValue *int64 `json:"int,omitempty"`
+ // BoolValue is a true/false value.
+ BoolValue *bool `json:"bool,omitempty"`
+ // StringValue is a string. Must not be longer than 64 characters.
+ StringValue *string `json:"string,omitempty"`
+ // VersionValue is a semantic version according to semver.org spec 2.0.0.
+ // Must not be longer than 64 characters.
VersionValue *string `json:"version,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go
index 79a4e125..4e41c8e6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecapacity.go
@@ -24,8 +24,24 @@ import (
// DeviceCapacityApplyConfiguration represents a declarative configuration of the DeviceCapacity type for use
// with apply.
+//
+// DeviceCapacity describes a quantity associated with a device.
type DeviceCapacityApplyConfiguration struct {
- Value *resource.Quantity `json:"value,omitempty"`
+ // Value defines how much of a certain capacity that device has.
+ //
+ // This field reflects the fixed total capacity and does not change.
+ // The consumed amount is tracked separately by scheduler
+ // and does not affect this value.
+ Value *resource.Quantity `json:"value,omitempty"`
+ // RequestPolicy defines how this DeviceCapacity must be consumed
+ // when the device is allowed to be shared by multiple allocations.
+ //
+ // The Device must have allowMultipleAllocations set to true in order to set a requestPolicy.
+ //
+ // If unset, capacity requests are unconstrained:
+ // requests can consume any amount of capacity, as long as the total consumed
+ // across all allocations does not exceed the device's defined capacity.
+ // If request is also unset, default is the full capacity value.
RequestPolicy *CapacityRequestPolicyApplyConfiguration `json:"requestPolicy,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go
index 33af599a..f142c13e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaim.go
@@ -20,10 +20,19 @@ package v1beta2
// DeviceClaimApplyConfiguration represents a declarative configuration of the DeviceClaim type for use
// with apply.
+//
+// DeviceClaim defines how to request devices with a ResourceClaim.
type DeviceClaimApplyConfiguration struct {
- Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
- Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
- Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
+ // Requests represent individual requests for distinct devices which
+ // must all be satisfied. If empty, nothing needs to be allocated.
+ Requests []DeviceRequestApplyConfiguration `json:"requests,omitempty"`
+ // These constraints must be satisfied by the set of devices that get
+ // allocated for the claim.
+ Constraints []DeviceConstraintApplyConfiguration `json:"constraints,omitempty"`
+ // This field holds configuration for multiple potential drivers which
+ // could satisfy requests in this claim. It is ignored while allocating
+ // the claim.
+ Config []DeviceClaimConfigurationApplyConfiguration `json:"config,omitempty"`
}
// DeviceClaimApplyConfiguration constructs a declarative configuration of the DeviceClaim type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go
index 08464b39..59a4f50f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclaimconfiguration.go
@@ -20,7 +20,15 @@ package v1beta2
// DeviceClaimConfigurationApplyConfiguration represents a declarative configuration of the DeviceClaimConfiguration type for use
// with apply.
+//
+// DeviceClaimConfiguration is used for configuration parameters in DeviceClaim.
type DeviceClaimConfigurationApplyConfiguration struct {
+ // Requests lists the names of requests where the configuration applies.
+ // If empty, it applies to all requests.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the configuration applies to all subrequests.
Requests []string `json:"requests,omitempty"`
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go
index 39cac115..439e03c6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclass.go
@@ -29,10 +29,27 @@ import (
// DeviceClassApplyConfiguration represents a declarative configuration of the DeviceClass type for use
// with apply.
+//
+// DeviceClass is a vendor- or admin-provided resource that contains
+// device configuration and selectors. It can be referenced in
+// the device requests of a claim to apply these presets.
+// Cluster scoped.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type DeviceClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec defines what can be allocated and how to configure it.
+ //
+ // This is mutable. Consumers have to be prepared for classes changing
+ // at any time, either because they get updated or replaced. Claim
+ // allocations are done once based on whatever was set in classes at
+ // the time of allocation.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *DeviceClassSpecApplyConfiguration `json:"spec,omitempty"`
}
// DeviceClass constructs a declarative configuration of the DeviceClass type for use with
@@ -45,29 +62,14 @@ func DeviceClass(name string) *DeviceClassApplyConfiguration {
return b
}
-// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
-// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
-// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractDeviceClassFrom extracts the applied configuration owned by fieldManager from
+// deviceClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// deviceClass must be a unmodified DeviceClass API object that was retrieved from the Kubernetes API.
-// ExtractDeviceClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractDeviceClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractDeviceClass(deviceClass *resourcev1beta2.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "")
-}
-
-// ExtractDeviceClassStatus is the same as ExtractDeviceClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractDeviceClassStatus(deviceClass *resourcev1beta2.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
- return extractDeviceClass(deviceClass, fieldManager, "status")
-}
-
-func extractDeviceClass(deviceClass *resourcev1beta2.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
+func ExtractDeviceClassFrom(deviceClass *resourcev1beta2.DeviceClass, fieldManager string, subresource string) (*DeviceClassApplyConfiguration, error) {
b := &DeviceClassApplyConfiguration{}
err := managedfields.ExtractInto(deviceClass, internal.Parser().Type("io.k8s.api.resource.v1beta2.DeviceClass"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +81,21 @@ func extractDeviceClass(deviceClass *resourcev1beta2.DeviceClass, fieldManager s
b.WithAPIVersion("resource.k8s.io/v1beta2")
return b, nil
}
+
+// ExtractDeviceClass extracts the applied configuration owned by fieldManager from
+// deviceClass. If no managedFields are found in deviceClass for fieldManager, a
+// DeviceClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// deviceClass must be an unmodified DeviceClass API object that was retrieved from the Kubernetes API.
+// ExtractDeviceClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractDeviceClass(deviceClass *resourcev1beta2.DeviceClass, fieldManager string) (*DeviceClassApplyConfiguration, error) {
+ return ExtractDeviceClassFrom(deviceClass, fieldManager, "")
+}
+
func (b DeviceClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go
index 90441028..f668fcf8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassconfiguration.go
@@ -20,6 +20,8 @@ package v1beta2
// DeviceClassConfigurationApplyConfiguration represents a declarative configuration of the DeviceClassConfiguration type for use
// with apply.
+//
+// DeviceClassConfiguration is used in DeviceClass.
type DeviceClassConfigurationApplyConfiguration struct {
DeviceConfigurationApplyConfiguration `json:",inline"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go
index b181fd68..e270c590 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceclassspec.go
@@ -20,10 +20,29 @@ package v1beta2
// DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use
// with apply.
+//
+// DeviceClassSpec is used in a [DeviceClass] to define what can be allocated
+// and how to configure it.
type DeviceClassSpecApplyConfiguration struct {
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
- ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
+ // Each selector must be satisfied by a device which is claimed via this class.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // Config defines configuration parameters that apply to each device that is claimed via this class.
+ // Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor
+ // configuration applies to exactly one driver.
+ //
+ // They are passed to the driver, but are not considered while allocating the claim.
+ Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"`
+ // ExtendedResourceName is the extended resource name for the devices of this class.
+ // The devices of this class can be used to satisfy a pod's extended resource requests.
+ // It has the same format as the name of a pod's extended resource.
+ // It should be unique among all the device classes in a cluster.
+ // If two device classes have the same name, then the class created later
+ // is picked to satisfy a pod's extended resource requests.
+ // If two classes are created at the same time, then the name of the class
+ // lexicographically sorted first is picked.
+ //
+ // This is an alpha field.
+ ExtendedResourceName *string `json:"extendedResourceName,omitempty"`
}
// DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go
index 2032433f..9d79fe23 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconfiguration.go
@@ -20,7 +20,12 @@ package v1beta2
// DeviceConfigurationApplyConfiguration represents a declarative configuration of the DeviceConfiguration type for use
// with apply.
+//
+// DeviceConfiguration must have exactly one field set. It gets embedded
+// inline in some other structs which have other fields, so field names must
+// not conflict with those.
type DeviceConfigurationApplyConfiguration struct {
+ // Opaque provides driver-specific configuration parameters.
Opaque *OpaqueDeviceConfigurationApplyConfiguration `json:"opaque,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go
index dd23cd22..81a682c8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceconstraint.go
@@ -24,9 +24,42 @@ import (
// DeviceConstraintApplyConfiguration represents a declarative configuration of the DeviceConstraint type for use
// with apply.
+//
+// DeviceConstraint must have exactly one field set besides Requests.
type DeviceConstraintApplyConfiguration struct {
- Requests []string `json:"requests,omitempty"`
- MatchAttribute *resourcev1beta2.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // Requests is a list of the one or more requests in this claim which
+ // must co-satisfy this constraint. If a request is fulfilled by
+ // multiple devices, then all of the devices must satisfy the
+ // constraint. If this is not specified, this constraint applies to all
+ // requests in this claim.
+ //
+ // References to subrequests must include the name of the main request
+ // and may include the subrequest using the format <main request>[/<subrequest>]. If just
+ // the main request is given, the constraint applies to all subrequests.
+ Requests []string `json:"requests,omitempty"`
+ // MatchAttribute requires that all devices in question have this
+ // attribute and that its type and value are the same across those
+ // devices.
+ //
+ // For example, if you specified "dra.example.com/numa" (a hypothetical example!),
+ // then only devices in the same NUMA node will be chosen. A device which
+ // does not have that attribute will not be chosen. All devices should
+ // use a value of the same type for this attribute because that is part of
+ // its specification, but if one device doesn't, then it also will not be
+ // chosen.
+ //
+ // Must include the domain qualifier.
+ MatchAttribute *resourcev1beta2.FullyQualifiedName `json:"matchAttribute,omitempty"`
+ // DistinctAttribute requires that all devices in question have this
+ // attribute and that its type and value are unique across those devices.
+ //
+ // This acts as the inverse of MatchAttribute.
+ //
+ // This constraint is used to avoid allocating multiple requests to the same device
+ // by ensuring attribute-level differentiation.
+ //
+ // This is useful for scenarios where resource requests must be fulfilled by separate physical devices.
+ // For example, a container requests two network interfaces that must be allocated from two different physical NICs.
DistinctAttribute *resourcev1beta2.FullyQualifiedName `json:"distinctAttribute,omitempty"`
}
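
A sketch of MatchAttribute tying two requests to devices that report the same value for one attribute; the device class names and the "dra.example.com/numa" attribute are hypothetical.

package example

import (
	resourcev1beta2 "k8s.io/api/resource/v1beta2"

	resourceac "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

// sameNUMAClaim requests a GPU and a NIC and constrains both to devices that
// expose the same value for the "numa" attribute, i.e. the same NUMA node.
func sameNUMAClaim() *resourceac.DeviceClaimApplyConfiguration {
	return resourceac.DeviceClaim().
		WithRequests(
			resourceac.DeviceRequest().WithName("gpu").
				WithExactly(resourceac.ExactDeviceRequest().WithDeviceClassName("example-gpu")),
			resourceac.DeviceRequest().WithName("nic").
				WithExactly(resourceac.ExactDeviceRequest().WithDeviceClassName("example-nic")),
		).
		WithConstraints(resourceac.DeviceConstraint().
			WithRequests("gpu", "nic").
			WithMatchAttribute(resourcev1beta2.FullyQualifiedName("dra.example.com/numa")))
}
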
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go
index 9d6d0a87..f636ccdb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicecounterconsumption.go
@@ -20,9 +20,17 @@ package v1beta2
// DeviceCounterConsumptionApplyConfiguration represents a declarative configuration of the DeviceCounterConsumption type for use
// with apply.
+//
+// DeviceCounterConsumption defines a set of counters that
+// a device will consume from a CounterSet.
type DeviceCounterConsumptionApplyConfiguration struct {
- CounterSet *string `json:"counterSet,omitempty"`
- Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
+ // CounterSet is the name of the set from which the
+ // counters defined will be consumed.
+ CounterSet *string `json:"counterSet,omitempty"`
+ // Counters defines the counters that will be consumed by the device.
+ //
+ // The maximum number of counters is 32.
+ Counters map[string]CounterApplyConfiguration `json:"counters,omitempty"`
}
// DeviceCounterConsumptionApplyConfiguration constructs a declarative configuration of the DeviceCounterConsumption type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go
index 426c9748..c2e9ef20 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequest.go
@@ -20,10 +20,42 @@ package v1beta2
// DeviceRequestApplyConfiguration represents a declarative configuration of the DeviceRequest type for use
// with apply.
+//
+// DeviceRequest is a request for devices required for a claim.
+// This is typically a request for a single resource like a device, but can
+// also ask for several identical devices. With FirstAvailable it is also
+// possible to provide a prioritized list of requests.
type DeviceRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Exactly *ExactDeviceRequestApplyConfiguration `json:"exactly,omitempty"`
- FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
+ // Name can be used to reference this request in a pod.spec.containers[].resources.claims
+ // entry and in a constraint of the claim.
+ //
+ // References using the name in the DeviceRequest will uniquely
+ // identify a request when the Exactly field is set. When the
+ // FirstAvailable field is set, a reference to the name of the
+ // DeviceRequest will match whatever subrequest is chosen by the
+ // scheduler.
+ //
+ // Must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // Exactly specifies the details for a single request that must
+ // be met exactly for the request to be satisfied.
+ //
+ // One of Exactly or FirstAvailable must be set.
+ Exactly *ExactDeviceRequestApplyConfiguration `json:"exactly,omitempty"`
+ // FirstAvailable contains subrequests, of which exactly one will be
+ // selected by the scheduler. It tries to
+ // satisfy them in the order in which they are listed here. So if
+ // there are two entries in the list, the scheduler will only check
+ // the second one if it determines that the first one can not be used.
+ //
+ // DRA does not yet implement scoring, so the scheduler will
+ // select the first set of devices that satisfies all the
+ // requests in the claim. And if the requirements can
+ // be satisfied on more than one node, other scheduling features
+ // will determine which node is chosen. This means that the set of
+ // devices allocated to a claim might not be the optimal set
+ // available to the cluster. Scoring will be implemented later.
+ FirstAvailable []DeviceSubRequestApplyConfiguration `json:"firstAvailable,omitempty"`
}
// DeviceRequestApplyConfiguration constructs a declarative configuration of the DeviceRequest type for use with
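
To illustrate the prioritized FirstAvailable list described above, a minimal sketch assuming the generated builders in this package; the device class names are placeholders.

package main

import (
	"fmt"

	resourcev1beta2 "k8s.io/api/resource/v1beta2"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// Prefer a large GPU and fall back to a small one; the scheduler picks the
	// first subrequest it can satisfy. Class names are hypothetical.
	req := resourceapply.DeviceRequest().
		WithName("gpu").
		WithFirstAvailable(
			resourceapply.DeviceSubRequest().
				WithName("large").
				WithDeviceClassName("gpu-80gb.example.com").
				WithAllocationMode(resourcev1beta2.DeviceAllocationModeExactCount).
				WithCount(1),
			resourceapply.DeviceSubRequest().
				WithName("small").
				WithDeviceClassName("gpu-40gb.example.com"),
		)

	fmt.Println(*req.Name, len(req.FirstAvailable))
}
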
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go
index 202fca5d..c1fd0266 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicerequestallocationresult.go
@@ -26,17 +26,75 @@ import (
// DeviceRequestAllocationResultApplyConfiguration represents a declarative configuration of the DeviceRequestAllocationResult type for use
// with apply.
+//
+// DeviceRequestAllocationResult contains the allocation result for one request.
type DeviceRequestAllocationResultApplyConfiguration struct {
- Request *string `json:"request,omitempty"`
- Driver *string `json:"driver,omitempty"`
- Pool *string `json:"pool,omitempty"`
- Device *string `json:"device,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- BindingConditions []string `json:"bindingConditions,omitempty"`
- BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
- ShareID *types.UID `json:"shareID,omitempty"`
- ConsumedCapacity map[resourcev1beta2.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
+ // Request is the name of the request in the claim which caused this
+ // device to be allocated. If it references a subrequest in the
+ // firstAvailable list on a DeviceRequest, this field must
+ // include both the name of the main request and the subrequest
+ // using the format <main request>/<subrequest>.
+ //
+ // Multiple devices may have been allocated per request.
+ Request *string `json:"request,omitempty"`
+ // Driver specifies the name of the DRA driver whose kubelet
+ // plugin should be invoked to process the allocation once the claim is
+ // needed on a node.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // This name together with the driver name and the device name field
+ // identify which device was allocated (`<driver name>/<pool name>/<device name>`).
+ //
+ // Must not be longer than 253 characters and may contain one or more
+ // DNS sub-domains separated by slashes.
+ Pool *string `json:"pool,omitempty"`
+ // Device references one device instance via its name in the driver's
+ // resource pool. It must be a DNS label.
+ Device *string `json:"device,omitempty"`
+ // AdminAccess indicates that this device was allocated for
+ // administrative access. See the corresponding request field
+ // for a definition of mode.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // A copy of all tolerations specified in the request at the time
+ // when the device got allocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // BindingConditions contains a copy of the BindingConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingConditions []string `json:"bindingConditions,omitempty"`
+ // BindingFailureConditions contains a copy of the BindingFailureConditions
+ // from the corresponding ResourceSlice at the time of allocation.
+ //
+ // This is an alpha field and requires enabling the DRADeviceBindingConditions and DRAResourceClaimDeviceStatus
+ // feature gates.
+ BindingFailureConditions []string `json:"bindingFailureConditions,omitempty"`
+ // ShareID uniquely identifies an individual allocation share of the device,
+ // used when the device supports multiple simultaneous allocations.
+ // It serves as an additional map key to differentiate concurrent shares
+ // of the same device.
+ ShareID *types.UID `json:"shareID,omitempty"`
+ // ConsumedCapacity tracks the amount of capacity consumed per device as part of the claim request.
+ // The consumed amount may differ from the requested amount: it is rounded up to the nearest valid
+ // value based on the device's requestPolicy if applicable (i.e., may not be less than the requested amount).
+ //
+ // The total consumed capacity for each device must not exceed the DeviceCapacity's Value.
+ //
+ // This field is populated only for devices that allow multiple allocations.
+ // All capacity entries are included, even if the consumed amount is zero.
+ ConsumedCapacity map[resourcev1beta2.QualifiedName]resource.Quantity `json:"consumedCapacity,omitempty"`
}
// DeviceRequestAllocationResultApplyConfiguration constructs a declarative configuration of the DeviceRequestAllocationResult type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go
index fd064e5f..aa4b9312 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/deviceselector.go
@@ -20,7 +20,10 @@ package v1beta2
// DeviceSelectorApplyConfiguration represents a declarative configuration of the DeviceSelector type for use
// with apply.
+//
+// DeviceSelector must have exactly one field set.
type DeviceSelectorApplyConfiguration struct {
+ // CEL contains a CEL expression for selecting a device.
CEL *CELDeviceSelectorApplyConfiguration `json:"cel,omitempty"`
}
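
The CEL field carries an expression that is evaluated per device; a minimal sketch under the assumption of the generated CELDeviceSelector builder, with a hypothetical driver name.

package main

import (
	"fmt"

	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// Select only devices published by one driver. The driver name is
	// hypothetical; any boolean CEL expression over `device` would work here.
	sel := resourceapply.DeviceSelector().
		WithCEL(resourceapply.CELDeviceSelector().
			WithExpression(`device.driver == "gpu.example.com"`))

	fmt.Println(*sel.CEL.Expression)
}
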
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go
index 1ebd716d..79345cf6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicesubrequest.go
@@ -24,14 +24,91 @@ import (
// DeviceSubRequestApplyConfiguration represents a declarative configuration of the DeviceSubRequest type for use
// with apply.
+//
+// DeviceSubRequest describes a request for device provided in the
+// claim.spec.devices.requests[].firstAvailable array. Each
+// is typically a request for a single resource like a device, but can
+// also ask for several identical devices.
+//
+// DeviceSubRequest is similar to ExactDeviceRequest, but doesn't expose the
+// AdminAccess field as that one is only supported when requesting a
+// specific device.
type DeviceSubRequestApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1beta2.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // Name can be used to reference this subrequest in the list of constraints
+ // or the list of configurations for the claim. References must use the
+ // format <main request>/<subrequest>.
+ //
+ // Must be a DNS label.
+ Name *string `json:"name,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // subrequest.
+ //
+ // A class is required. Which classes are available depends on the cluster.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // subrequest. All selectors must be satisfied for a device to be
+ // considered.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this subrequest. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This subrequest is for all of the matching devices in a pool.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other subrequests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1beta2.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ Count *int64 `json:"count,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity defines resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// DeviceSubRequestApplyConfiguration constructs a declarative configuration of the DeviceSubRequest type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go
index b21f98a1..8049f52c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetaint.go
@@ -25,11 +25,27 @@ import (
// DeviceTaintApplyConfiguration represents a declarative configuration of the DeviceTaint type for use
// with apply.
+//
+// The device this taint is attached to has the "effect" on
+// any claim which does not tolerate the taint and, through the claim,
+// to pods using the claim.
type DeviceTaintApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1beta2.DeviceTaintEffect `json:"effect,omitempty"`
- TimeAdded *v1.Time `json:"timeAdded,omitempty"`
+ // The taint key to be applied to a device.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // The taint value corresponding to the taint key.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // The effect of the taint on claims that do not tolerate the taint
+ // and through such claims on the pods using them.
+ //
+ // Valid effects are None, NoSchedule and NoExecute. PreferNoSchedule as used for
+ // nodes is not valid here. More effects may get added in the future.
+ // Consumers must treat unknown effects like None.
+ Effect *resourcev1beta2.DeviceTaintEffect `json:"effect,omitempty"`
+ // TimeAdded represents the time at which the taint was added.
+ // Added automatically during create or update if not set.
+ TimeAdded *v1.Time `json:"timeAdded,omitempty"`
}
// DeviceTaintApplyConfiguration constructs a declarative configuration of the DeviceTaint type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go
index ae471233..988d7f89 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/devicetoleration.go
@@ -24,12 +24,33 @@ import (
// DeviceTolerationApplyConfiguration represents a declarative configuration of the DeviceToleration type for use
// with apply.
+//
+// The ResourceClaim this DeviceToleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
type DeviceTolerationApplyConfiguration struct {
- Key *string `json:"key,omitempty"`
- Operator *resourcev1beta2.DeviceTolerationOperator `json:"operator,omitempty"`
- Value *string `json:"value,omitempty"`
- Effect *resourcev1beta2.DeviceTaintEffect `json:"effect,omitempty"`
- TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // Must be a label name.
+ Key *string `json:"key,omitempty"`
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a ResourceClaim can
+ // tolerate all taints of a particular category.
+ Operator *resourcev1beta2.DeviceTolerationOperator `json:"operator,omitempty"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value must be empty, otherwise just a regular string.
+ // Must be a label value.
+ Value *string `json:"value,omitempty"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule and NoExecute.
+ Effect *resourcev1beta2.DeviceTaintEffect `json:"effect,omitempty"`
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // If larger than zero, the time when the pod needs to be evicted is calculated as <time when taint was added> + <tolerationSeconds>.
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty"`
}
// DeviceTolerationApplyConfiguration constructs a declarative configuration of the DeviceToleration type for use with
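
A minimal sketch of a toleration matching a NoExecute device taint, assuming the generated builders and constant names that mirror the core taint API (the taint key is hypothetical).

package main

import (
	"fmt"

	resourcev1beta2 "k8s.io/api/resource/v1beta2"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// Tolerate a hypothetical "dra.example.com/unhealthy" NoExecute taint for
	// five minutes before pods consuming the claim are evicted.
	tol := resourceapply.DeviceToleration().
		WithKey("dra.example.com/unhealthy").
		WithOperator(resourcev1beta2.DeviceTolerationOpExists).
		WithEffect(resourcev1beta2.DeviceTaintEffectNoExecute).
		WithTolerationSeconds(300)

	fmt.Println(*tol.Key, *tol.TolerationSeconds)
}
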
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go
index 1f0d6b41..e4c5d599 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/exactdevicerequest.go
@@ -24,14 +24,89 @@ import (
// ExactDeviceRequestApplyConfiguration represents a declarative configuration of the ExactDeviceRequest type for use
// with apply.
+//
+// ExactDeviceRequest is a request for one or more identical devices.
type ExactDeviceRequestApplyConfiguration struct {
- DeviceClassName *string `json:"deviceClassName,omitempty"`
- Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
- AllocationMode *resourcev1beta2.DeviceAllocationMode `json:"allocationMode,omitempty"`
- Count *int64 `json:"count,omitempty"`
- AdminAccess *bool `json:"adminAccess,omitempty"`
- Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
- Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
+ // DeviceClassName references a specific DeviceClass, which can define
+ // additional configuration and selectors to be inherited by this
+ // request.
+ //
+ // A DeviceClassName is required.
+ //
+ // Administrators may use this to restrict which devices may get
+ // requested by only installing classes with selectors for permitted
+ // devices. If users are free to request anything without restrictions,
+ // then administrators can create an empty DeviceClass for users
+ // to reference.
+ DeviceClassName *string `json:"deviceClassName,omitempty"`
+ // Selectors define criteria which must be satisfied by a specific
+ // device in order for that device to be considered for this
+ // request. All selectors must be satisfied for a device to be
+ // considered.
+ Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"`
+ // AllocationMode and its related fields define how devices are allocated
+ // to satisfy this request. Supported values are:
+ //
+ // - ExactCount: This request is for a specific number of devices.
+ // This is the default. The exact number is provided in the
+ // count field.
+ //
+ // - All: This request is for all of the matching devices in a pool.
+ // At least one device must exist on the node for the allocation to succeed.
+ // Allocation will fail if some devices are already allocated,
+ // unless adminAccess is requested.
+ //
+ // If AllocationMode is not specified, the default mode is ExactCount. If
+ // the mode is ExactCount and count is not specified, the default count is
+ // one. Any other requests must specify this field.
+ //
+ // More modes may get added in the future. Clients must refuse to handle
+ // requests with unknown modes.
+ AllocationMode *resourcev1beta2.DeviceAllocationMode `json:"allocationMode,omitempty"`
+ // Count is used only when the count mode is "ExactCount". Must be greater than zero.
+ // If AllocationMode is ExactCount and this field is not specified, the default is one.
+ Count *int64 `json:"count,omitempty"`
+ // AdminAccess indicates that this is a claim for administrative access
+ // to the device(s). Claims with AdminAccess are expected to be used for
+ // monitoring or other management services for a device. They ignore
+ // all ordinary claims to the device with respect to access modes and
+ // any resource allocations.
+ //
+ // This is an alpha field and requires enabling the DRAAdminAccess
+ // feature gate. Admin access is disabled if this field is unset or
+ // set to false, otherwise it is enabled.
+ AdminAccess *bool `json:"adminAccess,omitempty"`
+ // If specified, the request's tolerations.
+ //
+ // Tolerations for NoSchedule are required to allocate a
+ // device which has a taint with that effect. The same applies
+ // to NoExecute.
+ //
+ // In addition, should any of the allocated devices get tainted
+ // with NoExecute after allocation and that effect is not tolerated,
+ // then all pods consuming the ResourceClaim get deleted to evict
+ // them. The scheduler will not let new pods reserve the claim while
+ // it has these tainted devices. Once all pods are evicted, the
+ // claim will get deallocated.
+ //
+ // The maximum number of tolerations is 16.
+ //
+ // This is an alpha field and requires enabling the DRADeviceTaints
+ // feature gate.
+ Tolerations []DeviceTolerationApplyConfiguration `json:"tolerations,omitempty"`
+ // Capacity defines resource requirements against each capacity.
+ //
+ // If this field is unset and the device supports multiple allocations,
+ // the default value will be applied to each capacity according to requestPolicy.
+ // For the capacity that has no requestPolicy, default is the full capacity value.
+ //
+ // Applies to each device allocation.
+ // If Count > 1,
+ // the request fails if there aren't enough devices that meet the requirements.
+ // If AllocationMode is set to All,
+ // the request fails if there are devices that otherwise match the request,
+ // and have this capacity, with a value >= the requested amount, but which cannot be allocated to this request.
+ Capacity *CapacityRequirementsApplyConfiguration `json:"capacity,omitempty"`
}
// ExactDeviceRequestApplyConfiguration constructs a declarative configuration of the ExactDeviceRequest type for use with
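
As a sketch of the AllocationMode and AdminAccess fields above, an admin-style request for every matching device in a pool; the class name is hypothetical and the builders are the assumed generated setters.

package main

import (
	"fmt"

	resourcev1beta2 "k8s.io/api/resource/v1beta2"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// A monitoring-style request: all matching devices in the pool with
	// administrative access. AdminAccess requires the DRAAdminAccess feature
	// gate, as noted in the field comment above.
	req := resourceapply.ExactDeviceRequest().
		WithDeviceClassName("gpu.example.com").
		WithAllocationMode(resourcev1beta2.DeviceAllocationModeAll).
		WithAdminAccess(true)

	fmt.Println(*req.DeviceClassName, *req.AllocationMode, *req.AdminAccess)
}
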
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go
index 9b0944f8..96217032 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/networkdevicedata.go
@@ -20,10 +20,27 @@ package v1beta2
// NetworkDeviceDataApplyConfiguration represents a declarative configuration of the NetworkDeviceData type for use
// with apply.
+//
+// NetworkDeviceData provides network-related details for the allocated device.
+// This information may be filled by drivers or other components to configure
+// or identify the device within a network context.
type NetworkDeviceDataApplyConfiguration struct {
- InterfaceName *string `json:"interfaceName,omitempty"`
- IPs []string `json:"ips,omitempty"`
- HardwareAddress *string `json:"hardwareAddress,omitempty"`
+ // InterfaceName specifies the name of the network interface associated with
+ // the allocated device. This might be the name of a physical or virtual
+ // network interface being configured in the pod.
+ //
+ // Must not be longer than 256 characters.
+ InterfaceName *string `json:"interfaceName,omitempty"`
+ // IPs lists the network addresses assigned to the device's network interface.
+ // This can include both IPv4 and IPv6 addresses.
+ // The IPs are in the CIDR notation, which includes both the address and the
+ // associated subnet mask.
+ // e.g.: "192.0.2.5/24" for IPv4 and "2001:db8::5/64" for IPv6.
+ IPs []string `json:"ips,omitempty"`
+ // HardwareAddress represents the hardware address (e.g. MAC Address) of the device's network interface.
+ //
+ // Must not be longer than 128 characters.
+ HardwareAddress *string `json:"hardwareAddress,omitempty"`
}
// NetworkDeviceDataApplyConfiguration constructs a declarative configuration of the NetworkDeviceData type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go
index aa8fe43f..d4ac4567 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/opaquedeviceconfiguration.go
@@ -24,8 +24,25 @@ import (
// OpaqueDeviceConfigurationApplyConfiguration represents a declarative configuration of the OpaqueDeviceConfiguration type for use
// with apply.
+//
+// OpaqueDeviceConfiguration contains configuration parameters for a driver
+// in a format defined by the driver vendor.
type OpaqueDeviceConfigurationApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
+ // Driver is used to determine which kubelet plugin needs
+ // to be passed these configuration parameters.
+ //
+ // An admission policy provided by the driver developer could use this
+ // to decide whether it needs to validate them.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ Driver *string `json:"driver,omitempty"`
+ // Parameters can contain arbitrary data. It is the responsibility of
+ // the driver developer to handle validation and versioning. Typically this
+ // includes self-identification and a version ("kind" + "apiVersion" for
+ // Kubernetes types), with conversion between different versions.
+ //
+ // The length of the raw data must be smaller or equal to 10 Ki.
Parameters *runtime.RawExtension `json:"parameters,omitempty"`
}
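
A minimal sketch of passing vendor-defined parameters through the opaque configuration, assuming the generated builders; the driver name and parameter schema are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// The raw bytes are passed through verbatim to the named driver; the
	// driver is responsible for validating and versioning them.
	cfg := resourceapply.OpaqueDeviceConfiguration().
		WithDriver("gpu.example.com").
		WithParameters(runtime.RawExtension{
			Raw: []byte(`{"apiVersion":"gpu.example.com/v1","kind":"GPUConfig","sharing":"timeslice"}`),
		})

	fmt.Println(*cfg.Driver, string(cfg.Parameters.Raw))
}
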
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go
index 0d8d59db..e35d087c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaim.go
@@ -29,11 +29,24 @@ import (
// ResourceClaimApplyConfiguration represents a declarative configuration of the ResourceClaim type for use
// with apply.
+//
+// ResourceClaim describes a request for access to resources in the cluster,
+// for use by workloads. For example, if a workload needs an accelerator device
+// with specific properties, this is how that request is expressed. The status
+// stanza tracks whether this claim has been satisfied and what specific
+// resources have been allocated.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
- Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
+ // Spec describes what is being requested and how to configure it.
+ // The spec is immutable.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status describes whether the claim is ready to use and what has been allocated.
+ Status *ResourceClaimStatusApplyConfiguration `json:"status,omitempty"`
}
// ResourceClaim constructs a declarative configuration of the ResourceClaim type for use with
@@ -47,6 +60,27 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
return b
}
+// ExtractResourceClaimFrom extracts the applied configuration owned by fieldManager from
+// resourceClaim for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// resourceClaim must be an unmodified ResourceClaim API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimFrom(resourceClaim *resourcev1beta2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
+ b := &ResourceClaimApplyConfiguration{}
+ err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta2.ResourceClaim"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(resourceClaim.Name)
+ b.WithNamespace(resourceClaim.Namespace)
+
+ b.WithKind("ResourceClaim")
+ b.WithAPIVersion("resource.k8s.io/v1beta2")
+ return b, nil
+}
+
// ExtractResourceClaim extracts the applied configuration owned by fieldManager from
// resourceClaim. If no managedFields are found in resourceClaim for fieldManager, a
// ResourceClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -57,31 +91,16 @@ func ResourceClaim(name, namespace string) *ResourceClaimApplyConfiguration {
// ExtractResourceClaim provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractResourceClaim(resourceClaim *resourcev1beta2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "")
}
-// ExtractResourceClaimStatus is the same as ExtractResourceClaim except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractResourceClaimStatus extracts the applied configuration owned by fieldManager from
+// resourceClaim for the status subresource.
func ExtractResourceClaimStatus(resourceClaim *resourcev1beta2.ResourceClaim, fieldManager string) (*ResourceClaimApplyConfiguration, error) {
- return extractResourceClaim(resourceClaim, fieldManager, "status")
+ return ExtractResourceClaimFrom(resourceClaim, fieldManager, "status")
}
-func extractResourceClaim(resourceClaim *resourcev1beta2.ResourceClaim, fieldManager string, subresource string) (*ResourceClaimApplyConfiguration, error) {
- b := &ResourceClaimApplyConfiguration{}
- err := managedfields.ExtractInto(resourceClaim, internal.Parser().Type("io.k8s.api.resource.v1beta2.ResourceClaim"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(resourceClaim.Name)
- b.WithNamespace(resourceClaim.Namespace)
-
- b.WithKind("ResourceClaim")
- b.WithAPIVersion("resource.k8s.io/v1beta2")
- return b, nil
-}
func (b ResourceClaimApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
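
A sketch of the extract/modify-in-place/apply workflow that ExtractResourceClaimFrom enables, assuming a typed clientset that serves the resource.k8s.io/v1beta2 group; the field-manager name is illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourceapply "k8s.io/client-go/applyconfigurations/resource/v1beta2"
	"k8s.io/client-go/kubernetes"
)

// reapplyClaim reads the live object, recovers only the fields owned by the
// given field manager, and applies the (possibly modified) result back.
func reapplyClaim(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	claim, err := cs.ResourceV1beta2().ResourceClaims(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	ac, err := resourceapply.ExtractResourceClaimFrom(claim, "example-controller", "")
	if err != nil {
		return err
	}

	// ...modify the extracted apply configuration here; the spec is immutable,
	// so in practice only metadata such as labels would change...

	_, err = cs.ResourceV1beta2().ResourceClaims(namespace).Apply(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-controller",
		Force:        true,
	})
	return err
}
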
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go
index b7824e85..08d973e2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimconsumerreference.go
@@ -24,11 +24,21 @@ import (
// ResourceClaimConsumerReferenceApplyConfiguration represents a declarative configuration of the ResourceClaimConsumerReference type for use
// with apply.
+//
+// ResourceClaimConsumerReference contains enough information to let you
+// locate the consumer of a ResourceClaim. The user must be a resource in the same
+// namespace as the ResourceClaim.
type ResourceClaimConsumerReferenceApplyConfiguration struct {
- APIGroup *string `json:"apiGroup,omitempty"`
- Resource *string `json:"resource,omitempty"`
- Name *string `json:"name,omitempty"`
- UID *types.UID `json:"uid,omitempty"`
+ // APIGroup is the group for the resource being referenced. It is
+ // empty for the core API. This matches the group in the APIVersion
+ // that is used when creating the resources.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Resource is the type of resource being referenced, for example "pods".
+ Resource *string `json:"resource,omitempty"`
+ // Name is the name of resource being referenced.
+ Name *string `json:"name,omitempty"`
+ // UID identifies exactly one incarnation of the resource.
+ UID *types.UID `json:"uid,omitempty"`
}
// ResourceClaimConsumerReferenceApplyConfiguration constructs a declarative configuration of the ResourceClaimConsumerReference type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go
index e1fce171..7057dd71 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimspec.go
@@ -20,7 +20,10 @@ package v1beta2
// ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use
// with apply.
+//
+// ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.
type ResourceClaimSpecApplyConfiguration struct {
+ // Devices defines how to request devices.
Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go
index a3e7ae25..87d76e09 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimstatus.go
@@ -20,10 +20,36 @@ package v1beta2
// ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use
// with apply.
+//
+// ResourceClaimStatus tracks whether the resource has been allocated and what
+// the result of that was.
type ResourceClaimStatusApplyConfiguration struct {
- Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // Allocation is set once the claim has been allocated successfully.
+ Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"`
+ // ReservedFor indicates which entities are currently allowed to use
+ // the claim. A Pod which references a ResourceClaim which is not
+ // reserved for that Pod will not be started. A claim that is in
+ // use or might be in use because it has been reserved must not get
+ // deallocated.
+ //
+ // In a cluster with multiple scheduler instances, two pods might get
+ // scheduled concurrently by different schedulers. When they reference
+ // the same ResourceClaim which already has reached its maximum number
+ // of consumers, only one pod can be scheduled.
+ //
+ // Both schedulers try to add their pod to the claim.status.reservedFor
+ // field, but only the update that reaches the API server first gets
+ // stored. The other one fails with an error and the scheduler
+ // which issued it knows that it must put the pod back into the queue,
+ // waiting for the ResourceClaim to become usable again.
+ //
+ // There can be at most 256 such reservations. This may get increased in
+ // the future, but not reduced.
ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"`
- Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
+ // Devices contains the status of each device allocated for this
+ // claim, as reported by the driver. This can include driver-specific
+ // information. Entries are owned by their respective drivers.
+ Devices []AllocatedDeviceStatusApplyConfiguration `json:"devices,omitempty"`
}
// ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go
index 2e79c664..999521ce 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplate.go
@@ -29,10 +29,21 @@ import (
// ResourceClaimTemplateApplyConfiguration represents a declarative configuration of the ResourceClaimTemplate type for use
// with apply.
+//
+// ResourceClaimTemplate is used to produce ResourceClaim objects.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceClaimTemplateApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
+ // Describes the ResourceClaim that is to be generated.
+ //
+ // This field is immutable. A ResourceClaim will get created by the
+ // control plane for a Pod when needed and then not get updated
+ // anymore.
+ Spec *ResourceClaimTemplateSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplate constructs a declarative configuration of the ResourceClaimTemplate type for use with
@@ -46,29 +57,14 @@ func ResourceClaimTemplate(name, namespace string) *ResourceClaimTemplateApplyCo
return b
}
-// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
-// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
-// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceClaimTemplateFrom extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
-// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceClaimTemplateFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "")
-}
-
-// ExtractResourceClaimTemplateStatus is the same as ExtractResourceClaimTemplate except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceClaimTemplateStatus(resourceClaimTemplate *resourcev1beta2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
- return extractResourceClaimTemplate(resourceClaimTemplate, fieldManager, "status")
-}
-
-func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
+func ExtractResourceClaimTemplateFrom(resourceClaimTemplate *resourcev1beta2.ResourceClaimTemplate, fieldManager string, subresource string) (*ResourceClaimTemplateApplyConfiguration, error) {
b := &ResourceClaimTemplateApplyConfiguration{}
err := managedfields.ExtractInto(resourceClaimTemplate, internal.Parser().Type("io.k8s.api.resource.v1beta2.ResourceClaimTemplate"), fieldManager, b, subresource)
if err != nil {
@@ -81,6 +77,21 @@ func extractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta2.Resourc
b.WithAPIVersion("resource.k8s.io/v1beta2")
return b, nil
}
+
+// ExtractResourceClaimTemplate extracts the applied configuration owned by fieldManager from
+// resourceClaimTemplate. If no managedFields are found in resourceClaimTemplate for fieldManager, a
+// ResourceClaimTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// resourceClaimTemplate must be a unmodified ResourceClaimTemplate API object that was retrieved from the Kubernetes API.
+// ExtractResourceClaimTemplate provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceClaimTemplate(resourceClaimTemplate *resourcev1beta2.ResourceClaimTemplate, fieldManager string) (*ResourceClaimTemplateApplyConfiguration, error) {
+ return ExtractResourceClaimTemplateFrom(resourceClaimTemplate, fieldManager, "")
+}
+
func (b ResourceClaimTemplateApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go
index 7868d1dd..da19bf36 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceclaimtemplatespec.go
@@ -26,9 +26,17 @@ import (
// ResourceClaimTemplateSpecApplyConfiguration represents a declarative configuration of the ResourceClaimTemplateSpec type for use
// with apply.
+//
+// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpecApplyConfiguration struct {
+ // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
+ // when creating it. No other fields are allowed and will be rejected during
+ // validation.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
+ // Spec for the ResourceClaim. The entire content is copied unchanged
+ // into the ResourceClaim that gets created from this template. The
+ // same fields as in a ResourceClaim are also valid here.
+ Spec *ResourceClaimSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceClaimTemplateSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimTemplateSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go
index 6923085d..f5b0c9b6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourcepool.go
@@ -20,10 +20,34 @@ package v1beta2
// ResourcePoolApplyConfiguration represents a declarative configuration of the ResourcePool type for use
// with apply.
+//
+// ResourcePool describes the pool that ResourceSlices belong to.
type ResourcePoolApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- Generation *int64 `json:"generation,omitempty"`
- ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
+ // Name is used to identify the pool. For node-local devices, this
+ // is often the node name, but this is not required.
+ //
+ // It must not be longer than 253 characters and must consist of one or more DNS sub-domains
+ // separated by slashes. This field is immutable.
+ Name *string `json:"name,omitempty"`
+ // Generation tracks the change in a pool over time. Whenever a driver
+ // changes something about one or more of the resources in a pool, it
+ // must change the generation in all ResourceSlices which are part of
+ // that pool. Consumers of ResourceSlices should only consider
+ // resources from the pool with the highest generation number. The
+ // generation may be reset by drivers, which should be fine for
+ // consumers, assuming that all ResourceSlices in a pool are updated to
+ // match or deleted.
+ //
+ // Combined with ResourceSliceCount, this mechanism enables consumers to
+ // detect pools which are comprised of multiple ResourceSlices and are
+ // in an incomplete state.
+ Generation *int64 `json:"generation,omitempty"`
+ // ResourceSliceCount is the total number of ResourceSlices in the pool at this
+ // generation number. Must be greater than zero.
+ //
+ // Consumers can use this to check whether they have seen all ResourceSlices
+ // belonging to the same pool.
+ ResourceSliceCount *int64 `json:"resourceSliceCount,omitempty"`
}
// ResourcePoolApplyConfiguration constructs a declarative configuration of the ResourcePool type for use with
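
The Generation and ResourceSliceCount fields exist so that consumers can tell whether their view of a pool is complete. A minimal sketch of that check over the plain API types (not the apply configurations), assuming the slices were already filtered to a single driver.

package example

import (
	resourcev1beta2 "k8s.io/api/resource/v1beta2"
)

// poolComplete keeps only the slices of the named pool at the highest
// generation and compares their number against the ResourceSliceCount they
// advertise for that generation.
func poolComplete(slices []resourcev1beta2.ResourceSlice, pool string) bool {
	var maxGen, count, want int64
	first := true
	for _, s := range slices {
		if s.Spec.Pool.Name != pool {
			continue
		}
		gen := s.Spec.Pool.Generation
		switch {
		case first || gen > maxGen:
			first = false
			maxGen = gen
			count = 1
			want = s.Spec.Pool.ResourceSliceCount
		case gen == maxGen:
			count++
		}
	}
	return !first && count == want
}
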
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go
index d62ff1e1..dbbe7b2e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslice.go
@@ -29,10 +29,39 @@ import (
// ResourceSliceApplyConfiguration represents a declarative configuration of the ResourceSlice type for use
// with apply.
+//
+// ResourceSlice represents one or more resources in a pool of similar resources,
+// managed by a common driver. A pool may span more than one ResourceSlice, and exactly how many
+// ResourceSlices comprise a pool is determined by the driver.
+//
+// At the moment, the only supported resources are devices with attributes and capacities.
+// Each device in a given pool, regardless of how many ResourceSlices, must have a unique name.
+// The ResourceSlice in which a device gets published may change over time. The unique identifier
+// for a device is the tuple <driver name>, <pool name>, <device name>.
+//
+// Whenever a driver needs to update a pool, it increments the pool.Spec.Pool.Generation number
+// and updates all ResourceSlices with that new number and new resource definitions. A consumer
+// must only use ResourceSlices with the highest generation number and ignore all others.
+//
+// When allocating all resources in a pool matching certain criteria or when
+// looking for the best solution among several different alternatives, a
+// consumer should check the number of ResourceSlices in a pool (included in
+// each ResourceSlice) to determine whether its view of a pool is complete and
+// if not, should wait until the driver has completed updating the pool.
+//
+// For resources that are not local to a node, the node name is not set. Instead,
+// the driver may use a node selector to specify where the devices are available.
+//
+// This is an alpha type and requires enabling the DynamicResourceAllocation
+// feature gate.
type ResourceSliceApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
+ // Contains the information published by the driver.
+ //
+ // Changing the spec automatically increments the metadata.generation number.
+ Spec *ResourceSliceSpecApplyConfiguration `json:"spec,omitempty"`
}
// ResourceSlice constructs a declarative configuration of the ResourceSlice type for use with
@@ -45,29 +74,14 @@ func ResourceSlice(name string) *ResourceSliceApplyConfiguration {
return b
}
-// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
-// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
-// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractResourceSliceFrom extracts the applied configuration owned by fieldManager from
+// resourceSlice for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
-// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractResourceSliceFrom provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractResourceSlice(resourceSlice *resourcev1beta2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "")
-}
-
-// ExtractResourceSliceStatus is the same as ExtractResourceSlice except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractResourceSliceStatus(resourceSlice *resourcev1beta2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
- return extractResourceSlice(resourceSlice, fieldManager, "status")
-}
-
-func extractResourceSlice(resourceSlice *resourcev1beta2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
+func ExtractResourceSliceFrom(resourceSlice *resourcev1beta2.ResourceSlice, fieldManager string, subresource string) (*ResourceSliceApplyConfiguration, error) {
b := &ResourceSliceApplyConfiguration{}
err := managedfields.ExtractInto(resourceSlice, internal.Parser().Type("io.k8s.api.resource.v1beta2.ResourceSlice"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +93,21 @@ func extractResourceSlice(resourceSlice *resourcev1beta2.ResourceSlice, fieldMan
b.WithAPIVersion("resource.k8s.io/v1beta2")
return b, nil
}
+
+// ExtractResourceSlice extracts the applied configuration owned by fieldManager from
+// resourceSlice. If no managedFields are found in resourceSlice for fieldManager, a
+// ResourceSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// resourceSlice must be a unmodified ResourceSlice API object that was retrieved from the Kubernetes API.
+// ExtractResourceSlice provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractResourceSlice(resourceSlice *resourcev1beta2.ResourceSlice, fieldManager string) (*ResourceSliceApplyConfiguration, error) {
+ return ExtractResourceSliceFrom(resourceSlice, fieldManager, "")
+}
+
func (b ResourceSliceApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go
index 5a000829..2efc7d07 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/resource/v1beta2/resourceslicespec.go
@@ -24,15 +24,64 @@ import (
// ResourceSliceSpecApplyConfiguration represents a declarative configuration of the ResourceSliceSpec type for use
// with apply.
+//
+// ResourceSliceSpec contains the information published by the driver in one ResourceSlice.
type ResourceSliceSpecApplyConfiguration struct {
- Driver *string `json:"driver,omitempty"`
- Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
- NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
- AllNodes *bool `json:"allNodes,omitempty"`
- Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
- PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
- SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
+ // Driver identifies the DRA driver providing the capacity information.
+ // A field selector can be used to list only ResourceSlice
+ // objects with a certain driver name.
+ //
+ // Must be a DNS subdomain and should end with a DNS domain owned by the
+ // vendor of the driver. It should use only lower case characters.
+ // This field is immutable.
+ Driver *string `json:"driver,omitempty"`
+ // Pool describes the pool that this ResourceSlice belongs to.
+ Pool *ResourcePoolApplyConfiguration `json:"pool,omitempty"`
+ // NodeName identifies the node which provides the resources in this pool.
+ // A field selector can be used to list only ResourceSlice
+ // objects belonging to a certain node.
+ //
+ // This field can be used to limit access from nodes to ResourceSlices with
+ // the same node name. It also indicates to autoscalers that adding
+ // new nodes of the same type as some old node might also make new
+ // resources available.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ // This field is immutable.
+ NodeName *string `json:"nodeName,omitempty"`
+ // NodeSelector defines which nodes have access to the resources in the pool,
+ // when that pool is not limited to a single node.
+ //
+ // Must use exactly one term.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"`
+ // AllNodes indicates that all nodes have access to the resources in the pool.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ AllNodes *bool `json:"allNodes,omitempty"`
+ // Devices lists some or all of the devices in this pool.
+ //
+ // Must not have more than 128 entries. If any device uses taints or consumes counters the limit is 64.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ Devices []DeviceApplyConfiguration `json:"devices,omitempty"`
+ // PerDeviceNodeSelection defines whether the access from nodes to
+ // resources in the pool is set on the ResourceSlice level or on each
+// device. If it is set to true, every device defined in the ResourceSlice
+ // must specify this individually.
+ //
+ // Exactly one of NodeName, NodeSelector, AllNodes, and PerDeviceNodeSelection must be set.
+ PerDeviceNodeSelection *bool `json:"perDeviceNodeSelection,omitempty"`
+ // SharedCounters defines a list of counter sets, each of which
+ // has a name and a list of counters available.
+ //
+ // The names of the counter sets must be unique in the ResourcePool.
+ //
+ // Only one of Devices and SharedCounters can be set in a ResourceSlice.
+ //
+ // The maximum number of counter sets is 8.
+ SharedCounters []CounterSetApplyConfiguration `json:"sharedCounters,omitempty"`
}
// ResourceSliceSpecApplyConfiguration constructs a declarative configuration of the ResourceSliceSpec type for use with
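The field comments above document a one-of constraint across NodeName, NodeSelector, AllNodes and PerDeviceNodeSelection. A small sketch of a spec that respects it, assuming the generated ResourceSliceSpec, ResourcePool and Device builders (not shown in this hunk) and illustrative driver, node and device names:

package main

import (
	"fmt"

	resourcev1beta2ac "k8s.io/client-go/applyconfigurations/resource/v1beta2"
)

func main() {
	// Pick exactly one of WithNodeName / WithNodeSelector / WithAllNodes /
	// WithPerDeviceNodeSelection, per the one-of constraint documented above.
	spec := resourcev1beta2ac.ResourceSliceSpec().
		WithDriver("gpu.example.com"). // DNS subdomain owned by the driver vendor
		WithNodeName("node-a").        // this pool is published by a single node
		WithPool(resourcev1beta2ac.ResourcePool().
			WithName("node-a-gpus").
			WithGeneration(1).
			WithResourceSliceCount(1)).
		WithDevices(resourcev1beta2ac.Device().WithName("gpu-0"))

	fmt.Printf("driver=%s node=%s devices=%d\n", *spec.Driver, *spec.NodeName, len(spec.Devices))
}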
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
index 907a1501..4056dcdd 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
@@ -30,13 +30,30 @@ import (
// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
// with apply.
+//
+// PriorityClass defines mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
type PriorityClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Value *int32 `json:"value,omitempty"`
- GlobalDefault *bool `json:"globalDefault,omitempty"`
- Description *string `json:"description,omitempty"`
- PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+ // value represents the integer value of this priority class. This is the actual priority that pods
+ // receive when they have the name of this class in their pod spec.
+ Value *int32 `json:"value,omitempty"`
+ // globalDefault specifies whether this PriorityClass should be considered as
+ // the default priority for pods that do not have any priority class.
+ // Only one PriorityClass can be marked as `globalDefault`. However, if more than
+// one PriorityClass exists with its `globalDefault` field set to true,
+ // the smallest value of such global default PriorityClasses will be used as the default priority.
+ GlobalDefault *bool `json:"globalDefault,omitempty"`
+ // description is an arbitrary string that usually provides guidelines on
+ // when this priority class should be used.
+ Description *string `json:"description,omitempty"`
+ // preemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
}
// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
@@ -49,29 +66,14 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
return b
}
-// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
-// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
-// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractPriorityClassFrom extracts the applied configuration owned by fieldManager from
+// priorityClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
-// ExtractPriorityClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractPriorityClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "")
-}
-
-// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPriorityClassStatus(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "status")
-}
-
-func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClassFrom(priorityClass *schedulingv1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +85,21 @@ func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManage
b.WithAPIVersion("scheduling.k8s.io/v1")
return b, nil
}
+
+// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
+// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
+// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// priorityClass must be an unmodified PriorityClass API object that was retrieved from the Kubernetes API.
+// ExtractPriorityClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return ExtractPriorityClassFrom(priorityClass, fieldManager, "")
+}
+
func (b PriorityClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
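The documented PriorityClass fields map directly onto the generated builders. A short sketch that assembles a PriorityClass apply configuration and server-side applies it, assuming an existing clientset and using illustrative class and field-manager names:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedulingv1ac "k8s.io/client-go/applyconfigurations/scheduling/v1"
	"k8s.io/client-go/kubernetes"
)

// applyHighPriority declares a PriorityClass and server-side applies it.
func applyHighPriority(ctx context.Context, cs kubernetes.Interface) error {
	pc := schedulingv1ac.PriorityClass("example-high-priority").
		WithValue(1000000).       // priority pods in this class receive
		WithGlobalDefault(false). // only one class may be the global default
		WithDescription("example class for latency-critical pods").
		WithPreemptionPolicy(corev1.PreemptLowerPriority) // the default when unset

	_, err := cs.SchedulingV1().PriorityClasses().Apply(ctx, pc,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}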
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/gangschedulingpolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/gangschedulingpolicy.go
new file mode 100644
index 00000000..6bf41585
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/gangschedulingpolicy.go
@@ -0,0 +1,44 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// GangSchedulingPolicyApplyConfiguration represents a declarative configuration of the GangSchedulingPolicy type for use
+// with apply.
+//
+// GangSchedulingPolicy defines the parameters for gang scheduling.
+type GangSchedulingPolicyApplyConfiguration struct {
+ // MinCount is the minimum number of pods that must be schedulable or scheduled
+ // at the same time for the scheduler to admit the entire group.
+ // It must be a positive integer.
+ MinCount *int32 `json:"minCount,omitempty"`
+}
+
+// GangSchedulingPolicyApplyConfiguration constructs a declarative configuration of the GangSchedulingPolicy type for use with
+// apply.
+func GangSchedulingPolicy() *GangSchedulingPolicyApplyConfiguration {
+ return &GangSchedulingPolicyApplyConfiguration{}
+}
+
+// WithMinCount sets the MinCount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinCount field is set to the value of the last call.
+func (b *GangSchedulingPolicyApplyConfiguration) WithMinCount(value int32) *GangSchedulingPolicyApplyConfiguration {
+ b.MinCount = &value
+ return b
+}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgroup.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgroup.go
new file mode 100644
index 00000000..b1ce2617
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgroup.go
@@ -0,0 +1,53 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// PodGroupApplyConfiguration represents a declarative configuration of the PodGroup type for use
+// with apply.
+//
+// PodGroup represents a set of pods with a common scheduling policy.
+type PodGroupApplyConfiguration struct {
+ // Name is a unique identifier for the PodGroup within the Workload.
+ // It must be a DNS label. This field is immutable.
+ Name *string `json:"name,omitempty"`
+ // Policy defines the scheduling policy for this PodGroup.
+ Policy *PodGroupPolicyApplyConfiguration `json:"policy,omitempty"`
+}
+
+// PodGroupApplyConfiguration constructs a declarative configuration of the PodGroup type for use with
+// apply.
+func PodGroup() *PodGroupApplyConfiguration {
+ return &PodGroupApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *PodGroupApplyConfiguration) WithName(value string) *PodGroupApplyConfiguration {
+ b.Name = &value
+ return b
+}
+
+// WithPolicy sets the Policy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Policy field is set to the value of the last call.
+func (b *PodGroupApplyConfiguration) WithPolicy(value *PodGroupPolicyApplyConfiguration) *PodGroupApplyConfiguration {
+ b.Policy = value
+ return b
+}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgrouppolicy.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgrouppolicy.go
new file mode 100644
index 00000000..d73c5e44
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/podgrouppolicy.go
@@ -0,0 +1,58 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+)
+
+// PodGroupPolicyApplyConfiguration represents a declarative configuration of the PodGroupPolicy type for use
+// with apply.
+//
+// PodGroupPolicy defines the scheduling configuration for a PodGroup.
+type PodGroupPolicyApplyConfiguration struct {
+ // Basic specifies that the pods in this group should be scheduled using
+ // standard Kubernetes scheduling behavior.
+ Basic *schedulingv1alpha1.BasicSchedulingPolicy `json:"basic,omitempty"`
+ // Gang specifies that the pods in this group should be scheduled using
+ // all-or-nothing semantics.
+ Gang *GangSchedulingPolicyApplyConfiguration `json:"gang,omitempty"`
+}
+
+// PodGroupPolicyApplyConfiguration constructs a declarative configuration of the PodGroupPolicy type for use with
+// apply.
+func PodGroupPolicy() *PodGroupPolicyApplyConfiguration {
+ return &PodGroupPolicyApplyConfiguration{}
+}
+
+// WithBasic sets the Basic field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Basic field is set to the value of the last call.
+func (b *PodGroupPolicyApplyConfiguration) WithBasic(value schedulingv1alpha1.BasicSchedulingPolicy) *PodGroupPolicyApplyConfiguration {
+ b.Basic = &value
+ return b
+}
+
+// WithGang sets the Gang field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Gang field is set to the value of the last call.
+func (b *PodGroupPolicyApplyConfiguration) WithGang(value *GangSchedulingPolicyApplyConfiguration) *PodGroupPolicyApplyConfiguration {
+ b.Gang = value
+ return b
+}
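GangSchedulingPolicy, PodGroup and PodGroupPolicy compose into per-group scheduling policies. A brief sketch with illustrative group names, showing a gang-scheduled group next to one that keeps standard (Basic) semantics; note that WithBasic takes the API type by value, as generated above:

package main

import (
	"fmt"

	schedapiv1alpha1 "k8s.io/api/scheduling/v1alpha1"
	schedacv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
)

func main() {
	// Gang-scheduled group: all 4 pods must be schedulable before any is admitted.
	workers := schedacv1alpha1.PodGroup().
		WithName("workers").
		WithPolicy(schedacv1alpha1.PodGroupPolicy().
			WithGang(schedacv1alpha1.GangSchedulingPolicy().WithMinCount(4)))

	// Group that keeps standard scheduling semantics.
	driver := schedacv1alpha1.PodGroup().
		WithName("driver").
		WithPolicy(schedacv1alpha1.PodGroupPolicy().
			WithBasic(schedapiv1alpha1.BasicSchedulingPolicy{}))

	fmt.Println(*workers.Name, *driver.Name)
}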
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
index e658b119..1735a6de 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
@@ -30,13 +30,31 @@ import (
// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
// with apply.
+//
+// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass.
+// PriorityClass defines mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
type PriorityClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Value *int32 `json:"value,omitempty"`
- GlobalDefault *bool `json:"globalDefault,omitempty"`
- Description *string `json:"description,omitempty"`
- PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+ // value represents the integer value of this priority class. This is the actual priority that pods
+ // receive when they have the name of this class in their pod spec.
+ Value *int32 `json:"value,omitempty"`
+ // globalDefault specifies whether this PriorityClass should be considered as
+ // the default priority for pods that do not have any priority class.
+ // Only one PriorityClass can be marked as `globalDefault`. However, if more than
+// one PriorityClass exists with its `globalDefault` field set to true,
+ // the smallest value of such global default PriorityClasses will be used as the default priority.
+ GlobalDefault *bool `json:"globalDefault,omitempty"`
+ // description is an arbitrary string that usually provides guidelines on
+ // when this priority class should be used.
+ Description *string `json:"description,omitempty"`
+ // preemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
}
// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
@@ -49,29 +67,14 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
return b
}
-// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
-// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
-// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractPriorityClassFrom extracts the applied configuration owned by fieldManager from
+// priorityClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
-// ExtractPriorityClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractPriorityClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "")
-}
-
-// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPriorityClassStatus(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "status")
-}
-
-func extractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClassFrom(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +86,21 @@ func extractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, field
b.WithAPIVersion("scheduling.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
+// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
+// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// priorityClass must be an unmodified PriorityClass API object that was retrieved from the Kubernetes API.
+// ExtractPriorityClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityClass(priorityClass *schedulingv1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return ExtractPriorityClassFrom(priorityClass, fieldManager, "")
+}
+
func (b PriorityClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/typedlocalobjectreference.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/typedlocalobjectreference.go
new file mode 100644
index 00000000..73c11c88
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/typedlocalobjectreference.go
@@ -0,0 +1,67 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// TypedLocalObjectReferenceApplyConfiguration represents a declarative configuration of the TypedLocalObjectReference type for use
+// with apply.
+//
+// TypedLocalObjectReference allows referencing a typed object inside the same namespace.
+type TypedLocalObjectReferenceApplyConfiguration struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is empty, the specified Kind must be in the core API group.
+ // For any other third-party types, setting APIGroup is required.
+ // It must be a DNS subdomain.
+ APIGroup *string `json:"apiGroup,omitempty"`
+ // Kind is the type of resource being referenced.
+ // It must be a path segment name.
+ Kind *string `json:"kind,omitempty"`
+ // Name is the name of resource being referenced.
+ // It must be a path segment name.
+ Name *string `json:"name,omitempty"`
+}
+
+// TypedLocalObjectReferenceApplyConfiguration constructs a declarative configuration of the TypedLocalObjectReference type for use with
+// apply.
+func TypedLocalObjectReference() *TypedLocalObjectReferenceApplyConfiguration {
+ return &TypedLocalObjectReferenceApplyConfiguration{}
+}
+
+// WithAPIGroup sets the APIGroup field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIGroup field is set to the value of the last call.
+func (b *TypedLocalObjectReferenceApplyConfiguration) WithAPIGroup(value string) *TypedLocalObjectReferenceApplyConfiguration {
+ b.APIGroup = &value
+ return b
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *TypedLocalObjectReferenceApplyConfiguration) WithKind(value string) *TypedLocalObjectReferenceApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *TypedLocalObjectReferenceApplyConfiguration) WithName(value string) *TypedLocalObjectReferenceApplyConfiguration {
+ b.Name = &value
+ return b
+}
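A tiny sketch of the reference builder, using an illustrative Job as the controlling object; APIGroup must be set because Job lives in the "batch" group rather than the core API group:

package main

import (
	"fmt"

	schedacv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
)

func main() {
	// Reference a controlling Job in the same namespace.
	ref := schedacv1alpha1.TypedLocalObjectReference().
		WithAPIGroup("batch").
		WithKind("Job").
		WithName("training-job")

	fmt.Println(*ref.APIGroup, *ref.Kind, *ref.Name)
}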
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workload.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workload.go
new file mode 100644
index 00000000..6530899a
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workload.go
@@ -0,0 +1,279 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// WorkloadApplyConfiguration represents a declarative configuration of the Workload type for use
+// with apply.
+//
+// Workload allows for expressing scheduling constraints that should be used
+// when managing lifecycle of workloads from scheduling perspective,
+// including scheduling, preemption, eviction and other phases.
+type WorkloadApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // Name must be a DNS subdomain.
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ // Spec defines the desired behavior of a Workload.
+ Spec *WorkloadSpecApplyConfiguration `json:"spec,omitempty"`
+}
+
+// Workload constructs a declarative configuration of the Workload type for use with
+// apply.
+func Workload(name, namespace string) *WorkloadApplyConfiguration {
+ b := &WorkloadApplyConfiguration{}
+ b.WithName(name)
+ b.WithNamespace(namespace)
+ b.WithKind("Workload")
+ b.WithAPIVersion("scheduling.k8s.io/v1alpha1")
+ return b
+}
+
+// ExtractWorkloadFrom extracts the applied configuration owned by fieldManager from
+// workload for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// workload must be an unmodified Workload API object that was retrieved from the Kubernetes API.
+// ExtractWorkloadFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractWorkloadFrom(workload *schedulingv1alpha1.Workload, fieldManager string, subresource string) (*WorkloadApplyConfiguration, error) {
+ b := &WorkloadApplyConfiguration{}
+ err := managedfields.ExtractInto(workload, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.Workload"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(workload.Name)
+ b.WithNamespace(workload.Namespace)
+
+ b.WithKind("Workload")
+ b.WithAPIVersion("scheduling.k8s.io/v1alpha1")
+ return b, nil
+}
+
+// ExtractWorkload extracts the applied configuration owned by fieldManager from
+// workload. If no managedFields are found in workload for fieldManager, a
+// WorkloadApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// workload must be an unmodified Workload API object that was retrieved from the Kubernetes API.
+// ExtractWorkload provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractWorkload(workload *schedulingv1alpha1.Workload, fieldManager string) (*WorkloadApplyConfiguration, error) {
+ return ExtractWorkloadFrom(workload, fieldManager, "")
+}
+
+func (b WorkloadApplyConfiguration) IsApplyConfiguration() {}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithKind(value string) *WorkloadApplyConfiguration {
+ b.TypeMetaApplyConfiguration.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithAPIVersion(value string) *WorkloadApplyConfiguration {
+ b.TypeMetaApplyConfiguration.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithName(value string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithGenerateName(value string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithNamespace(value string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Namespace = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithUID(value types.UID) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithResourceVersion(value string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithGeneration(value int64) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithCreationTimestamp(value metav1.Time) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting existing map entries in the Labels field with the same key.
+func (b *WorkloadApplyConfiguration) WithLabels(entries map[string]string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting existing map entries in the Annotations field with the same key.
+func (b *WorkloadApplyConfiguration) WithAnnotations(entries map[string]string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 {
+ b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.ObjectMetaApplyConfiguration.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *WorkloadApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *WorkloadApplyConfiguration) WithFinalizers(values ...string) *WorkloadApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i])
+ }
+ return b
+}
+
+func (b *WorkloadApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *WorkloadApplyConfiguration) WithSpec(value *WorkloadSpecApplyConfiguration) *WorkloadApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// GetKind retrieves the value of the Kind field in the declarative configuration.
+func (b *WorkloadApplyConfiguration) GetKind() *string {
+ return b.TypeMetaApplyConfiguration.Kind
+}
+
+// GetAPIVersion retrieves the value of the APIVersion field in the declarative configuration.
+func (b *WorkloadApplyConfiguration) GetAPIVersion() *string {
+ return b.TypeMetaApplyConfiguration.APIVersion
+}
+
+// GetName retrieves the value of the Name field in the declarative configuration.
+func (b *WorkloadApplyConfiguration) GetName() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Name
+}
+
+// GetNamespace retrieves the value of the Namespace field in the declarative configuration.
+func (b *WorkloadApplyConfiguration) GetNamespace() *string {
+ b.ensureObjectMetaApplyConfigurationExists()
+ return b.ObjectMetaApplyConfiguration.Namespace
+}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workloadspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workloadspec.go
new file mode 100644
index 00000000..1ceb9210
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/workloadspec.go
@@ -0,0 +1,61 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// WorkloadSpecApplyConfiguration represents a declarative configuration of the WorkloadSpec type for use
+// with apply.
+//
+// WorkloadSpec defines the desired state of a Workload.
+type WorkloadSpecApplyConfiguration struct {
+ // ControllerRef is an optional reference to the controlling object, such as a
+ // Deployment or Job. This field is intended for use by tools like CLIs
+ // to provide a link back to the original workload definition.
+ // When set, it cannot be changed.
+ ControllerRef *TypedLocalObjectReferenceApplyConfiguration `json:"controllerRef,omitempty"`
+ // PodGroups is the list of pod groups that make up the Workload.
+ // The maximum number of pod groups is 8. This field is immutable.
+ PodGroups []PodGroupApplyConfiguration `json:"podGroups,omitempty"`
+}
+
+// WorkloadSpecApplyConfiguration constructs a declarative configuration of the WorkloadSpec type for use with
+// apply.
+func WorkloadSpec() *WorkloadSpecApplyConfiguration {
+ return &WorkloadSpecApplyConfiguration{}
+}
+
+// WithControllerRef sets the ControllerRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ControllerRef field is set to the value of the last call.
+func (b *WorkloadSpecApplyConfiguration) WithControllerRef(value *TypedLocalObjectReferenceApplyConfiguration) *WorkloadSpecApplyConfiguration {
+ b.ControllerRef = value
+ return b
+}
+
+// WithPodGroups adds the given value to the PodGroups field in the declarative configuration
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the PodGroups field.
+func (b *WorkloadSpecApplyConfiguration) WithPodGroups(values ...*PodGroupApplyConfiguration) *WorkloadSpecApplyConfiguration {
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithPodGroups")
+ }
+ b.PodGroups = append(b.PodGroups, *values[i])
+ }
+ return b
+}
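Putting the new v1alpha1 builders together, a sketch of a complete Workload apply configuration with illustrative names throughout; the JSON printed at the end is the declarative payload a server-side apply request would send:

package main

import (
	"encoding/json"
	"fmt"

	schedacv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
)

func main() {
	// Metadata comes from the Workload(name, namespace) constructor;
	// the spec is assembled from the WorkloadSpec, PodGroup and reference builders.
	wl := schedacv1alpha1.Workload("training-job", "default").
		WithLabels(map[string]string{"app": "trainer"}).
		WithSpec(schedacv1alpha1.WorkloadSpec().
			WithControllerRef(schedacv1alpha1.TypedLocalObjectReference().
				WithAPIGroup("batch").WithKind("Job").WithName("training-job")).
			WithPodGroups(schedacv1alpha1.PodGroup().
				WithName("workers").
				WithPolicy(schedacv1alpha1.PodGroupPolicy().
					WithGang(schedacv1alpha1.GangSchedulingPolicy().WithMinCount(4)))))

	// Print the declarative form that an apply request would serialize.
	out, _ := json.MarshalIndent(wl, "", "  ")
	fmt.Println(string(out))
}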
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
index 3b5ad5f9..5fab5624 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
@@ -30,13 +30,31 @@ import (
// PriorityClassApplyConfiguration represents a declarative configuration of the PriorityClass type for use
// with apply.
+//
+// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass.
+// PriorityClass defines mapping from a priority class name to the priority
+// integer value. The value can be any valid integer.
type PriorityClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Value *int32 `json:"value,omitempty"`
- GlobalDefault *bool `json:"globalDefault,omitempty"`
- Description *string `json:"description,omitempty"`
- PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
+ // value represents the integer value of this priority class. This is the actual priority that pods
+ // receive when they have the name of this class in their pod spec.
+ Value *int32 `json:"value,omitempty"`
+ // globalDefault specifies whether this PriorityClass should be considered as
+ // the default priority for pods that do not have any priority class.
+ // Only one PriorityClass can be marked as `globalDefault`. However, if more than
+// one PriorityClass exists with its `globalDefault` field set to true,
+ // the smallest value of such global default PriorityClasses will be used as the default priority.
+ GlobalDefault *bool `json:"globalDefault,omitempty"`
+ // description is an arbitrary string that usually provides guidelines on
+ // when this priority class should be used.
+ Description *string `json:"description,omitempty"`
+ // preemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ PreemptionPolicy *corev1.PreemptionPolicy `json:"preemptionPolicy,omitempty"`
}
// PriorityClass constructs a declarative configuration of the PriorityClass type for use with
@@ -49,29 +67,14 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
return b
}
-// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
-// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
-// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractPriorityClassFrom extracts the applied configuration owned by fieldManager from
+// priorityClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
-// ExtractPriorityClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractPriorityClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "")
-}
-
-// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractPriorityClassStatus(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
- return extractPriorityClass(priorityClass, fieldManager, "status")
-}
-
-func extractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
+func ExtractPriorityClassFrom(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
@@ -83,6 +86,21 @@ func extractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldM
b.WithAPIVersion("scheduling.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
+// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
+// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// priorityClass must be an unmodified PriorityClass API object that was retrieved from the Kubernetes API.
+// ExtractPriorityClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractPriorityClass(priorityClass *schedulingv1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return ExtractPriorityClassFrom(priorityClass, fieldManager, "")
+}
+
func (b PriorityClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
index 99a8bf39..39ce1eb0 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
@@ -29,10 +29,25 @@ import (
// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use
// with apply.
+//
+// CSIDriver captures information about a Container Storage Interface (CSI)
+// volume driver deployed on the cluster.
+// Kubernetes attach detach controller uses this object to determine whether attach is required.
+// Kubelet uses this object to determine whether pod information needs to be passed on mount.
+// CSIDriver objects are non-namespaced.
type CSIDriverApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // metadata.Name indicates the name of the CSI driver that this object
+ // refers to; it MUST be the same name returned by the CSI GetPluginName()
+ // call for that driver.
+ // The driver name must be 63 characters or less, beginning and ending with
+ // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+ // alphanumerics between.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec represents the specification of the CSI Driver.
+ Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSIDriver constructs a declarative configuration of the CSIDriver type for use with
@@ -45,29 +60,14 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
return b
}
-// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
-// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
-// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSIDriverFrom extracts the applied configuration owned by fieldManager from
+// cSIDriver for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API.
-// ExtractCSIDriver provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSIDriverFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
- return extractCSIDriver(cSIDriver, fieldManager, "")
-}
-
-// ExtractCSIDriverStatus is the same as ExtractCSIDriver except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSIDriverStatus(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
- return extractCSIDriver(cSIDriver, fieldManager, "status")
-}
-
-func extractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
+func ExtractCSIDriverFrom(cSIDriver *storagev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
b := &CSIDriverApplyConfiguration{}
err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +79,21 @@ func extractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string, subre
b.WithAPIVersion("storage.k8s.io/v1")
return b, nil
}
+
+// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
+// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
+// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSIDriver must be an unmodified CSIDriver API object that was retrieved from the Kubernetes API.
+// ExtractCSIDriver provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSIDriver(cSIDriver *storagev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return ExtractCSIDriverFrom(cSIDriver, fieldManager, "")
+}
+
func (b CSIDriverApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
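The CSIDriver Extract helpers follow the same extract/modify-in-place/apply pattern. A minimal sketch, assuming the generated CSIDriverSpec builder (not shown in this hunk) and an illustrative field-manager name, that extracts the owned fields, ensures a spec block exists, and re-applies with podInfoOnMount enabled:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// reapplyOwnedSpec extracts the CSIDriver fields owned by "example-manager",
// sets one spec field, and applies the result back under the same manager.
func reapplyOwnedSpec(ctx context.Context, cs kubernetes.Interface, driverName string) error {
	live, err := cs.StorageV1().CSIDrivers().Get(ctx, driverName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	ac, err := storagev1ac.ExtractCSIDriver(live, "example-manager")
	if err != nil {
		return err
	}

	// Ensure the spec block exists before setting a field on it.
	if ac.Spec == nil {
		ac.WithSpec(storagev1ac.CSIDriverSpec())
	}
	ac.Spec.WithPodInfoOnMount(true) // mutable since Kubernetes 1.29

	_, err = cs.StorageV1().CSIDrivers().Apply(ctx, ac,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}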
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
index fc6f2fbf..653329ec 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriverspec.go
@@ -24,16 +24,160 @@ import (
// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
// with apply.
+//
+// CSIDriverSpec is the specification of a CSIDriver.
type CSIDriverSpecApplyConfiguration struct {
- AttachRequired *bool `json:"attachRequired,omitempty"`
- PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
- VolumeLifecycleModes []storagev1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
- StorageCapacity *bool `json:"storageCapacity,omitempty"`
- FSGroupPolicy *storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
- TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
- RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
- SELinuxMount *bool `json:"seLinuxMount,omitempty"`
- NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty"`
+ // attachRequired indicates this CSI volume driver requires an attach
+ // operation (because it implements the CSI ControllerPublishVolume()
+ // method), and that the Kubernetes attach detach controller should call
+ // the attach volume interface which checks the volumeattachment status
+ // and waits until the volume is attached before proceeding to mounting.
+ // The CSI external-attacher coordinates with CSI volume driver and updates
+ // the volumeattachment status when the attach operation is complete.
+ // If the value is specified to false, the attach operation will be skipped.
+ // Otherwise the attach operation will be called.
+ //
+ // This field is immutable.
+ AttachRequired *bool `json:"attachRequired,omitempty"`
+ // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.)
+ // during mount operations, if set to true.
+ // If set to false, pod information will not be passed on mount.
+ // Default is false.
+ //
+ // The CSI driver specifies podInfoOnMount as part of driver deployment.
+ // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
+ // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
+ //
+ // The following VolumeContext will be passed if podInfoOnMount is set to true.
+ // This list might grow, but the prefix will be used.
+ // "csi.storage.k8s.io/pod.name": pod.Name
+ // "csi.storage.k8s.io/pod.namespace": pod.Namespace
+ // "csi.storage.k8s.io/pod.uid": string(pod.UID)
+ // "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume
+ // defined by a CSIVolumeSource, otherwise "false"
+ //
+ // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
+ // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
+ // Other drivers can leave pod info disabled and/or ignore this field.
+ // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
+ // deployed on such a cluster and the deployment determines which mode that is, for example
+ // via a command line parameter of the driver.
+ //
+ // This field was immutable in Kubernetes < 1.29 and now is mutable.
+ PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
+ // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
+ // The default if the list is empty is "Persistent", which is the usage defined by the
+ // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.
+ //
+ // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec
+ // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod.
+ // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.
+ //
+ // For more information about implementing this mode, see
+ // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
+ // A driver can support one or more of these modes and more modes may be added in the future.
+ //
+ // This field is beta.
+ // This field is immutable.
+ VolumeLifecycleModes []storagev1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
+ // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage
+ // capacity that the driver deployment will report by creating
+ // CSIStorageCapacity objects with capacity information, if set to true.
+ //
+ // The check can be enabled immediately when deploying a driver.
+ // In that case, provisioning new volumes with late binding
+ // will pause until the driver deployment has published
+ // some suitable CSIStorageCapacity object.
+ //
+ // Alternatively, the driver can be deployed with the field
+ // unset or false and it can be flipped later when storage
+ // capacity information has been published.
+ //
+ // This field was immutable in Kubernetes <= 1.22 and now is mutable.
+ StorageCapacity *bool `json:"storageCapacity,omitempty"`
+ // fsGroupPolicy defines if the underlying volume supports changing ownership and
+ // permission of the volume before being mounted.
+ // Refer to the specific FSGroupPolicy values for additional details.
+ //
+ // This field was immutable in Kubernetes < 1.29 and now is mutable.
+ //
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
+ FSGroupPolicy *storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
+ // tokenRequests indicates the CSI driver needs pods' service account
+ // tokens it is mounting volume for to do necessary authentication. Kubelet
+ // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls.
+ // The CSI driver should parse and validate the following VolumeContext:
+ // "csi.storage.k8s.io/serviceAccount.tokens": {
+ // "": {
+ // "token": ,
+ // "expirationTimestamp": ,
+ // },
+ // ...
+ // }
+ //
+ // Note: Audience in each TokenRequest should be different and at
+ // most one token is empty string. To receive a new token after expiry,
+ // RequiresRepublish can be used to trigger NodePublishVolume periodically.
+ TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
+ // requiresRepublish indicates the CSI driver wants `NodePublishVolume`
+ // being periodically called to reflect any possible change in the mounted
+ // volume. This field defaults to false.
+ //
+ // Note: After a successful initial NodePublishVolume call, subsequent calls
+ // to NodePublishVolume should only update the contents of the volume. New
+ // mount points will not be seen by a running container.
+ RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
+ // seLinuxMount specifies if the CSI driver supports "-o context"
+ // mount option.
+ //
+ // When "true", the CSI driver must ensure that all volumes provided by this CSI
+ // driver can be mounted separately with different `-o context` options. This is
+ // typical for storage backends that provide volumes as filesystems on block
+ // devices or as independent shared volumes.
+ // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount
+ // option when mounting a ReadWriteOncePod volume used in Pod that has
+ // explicitly set SELinux context. In the future, it may be expanded to other
+ // volume AccessModes. In any case, Kubernetes will ensure that the volume is
+ // mounted only with a single SELinux context.
+ //
+ // When "false", Kubernetes won't pass any special SELinux mount options to the driver.
+ // This is typical for volumes that represent subdirectories of a bigger shared filesystem.
+ //
+ // Default is "false".
+ SELinuxMount *bool `json:"seLinuxMount,omitempty"`
+ // nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of
+ // the CSINode allocatable capacity for this driver. When set, both periodic updates and
+ // updates triggered by capacity-related failures are enabled. If not set, no updates
+ // occur (neither periodic nor upon detecting capacity-related failures), and the
+ // allocatable.count remains static. The minimum allowed value for this field is 10 seconds.
+ //
+ // This is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.
+ //
+ // This field is mutable.
+ NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty"`
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ ServiceAccountTokenInSecrets *bool `json:"serviceAccountTokenInSecrets,omitempty"`
}
// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with
@@ -120,3 +264,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithNodeAllocatableUpdatePeriodSeconds
b.NodeAllocatableUpdatePeriodSeconds = &value
return b
}
+
+// WithServiceAccountTokenInSecrets sets the ServiceAccountTokenInSecrets field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccountTokenInSecrets field is set to the value of the last call.
+func (b *CSIDriverSpecApplyConfiguration) WithServiceAccountTokenInSecrets(value bool) *CSIDriverSpecApplyConfiguration {
+ b.ServiceAccountTokenInSecrets = &value
+ return b
+}
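The new serviceAccountTokenInSecrets field and its WithServiceAccountTokenInSecrets builder are used together with tokenRequests when constructing a CSIDriver apply configuration. A hedged sketch follows; the driver name, audience, and function name are hypothetical.

package example

import (
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
)

// exampleCSIDriver builds a CSIDriver apply configuration that opts into receiving
// service account tokens via NodePublishVolumeRequest.Secrets. Per the field docs,
// serviceAccountTokenInSecrets is only accepted when tokenRequests is also set.
func exampleCSIDriver() *storagev1ac.CSIDriverApplyConfiguration {
	spec := storagev1ac.CSIDriverSpec().
		WithPodInfoOnMount(true).
		WithTokenRequests(storagev1ac.TokenRequest().
			WithAudience("csi.example.com").
			WithExpirationSeconds(3600)).
		WithServiceAccountTokenInSecrets(true)

	return storagev1ac.CSIDriver("csi.example.com").WithSpec(spec)
}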
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
index 8d141a52..e0b7f349 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
@@ -29,10 +29,23 @@ import (
// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use
// with apply.
+//
+// CSINode holds information about all CSI drivers installed on a node.
+// CSI drivers do not need to create the CSINode object directly. As long as
+// they use the node-driver-registrar sidecar container, the kubelet will
+// automatically populate the CSINode object for the CSI driver as part of
+// kubelet plugin registration.
+// CSINode has the same name as a node. If the object is missing, it means either
+// there are no CSI Drivers available on the node, or the Kubelet version is low
+// enough that it doesn't create this object.
+// CSINode has an OwnerReference that points to the corresponding node object.
type CSINodeApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // metadata.name must be the Kubernetes node name.
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the specification of CSINode
+ Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSINode constructs a declarative configuration of the CSINode type for use with
@@ -45,29 +58,14 @@ func CSINode(name string) *CSINodeApplyConfiguration {
return b
}
-// ExtractCSINode extracts the applied configuration owned by fieldManager from
-// cSINode. If no managedFields are found in cSINode for fieldManager, a
-// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSINodeFrom extracts the applied configuration owned by fieldManager from
+// cSINode for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSINode must be an unmodified CSINode API object that was retrieved from the Kubernetes API.
-// ExtractCSINode provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSINodeFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSINode(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
- return extractCSINode(cSINode, fieldManager, "")
-}
-
-// ExtractCSINodeStatus is the same as ExtractCSINode except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSINodeStatus(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
- return extractCSINode(cSINode, fieldManager, "status")
-}
-
-func extractCSINode(cSINode *storagev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
+func ExtractCSINodeFrom(cSINode *storagev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
b := &CSINodeApplyConfiguration{}
err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +77,21 @@ func extractCSINode(cSINode *storagev1.CSINode, fieldManager string, subresource
b.WithAPIVersion("storage.k8s.io/v1")
return b, nil
}
+
+// ExtractCSINode extracts the applied configuration owned by fieldManager from
+// cSINode. If no managedFields are found in cSINode for fieldManager, a
+// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSINode must be an unmodified CSINode API object that was retrieved from the Kubernetes API.
+// ExtractCSINode provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSINode(cSINode *storagev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return ExtractCSINodeFrom(cSINode, fieldManager, "")
+}
+
func (b CSINodeApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
index 8c69e435..22492d7f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodedriver.go
@@ -20,11 +20,37 @@ package v1
// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use
// with apply.
+//
+// CSINodeDriver holds information about the specification of one CSI driver installed on a node
type CSINodeDriverApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- NodeID *string `json:"nodeID,omitempty"`
- TopologyKeys []string `json:"topologyKeys,omitempty"`
- Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
+ // name represents the name of the CSI driver that this object refers to.
+ // This MUST be the same name returned by the CSI GetPluginName() call for
+ // that driver.
+ Name *string `json:"name,omitempty"`
+ // nodeID of the node from the driver point of view.
+ // This field enables Kubernetes to communicate with storage systems that do
+ // not share the same nomenclature for nodes. For example, Kubernetes may
+ // refer to a given node as "node1", but the storage system may refer to
+ // the same node as "nodeA". When Kubernetes issues a command to the storage
+ // system to attach a volume to a specific node, it can use this field to
+ // refer to the node name using the ID that the storage system will
+ // understand, e.g. "nodeA" instead of "node1". This field is required.
+ NodeID *string `json:"nodeID,omitempty"`
+ // topologyKeys is the list of keys supported by the driver.
+ // When a driver is initialized on a cluster, it provides a set of topology
+ // keys that it understands (e.g. "company.com/zone", "company.com/region").
+ // When a driver is initialized on a node, it provides the same topology keys
+ // along with values. Kubelet will expose these topology keys as labels
+ // on its own node object.
+ // When Kubernetes does topology aware provisioning, it can use this list to
+ // determine which labels it should retrieve from the node object and pass
+ // back to the driver.
+ // It is possible for different nodes to use different topology keys.
+ // This can be empty if driver does not support topology.
+ TopologyKeys []string `json:"topologyKeys,omitempty"`
+ // allocatable represents the volume resources of a node that are available for scheduling.
+ // This field is beta.
+ Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
}
// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
index 21d3ba7c..2b1753d7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinodespec.go
@@ -20,7 +20,11 @@ package v1
// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use
// with apply.
+//
+// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
type CSINodeSpecApplyConfiguration struct {
+ // drivers is a list of information of all CSI Drivers existing on a node.
+ // If all drivers in the list are uninstalled, this can become empty.
Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"`
}
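Since the kubelet owns CSINode objects, consumers typically only read them. A small illustrative sketch (the function name and output format are assumptions) of listing the drivers registered on a node and their allocatable volume counts:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printNodeDrivers reads the CSINode object (same name as the node, populated by
// the kubelet) and reports each registered driver and its allocatable volume count.
func printNodeDrivers(ctx context.Context, cs kubernetes.Interface, nodeName string) error {
	csiNode, err := cs.StorageV1().CSINodes().Get(ctx, nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	for _, d := range csiNode.Spec.Drivers {
		limit := "unbounded"
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			limit = fmt.Sprintf("%d volumes", *d.Allocatable.Count)
		}
		fmt.Printf("node %s: driver %s (nodeID %s, allocatable %s)\n", nodeName, d.Name, d.NodeID, limit)
	}
	return nil
}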
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
index 9a5c41c6..8682c434 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csistoragecapacity.go
@@ -30,13 +30,76 @@ import (
// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
// with apply.
+//
+// CSIStorageCapacity stores the result of one CSI GetCapacity call.
+// For a given StorageClass, this describes the available capacity in a
+// particular topology segment. This can be used when considering where to
+// instantiate new PersistentVolumes.
+//
+// For example this can express things like:
+// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1"
+// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
+//
+// The following three cases all imply that no capacity is available for
+// a certain combination:
+// - no object exists with suitable topology and storage class name
+// - such an object exists, but the capacity is unset
+// - such an object exists, but the capacity is zero
+//
+// The producer of these objects can decide which approach is more suitable.
+//
+// They are consumed by the kube-scheduler when a CSI driver opts into
+// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler
+// compares the MaximumVolumeSize against the requested size of pending volumes
+// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back
+// to a comparison against the less precise Capacity. If that is also unset,
+// the scheduler assumes that capacity is insufficient and tries some other
+// node.
type CSIStorageCapacityApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters).
+ // To ensure that there are no conflicts with other CSI drivers on the cluster,
+ // the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name
+ // which ends with the unique CSI driver name.
+ //
+ // Objects are namespaced.
+ //
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- NodeTopology *metav1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- Capacity *resource.Quantity `json:"capacity,omitempty"`
- MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
+ // nodeTopology defines which nodes have access to the storage
+ // for which capacity was reported. If not set, the storage is
+ // not accessible from any node in the cluster. If empty, the
+ // storage is accessible from all nodes. This field is
+ // immutable.
+ NodeTopology *metav1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
+ // storageClassName represents the name of the StorageClass that the reported capacity applies to.
+ // It must meet the same requirements as the name of a StorageClass
+ // object (non-empty, DNS subdomain). If that object no longer exists,
+ // the CSIStorageCapacity object is obsolete and should be removed by its
+ // creator.
+ // This field is immutable.
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ // capacity is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // The semantic is currently (CSI spec 1.2) defined as:
+ // The available capacity, in bytes, of the storage that can be used
+ // to provision volumes. If not set, that information is currently
+ // unavailable.
+ Capacity *resource.Quantity `json:"capacity,omitempty"`
+ // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // This is defined since CSI spec 1.4.0 as the largest size
+ // that may be used in a
+ // CreateVolumeRequest.capacity_range.required_bytes field to
+ // create a volume with the same parameters as those in
+ // GetCapacityRequest. The corresponding value in the Kubernetes
+ // API is ResourceRequirements.Requests in a volume claim.
+ MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
}
// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
@@ -50,29 +113,14 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
return b
}
-// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
-// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
-// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSIStorageCapacityFrom extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSIStorageCapacity must be an unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
-// ExtractCSIStorageCapacity provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSIStorageCapacityFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
-}
-
-// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
-}
-
-func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacityFrom(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
@@ -85,6 +133,21 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity,
b.WithAPIVersion("storage.k8s.io/v1")
return b, nil
}
+
+// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
+// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSIStorageCapacity must be an unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
+// ExtractCSIStorageCapacity provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return ExtractCSIStorageCapacityFrom(cSIStorageCapacity, fieldManager, "")
+}
+
func (b CSIStorageCapacityApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
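A hedged sketch of how a capacity-publishing component might apply one CSIStorageCapacity object per storage class and topology segment using these builders; the namespace, object and class names, and field manager are hypothetical.

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// publishCapacity applies one CSIStorageCapacity object for a single
// (storage class, topology segment) pair. maximumVolumeSize is the more precise
// signal for the scheduler; capacity is the fallback comparison.
func publishCapacity(ctx context.Context, cs kubernetes.Interface) error {
	capacity := storagev1ac.CSIStorageCapacity("csisc-example-zone-a", "kube-system").
		WithStorageClassName("fast-ssd").
		WithNodeTopology(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"topology.kubernetes.io/zone": "us-east1-a"})).
		WithCapacity(resource.MustParse("1234Gi")).
		WithMaximumVolumeSize(resource.MustParse("512Gi"))

	_, err := cs.StorageV1().CSIStorageCapacities("kube-system").Apply(ctx, capacity, metav1.ApplyOptions{
		FieldManager: "example-capacity-publisher",
		Force:        true,
	})
	return err
}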
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
index 0e6c9fbe..766b795c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
@@ -31,16 +31,40 @@ import (
// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use
// with apply.
+//
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
type StorageClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Provisioner *string `json:"provisioner,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
- ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
- MountOptions []string `json:"mountOptions,omitempty"`
- AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
- VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
- AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
+ // provisioner indicates the type of the provisioner.
+ Provisioner *string `json:"provisioner,omitempty"`
+ // parameters holds the parameters for the provisioner that should
+ // create volumes of this storage class.
+ Parameters map[string]string `json:"parameters,omitempty"`
+ // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class.
+ // Defaults to Delete.
+ ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
+ // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class.
+ // e.g. ["ro", "soft"]. Not validated -
+ // mount of the PVs will simply fail if one is invalid.
+ MountOptions []string `json:"mountOptions,omitempty"`
+ // allowVolumeExpansion shows whether the storage class allows volume expansion.
+ AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
+ // volumeBindingMode indicates how PersistentVolumeClaims should be
+ // provisioned and bound. When unset, VolumeBindingImmediate is used.
+ // This field is only honored by servers that enable the VolumeScheduling feature.
+ VolumeBindingMode *storagev1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
+ // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned.
+ // Each volume plugin defines its own supported topology specifications.
+ // An empty TopologySelectorTerm list means there is no topology restriction.
+ // This field is only honored by servers that enable the VolumeScheduling feature.
+ AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
}
// StorageClass constructs a declarative configuration of the StorageClass type for use with
@@ -53,29 +77,14 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
return b
}
-// ExtractStorageClass extracts the applied configuration owned by fieldManager from
-// storageClass. If no managedFields are found in storageClass for fieldManager, a
-// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractStorageClassFrom extracts the applied configuration owned by fieldManager from
+// storageClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// storageClass must be an unmodified StorageClass API object that was retrieved from the Kubernetes API.
-// ExtractStorageClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractStorageClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractStorageClass(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
- return extractStorageClass(storageClass, fieldManager, "")
-}
-
-// ExtractStorageClassStatus is the same as ExtractStorageClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractStorageClassStatus(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
- return extractStorageClass(storageClass, fieldManager, "status")
-}
-
-func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
+func ExtractStorageClassFrom(storageClass *storagev1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
b := &StorageClassApplyConfiguration{}
err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1.StorageClass"), fieldManager, b, subresource)
if err != nil {
@@ -87,6 +96,21 @@ func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager stri
b.WithAPIVersion("storage.k8s.io/v1")
return b, nil
}
+
+// ExtractStorageClass extracts the applied configuration owned by fieldManager from
+// storageClass. If no managedFields are found in storageClass for fieldManager, a
+// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// storageClass must be an unmodified StorageClass API object that was retrieved from the Kubernetes API.
+// ExtractStorageClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStorageClass(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return ExtractStorageClassFrom(storageClass, fieldManager, "")
+}
+
func (b StorageClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
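A minimal sketch of managing a StorageClass declaratively with these builders and server-side apply; the class name, provisioner, parameters, and field manager are illustrative assumptions.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// applyStorageClass declaratively manages a StorageClass with server-side apply,
// mirroring the field comments above (reclaimPolicy, volumeBindingMode, etc.).
func applyStorageClass(ctx context.Context, cs kubernetes.Interface) error {
	sc := storagev1ac.StorageClass("fast-local").
		WithProvisioner("csi.example.com").
		WithParameters(map[string]string{"type": "nvme"}).
		WithReclaimPolicy(corev1.PersistentVolumeReclaimDelete).
		WithAllowVolumeExpansion(true).
		WithVolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
		WithMountOptions("noatime")

	_, err := cs.StorageV1().StorageClasses().Apply(ctx, sc, metav1.ApplyOptions{
		FieldManager: "example-operator",
		Force:        true,
	})
	return err
}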
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
index 77b96db2..a4558aa4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/tokenrequest.go
@@ -20,9 +20,15 @@ package v1
// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use
// with apply.
+//
+// TokenRequest contains parameters of a service account token.
type TokenRequestApplyConfiguration struct {
- Audience *string `json:"audience,omitempty"`
- ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
+ // audience is the intended audience of the token in "TokenRequestSpec".
+ // It will default to the audiences of kube apiserver.
+ Audience *string `json:"audience,omitempty"`
+ // expirationSeconds is the duration of validity of the token in "TokenRequestSpec".
+ // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec".
+ ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
}
// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
index a7c0a24f..62404218 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
@@ -29,11 +29,23 @@ import (
// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
// with apply.
+//
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
type VolumeAttachmentApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
+ // spec represents specification of the desired attach/detach volume behavior.
+ // Populated by the Kubernetes system.
+ Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents status of the VolumeAttachment request.
+ // Populated by the entity completing the attach or detach
+ // operation, i.e. the external-attacher.
+ Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
}
// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
@@ -46,6 +58,26 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
return b
}
+// ExtractVolumeAttachmentFrom extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// volumeAttachment must be an unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttachmentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttachmentFrom(volumeAttachment *storagev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
+ b := &VolumeAttachmentApplyConfiguration{}
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(volumeAttachment.Name)
+
+ b.WithKind("VolumeAttachment")
+ b.WithAPIVersion("storage.k8s.io/v1")
+ return b, nil
+}
+
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +88,16 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "")
}
-// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractVolumeAttachmentStatus extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the status subresource.
func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "status")
}
-func extractVolumeAttachment(volumeAttachment *storagev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
- b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(volumeAttachment.Name)
-
- b.WithKind("VolumeAttachment")
- b.WithAPIVersion("storage.k8s.io/v1")
- return b, nil
-}
func (b VolumeAttachmentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
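A hedged sketch of the status-subresource workflow described above, in the style of an external attacher: extract the status fields owned by a field manager, set attached, and apply the status back. The field manager, attachment metadata key, and function name are assumptions.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
	"k8s.io/client-go/kubernetes"
)

// markAttached extracts the status fields owned by "example-attacher", records a
// successful attach, and applies the status subresource back. This is equivalent
// to calling ExtractVolumeAttachmentFrom(live, "example-attacher", "status").
func markAttached(ctx context.Context, cs kubernetes.Interface, name string) error {
	live, err := cs.StorageV1().VolumeAttachments().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	ac, err := storagev1ac.ExtractVolumeAttachmentStatus(live, "example-attacher")
	if err != nil {
		return err
	}
	if ac.Status == nil {
		ac.Status = storagev1ac.VolumeAttachmentStatus()
	}
	// "devicePath" is an illustrative metadata key; real keys are driver-specific.
	ac.Status.WithAttached(true).WithAttachmentMetadata(map[string]string{"devicePath": "/dev/xvdf"})

	_, err = cs.StorageV1().VolumeAttachments().ApplyStatus(ctx, ac, metav1.ApplyOptions{
		FieldManager: "example-attacher",
		Force:        true,
	})
	return err
}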
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
index 1c865c00..32831261 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentsource.go
@@ -24,9 +24,21 @@ import (
// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
// with apply.
+//
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
+// Exactly one member can be set.
type VolumeAttachmentSourceApplyConfiguration struct {
- PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
- InlineVolumeSpec *corev1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
+ // persistentVolumeName represents the name of the persistent volume to attach.
+ PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
+ // inlineVolumeSpec contains all the information necessary to attach
+ // a persistent volume defined by a pod's inline VolumeSource. This field
+ // is populated only for the CSIMigration feature. It contains
+ // translated fields from a pod's inline VolumeSource to a
+ // PersistentVolumeSpec. This field is beta-level and is only
+ // honored by servers that enabled the CSIMigration feature.
+ InlineVolumeSpec *corev1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
}
// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
index 89653923..e707834d 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentspec.go
@@ -20,10 +20,16 @@ package v1
// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
// with apply.
+//
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
type VolumeAttachmentSpecApplyConfiguration struct {
- Attacher *string `json:"attacher,omitempty"`
- Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
+ // attacher indicates the name of the volume driver that MUST handle this
+ // request. This is the name returned by GetPluginName().
+ Attacher *string `json:"attacher,omitempty"`
+ // source represents the volume that should be attached.
+ Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
+ // nodeName represents the node that the volume should be attached to.
+ NodeName *string `json:"nodeName,omitempty"`
}
// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
index 14293376..e41c36e5 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachmentstatus.go
@@ -20,11 +20,27 @@ package v1
// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
// with apply.
+//
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
type VolumeAttachmentStatusApplyConfiguration struct {
- Attached *bool `json:"attached,omitempty"`
- AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
- AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
- DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
+ // attached indicates the volume is successfully attached.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ Attached *bool `json:"attached,omitempty"`
+ // attachmentMetadata is populated with any
+ // information returned by the attach operation, upon successful attach, that must be passed
+ // into subsequent WaitForAttach or Mount calls.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
+ // attachError represents the last error encountered during attach operation, if any.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
+ // detachError represents the last error encountered during detach operation, if any.
+ // This field must only be set by the entity completing the detach
+ // operation, i.e. the external-attacher.
+ DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
}
// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go
index 25774aee..aee276c3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattributesclass.go
@@ -29,11 +29,32 @@ import (
// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use
// with apply.
+//
+// VolumeAttributesClass represents a specification of mutable volume attributes
+// defined by the CSI driver. The class can be specified during dynamic provisioning
+// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
type VolumeAttributesClassApplyConfiguration struct {
- metav1.TypeMetaApplyConfiguration `json:",inline"`
+ metav1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*metav1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DriverName *string `json:"driverName,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
+ // Name of the CSI driver
+ // This field is immutable.
+ DriverName *string `json:"driverName,omitempty"`
+ // parameters hold volume attributes defined by the CSI driver. These values
+ // are opaque to the Kubernetes and are passed directly to the CSI driver.
+ // The underlying storage provider supports changing these attributes on an
+ // existing volume, however the parameters field itself is immutable. To
+ // invoke a volume update, a new VolumeAttributesClass should be created with
+ // new parameters, and the PersistentVolumeClaim should be updated to reference
+ // the new VolumeAttributesClass.
+ //
+ // This field is required and must contain at least one key/value pair.
+ // The keys cannot be empty, and the maximum number of parameters is 512, with
+ // a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
+ // the target PersistentVolumeClaim will be set to an "Infeasible" state in the
+ // modifyVolumeStatus field.
+ Parameters map[string]string `json:"parameters,omitempty"`
}
// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with
@@ -46,29 +67,14 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration
return b
}
-// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
-// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
-// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractVolumeAttributesClassFrom extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
-// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractVolumeAttributesClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
-}
-
-// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
-}
-
-func extractVolumeAttributesClass(volumeAttributesClass *storagev1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClassFrom(volumeAttributesClass *storagev1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
b := &VolumeAttributesClassApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttributesClass"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +86,21 @@ func extractVolumeAttributesClass(volumeAttributesClass *storagev1.VolumeAttribu
b.WithAPIVersion("storage.k8s.io/v1")
return b, nil
}
+
+// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
+// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttributesClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+ return ExtractVolumeAttributesClassFrom(volumeAttributesClass, fieldManager, "")
+}
+
func (b VolumeAttributesClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
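A short illustrative sketch of building a VolumeAttributesClass apply configuration; the class name, driver name, and parameter keys are hypothetical and interpreted only by the CSI driver.

package example

import (
	storagev1ac "k8s.io/client-go/applyconfigurations/storage/v1"
)

// exampleVolumeAttributesClass builds a VolumeAttributesClass apply configuration.
// Parameters are opaque to Kubernetes; to change attributes of an existing volume,
// a new class is created and the PersistentVolumeClaim is pointed at it, as the
// field documentation above describes.
func exampleVolumeAttributesClass() *storagev1ac.VolumeAttributesClassApplyConfiguration {
	return storagev1ac.VolumeAttributesClass("gold-tier").
		WithDriverName("csi.example.com").
		WithParameters(map[string]string{
			"iops":       "16000",
			"throughput": "600MiB/s",
		})
}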
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
index 9becf772..b1a572f4 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeerror.go
@@ -24,10 +24,19 @@ import (
// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
// with apply.
+//
+// VolumeError captures an error encountered during a volume operation.
type VolumeErrorApplyConfiguration struct {
- Time *metav1.Time `json:"time,omitempty"`
- Message *string `json:"message,omitempty"`
- ErrorCode *int32 `json:"errorCode,omitempty"`
+ // time represents the time the error was encountered.
+ Time *metav1.Time `json:"time,omitempty"`
+ // message represents the error encountered during Attach or Detach operation.
+ // This string may be logged, so it should not contain sensitive
+ // information.
+ Message *string `json:"message,omitempty"`
+ // errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.
+ //
+ // This is an optional, beta field that requires the MutableCSINodeAllocatableCount feature gate being enabled to be set.
+ ErrorCode *int32 `json:"errorCode,omitempty"`
}
// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
index 735853c4..6648d59f 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumenoderesources.go
@@ -20,7 +20,13 @@ package v1
// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use
// with apply.
+//
+// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResourcesApplyConfiguration struct {
+ // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node.
+ // A volume that is both attached and mounted on a node is considered to be used once, not twice.
+ // The same rule applies for a unique volume that is shared among multiple pods on the same node.
+ // If this field is not specified, then the supported number of volumes on this node is unbounded.
Count *int32 `json:"count,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
index 92e70f10..e52c1adf 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
@@ -30,13 +30,76 @@ import (
// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
// with apply.
+//
+// CSIStorageCapacity stores the result of one CSI GetCapacity call.
+// For a given StorageClass, this describes the available capacity in a
+// particular topology segment. This can be used when considering where to
+// instantiate new PersistentVolumes.
+//
+// For example this can express things like:
+// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1"
+// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
+//
+// The following three cases all imply that no capacity is available for
+// a certain combination:
+// - no object exists with suitable topology and storage class name
+// - such an object exists, but the capacity is unset
+// - such an object exists, but the capacity is zero
+//
+// The producer of these objects can decide which approach is more suitable.
+//
+// They are consumed by the kube-scheduler when a CSI driver opts into
+// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler
+// compares the MaximumVolumeSize against the requested size of pending volumes
+// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back
+// to a comparison against the less precise Capacity. If that is also unset,
+// the scheduler assumes that capacity is insufficient and tries some other
+// node.
type CSIStorageCapacityApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata. The name has no particular meaning. It must
+ // be a DNS subdomain (dots allowed, 253 characters). To ensure that
+ // there are no conflicts with other CSI drivers on the cluster, the recommendation
+ // is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends
+ // with the unique CSI driver name.
+ //
+ // Objects are namespaced.
+ //
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- Capacity *resource.Quantity `json:"capacity,omitempty"`
- MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
+ // nodeTopology defines which nodes have access to the storage
+ // for which capacity was reported. If not set, the storage is
+ // not accessible from any node in the cluster. If empty, the
+ // storage is accessible from all nodes. This field is
+ // immutable.
+ NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
+ // storageClassName represents the name of the StorageClass that the reported capacity applies to.
+ // It must meet the same requirements as the name of a StorageClass
+ // object (non-empty, DNS subdomain). If that object no longer exists,
+ // the CSIStorageCapacity object is obsolete and should be removed by its
+ // creator.
+ // This field is immutable.
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ // capacity is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // The semantic is currently (CSI spec 1.2) defined as:
+ // The available capacity, in bytes, of the storage that can be used
+ // to provision volumes. If not set, that information is currently
+ // unavailable.
+ Capacity *resource.Quantity `json:"capacity,omitempty"`
+ // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // This is defined since CSI spec 1.4.0 as the largest size
+ // that may be used in a
+ // CreateVolumeRequest.capacity_range.required_bytes field to
+ // create a volume with the same parameters as those in
+ // GetCapacityRequest. The corresponding value in the Kubernetes
+ // API is ResourceRequirements.Requests in a volume claim.
+ MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
}
// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
@@ -50,29 +113,14 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
return b
}
-// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
-// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
-// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSIStorageCapacityFrom extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
-// ExtractCSIStorageCapacity provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSIStorageCapacityFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
-}
-
-// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
-}
-
-func extractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacityFrom(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
@@ -85,6 +133,21 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCap
b.WithAPIVersion("storage.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
+// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSIStorageCapacity must be an unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
+// ExtractCSIStorageCapacity provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return ExtractCSIStorageCapacityFrom(cSIStorageCapacity, fieldManager, "")
+}
+
func (b CSIStorageCapacityApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
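For reference, the capacity-aware scheduling flow documented in the struct comment above is fed by objects such as the following hedged sketch; the namespace, object name, and quantities are illustrative, not values used by this project, and mirror the "StorageClass standard has 1234 GiB in zone us-east1" example.

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	storagev1alpha1ac "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
)

// exampleCapacity builds one CSIStorageCapacity apply configuration for an
// invented driver deployment; all names and sizes are placeholders.
func exampleCapacity() *storagev1alpha1ac.CSIStorageCapacityApplyConfiguration {
	return storagev1alpha1ac.CSIStorageCapacity("csisc-example", "kube-system").
		WithStorageClassName("standard").
		WithNodeTopology(metav1ac.LabelSelector().
			WithMatchLabels(map[string]string{"topology.kubernetes.io/zone": "us-east1"})).
		WithCapacity(resource.MustParse("1234Gi")).
		WithMaximumVolumeSize(resource.MustParse("512Gi"))
}
```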
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
index ae8ab651..bd214650 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
@@ -29,11 +29,23 @@ import (
// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
// with apply.
+//
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
type VolumeAttachmentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
+ // spec represents specification of the desired attach/detach volume behavior.
+ // Populated by the Kubernetes system.
+ Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents status of the VolumeAttachment request.
+ // Populated by the entity completing the attach or detach
+ // operation, i.e. the external-attacher.
+ Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
}
// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
@@ -46,6 +58,26 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
return b
}
+// ExtractVolumeAttachmentFrom extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// volumeAttachment must be an unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttachmentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttachmentFrom(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
+ b := &VolumeAttachmentApplyConfiguration{}
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(volumeAttachment.Name)
+
+ b.WithKind("VolumeAttachment")
+ b.WithAPIVersion("storage.k8s.io/v1alpha1")
+ return b, nil
+}
+
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +88,16 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "")
}
-// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractVolumeAttachmentStatus extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the status subresource.
func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "status")
}
-func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
- b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(volumeAttachment.Name)
-
- b.WithKind("VolumeAttachment")
- b.WithAPIVersion("storage.k8s.io/v1alpha1")
- return b, nil
-}
func (b VolumeAttachmentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
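A status-owning component (for example an external attacher) could use the newly exported ExtractVolumeAttachmentFrom with the "status" subresource as sketched below; the "external-attacher" field manager and device path are illustrative, and the call is equivalent to the ExtractVolumeAttachmentStatus wrapper kept in the diff.

```go
package example

import (
	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	storagev1alpha1ac "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
)

// markAttached sketches building a status patch for the fields this
// (hypothetical) component owns on a freshly fetched VolumeAttachment.
func markAttached(va *storagev1alpha1.VolumeAttachment) (*storagev1alpha1ac.VolumeAttachmentApplyConfiguration, error) {
	ac, err := storagev1alpha1ac.ExtractVolumeAttachmentFrom(va, "external-attacher", "status")
	if err != nil {
		return nil, err
	}
	ac.WithStatus(storagev1alpha1ac.VolumeAttachmentStatus().
		WithAttached(true).
		WithAttachmentMetadata(map[string]string{"devicePath": "/dev/xvdf"}))
	return ac, nil
}
```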
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
index be7da5dd..a7753ff8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentsource.go
@@ -24,9 +24,21 @@ import (
// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
// with apply.
+//
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may also allow inline volumes in pods.
+// Exactly one member can be set.
type VolumeAttachmentSourceApplyConfiguration struct {
- PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
- InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
+ // persistentVolumeName represents the name of the persistent volume to attach.
+ PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
+ // inlineVolumeSpec contains all the information necessary to attach
+ // a persistent volume defined by a pod's inline VolumeSource. This field
+ // is populated only for the CSIMigration feature. It contains
+ // translated fields from a pod's inline VolumeSource to a
+ // PersistentVolumeSpec. This field is alpha-level and is only
+ // honored by servers that enabled the CSIMigration feature.
+ InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
}
// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
index e97487a6..d6f6a751 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentspec.go
@@ -20,10 +20,16 @@ package v1alpha1
// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
// with apply.
+//
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
type VolumeAttachmentSpecApplyConfiguration struct {
- Attacher *string `json:"attacher,omitempty"`
- Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
+ // attacher indicates the name of the volume driver that MUST handle this
+ // request. This is the name returned by GetPluginName().
+ Attacher *string `json:"attacher,omitempty"`
+ // source represents the volume that should be attached.
+ Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
+ // nodeName represents the node that the volume should be attached to.
+ NodeName *string `json:"nodeName,omitempty"`
}
// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
index a287fc6b..a4681f03 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachmentstatus.go
@@ -20,11 +20,27 @@ package v1alpha1
// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
// with apply.
+//
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
type VolumeAttachmentStatusApplyConfiguration struct {
- Attached *bool `json:"attached,omitempty"`
- AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
- AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
- DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
+ // attached indicates the volume is successfully attached.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ Attached *bool `json:"attached,omitempty"`
+ // attachmentMetadata is populated with any
+ // information returned by the attach operation, upon successful attach, that must be passed
+ // into subsequent WaitForAttach or Mount calls.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
+ // attachError represents the last error encountered during attach operation, if any.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
+ // detachError represents the last error encountered during detach operation, if any.
+ // This field must only be set by the entity completing the detach
+ // operation, i.e. the external-attacher.
+ DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
}
// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
index 9982cd6b..62ff6dcb 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattributesclass.go
@@ -29,11 +29,32 @@ import (
// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use
// with apply.
+//
+// VolumeAttributesClass represents a specification of mutable volume attributes
+// defined by the CSI driver. The class can be specified during dynamic provisioning
+// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
type VolumeAttributesClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DriverName *string `json:"driverName,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
+ // Name of the CSI driver
+ // This field is immutable.
+ DriverName *string `json:"driverName,omitempty"`
+ // parameters hold volume attributes defined by the CSI driver. These values
+ // are opaque to Kubernetes and are passed directly to the CSI driver.
+ // The underlying storage provider supports changing these attributes on an
+ // existing volume, however the parameters field itself is immutable. To
+ // invoke a volume update, a new VolumeAttributesClass should be created with
+ // new parameters, and the PersistentVolumeClaim should be updated to reference
+ // the new VolumeAttributesClass.
+ //
+ // This field is required and must contain at least one key/value pair.
+ // The keys cannot be empty, and the maximum number of parameters is 512, with
+ // a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
+ // the target PersistentVolumeClaim will be set to an "Infeasible" state in the
+ // modifyVolumeStatus field.
+ Parameters map[string]string `json:"parameters,omitempty"`
}
// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with
@@ -46,29 +67,14 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration
return b
}
-// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
-// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
-// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractVolumeAttributesClassFrom extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
-// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractVolumeAttributesClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
-}
-
-// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
-}
-
-func extractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClassFrom(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
b := &VolumeAttributesClassApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttributesClass"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +86,21 @@ func extractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeA
b.WithAPIVersion("storage.k8s.io/v1alpha1")
return b, nil
}
+
+// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
+// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttributesClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1alpha1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+ return ExtractVolumeAttributesClassFrom(volumeAttributesClass, fieldManager, "")
+}
+
func (b VolumeAttributesClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
index 19e52751..d9106e36 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeerror.go
@@ -24,10 +24,19 @@ import (
// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
// with apply.
+//
+// VolumeError captures an error encountered during a volume operation.
type VolumeErrorApplyConfiguration struct {
- Time *v1.Time `json:"time,omitempty"`
- Message *string `json:"message,omitempty"`
- ErrorCode *int32 `json:"errorCode,omitempty"`
+ // time represents the time the error was encountered.
+ Time *v1.Time `json:"time,omitempty"`
+ // message represents the error encountered during Attach or Detach operation.
+ // This string may be logged, so it should not contain sensitive
+ // information.
+ Message *string `json:"message,omitempty"`
+ // errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.
+ //
+ // This is an optional, alpha field that requires the MutableCSINodeAllocatableCount feature gate to be enabled in order to be set.
+ ErrorCode *int32 `json:"errorCode,omitempty"`
}
// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
index f7418215..bace6d42 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
@@ -29,10 +29,28 @@ import (
// CSIDriverApplyConfiguration represents a declarative configuration of the CSIDriver type for use
// with apply.
+//
+// CSIDriver captures information about a Container Storage Interface (CSI)
+// volume driver deployed on the cluster.
+// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the
+// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically
+// creates a CSIDriver object representing the driver.
+// Kubernetes attach detach controller uses this object to determine whether attach is required.
+// Kubelet uses this object to determine whether pod information needs to be passed on mount.
+// CSIDriver objects are non-namespaced.
type CSIDriverApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // metadata.Name indicates the name of the CSI driver that this object
+ // refers to; it MUST be the same name returned by the CSI GetPluginName()
+ // call for that driver.
+ // The driver name must be 63 characters or less, beginning and ending with
+ // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+ // alphanumerics between.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec represents the specification of the CSI Driver.
+ Spec *CSIDriverSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSIDriver constructs a declarative configuration of the CSIDriver type for use with
@@ -45,29 +63,14 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
return b
}
-// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
-// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
-// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSIDriverFrom extracts the applied configuration owned by fieldManager from
+// cSIDriver for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API.
-// ExtractCSIDriver provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSIDriverFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
- return extractCSIDriver(cSIDriver, fieldManager, "")
-}
-
-// ExtractCSIDriverStatus is the same as ExtractCSIDriver except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSIDriverStatus(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
- return extractCSIDriver(cSIDriver, fieldManager, "status")
-}
-
-func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
+func ExtractCSIDriverFrom(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
b := &CSIDriverApplyConfiguration{}
err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIDriver"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +82,21 @@ func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string,
b.WithAPIVersion("storage.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
+// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
+// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSIDriver must be an unmodified CSIDriver API object that was retrieved from the Kubernetes API.
+// ExtractCSIDriver provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return ExtractCSIDriverFrom(cSIDriver, fieldManager, "")
+}
+
func (b CSIDriverApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
index b1c9ec6d..323065d2 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriverspec.go
@@ -24,16 +24,160 @@ import (
// CSIDriverSpecApplyConfiguration represents a declarative configuration of the CSIDriverSpec type for use
// with apply.
+//
+// CSIDriverSpec is the specification of a CSIDriver.
type CSIDriverSpecApplyConfiguration struct {
- AttachRequired *bool `json:"attachRequired,omitempty"`
- PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
- VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
- StorageCapacity *bool `json:"storageCapacity,omitempty"`
- FSGroupPolicy *storagev1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
- TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
- RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
- SELinuxMount *bool `json:"seLinuxMount,omitempty"`
- NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty"`
+ // attachRequired indicates this CSI volume driver requires an attach
+ // operation (because it implements the CSI ControllerPublishVolume()
+ // method), and that the Kubernetes attach detach controller should call
+ // the attach volume interface which checks the volumeattachment status
+ // and waits until the volume is attached before proceeding to mounting.
+ // The CSI external-attacher coordinates with CSI volume driver and updates
+ // the volumeattachment status when the attach operation is complete.
+ // If the value is specified to false, the attach operation will be skipped.
+ // Otherwise the attach operation will be called.
+ //
+ // This field is immutable.
+ AttachRequired *bool `json:"attachRequired,omitempty"`
+ // podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.)
+ // during mount operations, if set to true.
+ // If set to false, pod information will not be passed on mount.
+ // Default is false.
+ //
+ // The CSI driver specifies podInfoOnMount as part of driver deployment.
+ // If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls.
+ // The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.
+ //
+ // The following VolumeContext will be passed if podInfoOnMount is set to true.
+ // This list might grow, but the prefix will be used.
+ // "csi.storage.k8s.io/pod.name": pod.Name
+ // "csi.storage.k8s.io/pod.namespace": pod.Namespace
+ // "csi.storage.k8s.io/pod.uid": string(pod.UID)
+ // "csi.storage.k8s.io/ephemeral": "true" if the volume is an ephemeral inline volume
+ // defined by a CSIVolumeSource, otherwise "false"
+ //
+ // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
+ // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
+ // Other drivers can leave pod info disabled and/or ignore this field.
+ // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
+ // deployed on such a cluster and the deployment determines which mode that is, for example
+ // via a command line parameter of the driver.
+ //
+ // This field is immutable.
+ PodInfoOnMount *bool `json:"podInfoOnMount,omitempty"`
+ // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
+ // The default if the list is empty is "Persistent", which is the usage defined by the
+ // CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.
+ //
+ // The other mode is "Ephemeral". In this mode, volumes are defined inline inside the pod spec
+ // with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod.
+ // A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.
+ //
+ // For more information about implementing this mode, see
+ // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
+ // A driver can support one or more of these modes and
+ // more modes may be added in the future.
+ //
+ // This field is immutable.
+ VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty"`
+ // storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage
+ // capacity that the driver deployment will report by creating
+ // CSIStorageCapacity objects with capacity information, if set to true.
+ //
+ // The check can be enabled immediately when deploying a driver.
+ // In that case, provisioning new volumes with late binding
+ // will pause until the driver deployment has published
+ // some suitable CSIStorageCapacity object.
+ //
+ // Alternatively, the driver can be deployed with the field
+ // unset or false and it can be flipped later when storage
+ // capacity information has been published.
+ //
+ // This field was immutable in Kubernetes <= 1.22 and now is mutable.
+ StorageCapacity *bool `json:"storageCapacity,omitempty"`
+ // fsGroupPolicy defines if the underlying volume supports changing ownership and
+ // permission of the volume before being mounted.
+ // Refer to the specific FSGroupPolicy values for additional details.
+ //
+ // This field is immutable.
+ //
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
+ FSGroupPolicy *storagev1beta1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"`
+ // tokenRequests indicates the CSI driver needs pods' service account
+ // tokens it is mounting volume for to do necessary authentication. Kubelet
+ // will pass the tokens in VolumeContext in the CSI NodePublishVolume calls.
+ // The CSI driver should parse and validate the following VolumeContext:
+ // "csi.storage.k8s.io/serviceAccount.tokens": {
+ // "": {
+ // "token": ,
+ // "expirationTimestamp": ,
+ // },
+ // ...
+ // }
+ //
+ // Note: Audience in each TokenRequest should be different and at
+ // most one token is empty string. To receive a new token after expiry,
+ // RequiresRepublish can be used to trigger NodePublishVolume periodically.
+ TokenRequests []TokenRequestApplyConfiguration `json:"tokenRequests,omitempty"`
+ // requiresRepublish indicates the CSI driver wants `NodePublishVolume`
+ // being periodically called to reflect any possible change in the mounted
+ // volume. This field defaults to false.
+ //
+ // Note: After a successful initial NodePublishVolume call, subsequent calls
+ // to NodePublishVolume should only update the contents of the volume. New
+ // mount points will not be seen by a running container.
+ RequiresRepublish *bool `json:"requiresRepublish,omitempty"`
+ // seLinuxMount specifies if the CSI driver supports "-o context"
+ // mount option.
+ //
+ // When "true", the CSI driver must ensure that all volumes provided by this CSI
+ // driver can be mounted separately with different `-o context` options. This is
+ // typical for storage backends that provide volumes as filesystems on block
+ // devices or as independent shared volumes.
+ // Kubernetes will call NodeStage / NodePublish with "-o context=xyz" mount
+ // option when mounting a ReadWriteOncePod volume used in Pod that has
+ // explicitly set SELinux context. In the future, it may be expanded to other
+ // volume AccessModes. In any case, Kubernetes will ensure that the volume is
+ // mounted only with a single SELinux context.
+ //
+ // When "false", Kubernetes won't pass any special SELinux mount options to the driver.
+ // This is typical for volumes that represent subdirectories of a bigger shared filesystem.
+ //
+ // Default is "false".
+ SELinuxMount *bool `json:"seLinuxMount,omitempty"`
+ // nodeAllocatableUpdatePeriodSeconds specifies the interval between periodic updates of
+ // the CSINode allocatable capacity for this driver. When set, both periodic updates and
+ // updates triggered by capacity-related failures are enabled. If not set, no updates
+ // occur (neither periodic nor upon detecting capacity-related failures), and the
+ // allocatable.count remains static. The minimum allowed value for this field is 10 seconds.
+ //
+ // This is a beta feature and requires the MutableCSINodeAllocatableCount feature gate to be enabled.
+ //
+ // This field is mutable.
+ NodeAllocatableUpdatePeriodSeconds *int64 `json:"nodeAllocatableUpdatePeriodSeconds,omitempty"`
+ // serviceAccountTokenInSecrets is an opt-in for CSI drivers to indicate that
+ // service account tokens should be passed via the Secrets field in NodePublishVolumeRequest
+ // instead of the VolumeContext field. The CSI specification provides a dedicated Secrets
+ // field for sensitive information like tokens, which is the appropriate mechanism for
+ // handling credentials. This addresses security concerns where sensitive tokens were being
+ // logged as part of volume context.
+ //
+ // When "true", kubelet will pass the tokens only in the Secrets field with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens". The CSI driver must be updated to read
+ // tokens from the Secrets field instead of VolumeContext.
+ //
+ // When "false" or not set, kubelet will pass the tokens in VolumeContext with the key
+ // "csi.storage.k8s.io/serviceAccount.tokens" (existing behavior). This maintains backward
+ // compatibility with existing CSI drivers.
+ //
+ // This field can only be set when TokenRequests is configured. The API server will reject
+ // CSIDriver specs that set this field without TokenRequests.
+ //
+ // Default behavior if unset is to pass tokens in the VolumeContext field.
+ ServiceAccountTokenInSecrets *bool `json:"serviceAccountTokenInSecrets,omitempty"`
}
// CSIDriverSpecApplyConfiguration constructs a declarative configuration of the CSIDriverSpec type for use with
@@ -120,3 +264,11 @@ func (b *CSIDriverSpecApplyConfiguration) WithNodeAllocatableUpdatePeriodSeconds
b.NodeAllocatableUpdatePeriodSeconds = &value
return b
}
+
+// WithServiceAccountTokenInSecrets sets the ServiceAccountTokenInSecrets field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccountTokenInSecrets field is set to the value of the last call.
+func (b *CSIDriverSpecApplyConfiguration) WithServiceAccountTokenInSecrets(value bool) *CSIDriverSpecApplyConfiguration {
+ b.ServiceAccountTokenInSecrets = &value
+ return b
+}
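The new ServiceAccountTokenInSecrets field and its WithServiceAccountTokenInSecrets setter slot into the usual builder chain. A hedged sketch follows; the driver name, audience, and expiration are invented and only illustrate how a deployment might opt into Secrets-based token delivery alongside tokenRequests.

```go
package example

import (
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

// exampleDriver builds a CSIDriver apply configuration that requests service
// account tokens and opts into delivery via the NodePublishVolume Secrets field.
func exampleDriver() *storagev1beta1ac.CSIDriverApplyConfiguration {
	return storagev1beta1ac.CSIDriver("csi.example.com").
		WithSpec(storagev1beta1ac.CSIDriverSpec().
			WithTokenRequests(storagev1beta1ac.TokenRequest().
				WithAudience("csi.example.com").
				WithExpirationSeconds(3600)).
			WithRequiresRepublish(true).
			// Only valid together with TokenRequests, per the comment above.
			WithServiceAccountTokenInSecrets(true))
}
```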
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
index 85e70903..7f690727 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
@@ -29,10 +29,24 @@ import (
// CSINodeApplyConfiguration represents a declarative configuration of the CSINode type for use
// with apply.
+//
+// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode.
+// See the release notes for more information.
+// CSINode holds information about all CSI drivers installed on a node.
+// CSI drivers do not need to create the CSINode object directly. As long as
+// they use the node-driver-registrar sidecar container, the kubelet will
+// automatically populate the CSINode object for the CSI driver as part of
+// kubelet plugin registration.
+// CSINode has the same name as a node. If the object is missing, it means either
+// there are no CSI Drivers available on the node, or the Kubelet version is low
+// enough that it doesn't create this object.
+// CSINode has an OwnerReference that points to the corresponding node object.
type CSINodeApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // metadata.name must be the Kubernetes node name.
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
+ // spec is the specification of CSINode
+ Spec *CSINodeSpecApplyConfiguration `json:"spec,omitempty"`
}
// CSINode constructs a declarative configuration of the CSINode type for use with
@@ -45,29 +59,14 @@ func CSINode(name string) *CSINodeApplyConfiguration {
return b
}
-// ExtractCSINode extracts the applied configuration owned by fieldManager from
-// cSINode. If no managedFields are found in cSINode for fieldManager, a
-// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSINodeFrom extracts the applied configuration owned by fieldManager from
+// cSINode for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSINode must be a unmodified CSINode API object that was retrieved from the Kubernetes API.
-// ExtractCSINode provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSINodeFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
- return extractCSINode(cSINode, fieldManager, "")
-}
-
-// ExtractCSINodeStatus is the same as ExtractCSINode except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSINodeStatus(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
- return extractCSINode(cSINode, fieldManager, "status")
-}
-
-func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
+func ExtractCSINodeFrom(cSINode *storagev1beta1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
b := &CSINodeApplyConfiguration{}
err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSINode"), fieldManager, b, subresource)
if err != nil {
@@ -79,6 +78,21 @@ func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subres
b.WithAPIVersion("storage.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractCSINode extracts the applied configuration owned by fieldManager from
+// cSINode. If no managedFields are found in cSINode for fieldManager, a
+// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSINode must be an unmodified CSINode API object that was retrieved from the Kubernetes API.
+// ExtractCSINode provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return ExtractCSINodeFrom(cSINode, fieldManager, "")
+}
+
func (b CSINodeApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
index 65ad771b..7e16fbdc 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodedriver.go
@@ -20,11 +20,36 @@ package v1beta1
// CSINodeDriverApplyConfiguration represents a declarative configuration of the CSINodeDriver type for use
// with apply.
+//
+// CSINodeDriver holds information about the specification of one CSI driver installed on a node
type CSINodeDriverApplyConfiguration struct {
- Name *string `json:"name,omitempty"`
- NodeID *string `json:"nodeID,omitempty"`
- TopologyKeys []string `json:"topologyKeys,omitempty"`
- Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
+ // name represents the name of the CSI driver that this object refers to.
+ // This MUST be the same name returned by the CSI GetPluginName() call for
+ // that driver.
+ Name *string `json:"name,omitempty"`
+ // nodeID of the node from the driver point of view.
+ // This field enables Kubernetes to communicate with storage systems that do
+ // not share the same nomenclature for nodes. For example, Kubernetes may
+ // refer to a given node as "node1", but the storage system may refer to
+ // the same node as "nodeA". When Kubernetes issues a command to the storage
+ // system to attach a volume to a specific node, it can use this field to
+ // refer to the node name using the ID that the storage system will
+ // understand, e.g. "nodeA" instead of "node1". This field is required.
+ NodeID *string `json:"nodeID,omitempty"`
+ // topologyKeys is the list of keys supported by the driver.
+ // When a driver is initialized on a cluster, it provides a set of topology
+ // keys that it understands (e.g. "company.com/zone", "company.com/region").
+ // When a driver is initialized on a node, it provides the same topology keys
+ // along with values. Kubelet will expose these topology keys as labels
+ // on its own node object.
+ // When Kubernetes does topology aware provisioning, it can use this list to
+ // determine which labels it should retrieve from the node object and pass
+ // back to the driver.
+ // It is possible for different nodes to use different topology keys.
+ // This can be empty if the driver does not support topology.
+ TopologyKeys []string `json:"topologyKeys,omitempty"`
+ // allocatable represents the volume resources of a node that are available for scheduling.
+ Allocatable *VolumeNodeResourcesApplyConfiguration `json:"allocatable,omitempty"`
}
// CSINodeDriverApplyConfiguration constructs a declarative configuration of the CSINodeDriver type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
index c9cbea1d..71fc06a7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinodespec.go
@@ -20,7 +20,11 @@ package v1beta1
// CSINodeSpecApplyConfiguration represents a declarative configuration of the CSINodeSpec type for use
// with apply.
+//
+// CSINodeSpec holds information about the specification of all CSI drivers installed on a node
type CSINodeSpecApplyConfiguration struct {
+ // drivers is a list of information of all CSI Drivers existing on a node.
+ // If all drivers in the list are uninstalled, this can become empty.
Drivers []CSINodeDriverApplyConfiguration `json:"drivers,omitempty"`
}
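The CSINodeDriver fields documented above are normally populated by kubelet during plugin registration; purely as an illustration (the node name, driver name, nodeID, topology keys, and volume count are invented), an apply configuration for one driver entry could be built as follows.

```go
package example

import (
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

// exampleCSINode assembles a per-node record for one hypothetical CSI driver,
// including the allocatable volume count used by capacity checks.
func exampleCSINode() *storagev1beta1ac.CSINodeApplyConfiguration {
	return storagev1beta1ac.CSINode("node1").
		WithSpec(storagev1beta1ac.CSINodeSpec().
			WithDrivers(storagev1beta1ac.CSINodeDriver().
				WithName("csi.example.com").
				WithNodeID("nodeA").
				WithTopologyKeys("company.com/zone", "company.com/region").
				WithAllocatable(storagev1beta1ac.VolumeNodeResources().WithCount(128))))
}
```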
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
index d0da232d..e27221e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
@@ -30,13 +30,76 @@ import (
// CSIStorageCapacityApplyConfiguration represents a declarative configuration of the CSIStorageCapacity type for use
// with apply.
+//
+// CSIStorageCapacity stores the result of one CSI GetCapacity call.
+// For a given StorageClass, this describes the available capacity in a
+// particular topology segment. This can be used when considering where to
+// instantiate new PersistentVolumes.
+//
+// For example this can express things like:
+// - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1"
+// - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
+//
+// The following three cases all imply that no capacity is available for
+// a certain combination:
+// - no object exists with suitable topology and storage class name
+// - such an object exists, but the capacity is unset
+// - such an object exists, but the capacity is zero
+//
+// The producer of these objects can decide which approach is more suitable.
+//
+// They are consumed by the kube-scheduler when a CSI driver opts into
+// capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler
+// compares the MaximumVolumeSize against the requested size of pending volumes
+// to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back
+// to a comparison against the less precise Capacity. If that is also unset,
+// the scheduler assumes that capacity is insufficient and tries some other
+// node.
type CSIStorageCapacityApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata. The name has no particular meaning. It must
+ // be a DNS subdomain (dots allowed, 253 characters). To ensure that
+ // there are no conflicts with other CSI drivers on the cluster, the recommendation
+ // is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends
+ // with the unique CSI driver name.
+ //
+ // Objects are namespaced.
+ //
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
- StorageClassName *string `json:"storageClassName,omitempty"`
- Capacity *resource.Quantity `json:"capacity,omitempty"`
- MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
+ // nodeTopology defines which nodes have access to the storage
+ // for which capacity was reported. If not set, the storage is
+ // not accessible from any node in the cluster. If empty, the
+ // storage is accessible from all nodes. This field is
+ // immutable.
+ NodeTopology *v1.LabelSelectorApplyConfiguration `json:"nodeTopology,omitempty"`
+ // storageClassName represents the name of the StorageClass that the reported capacity applies to.
+ // It must meet the same requirements as the name of a StorageClass
+ // object (non-empty, DNS subdomain). If that object no longer exists,
+ // the CSIStorageCapacity object is obsolete and should be removed by its
+ // creator.
+ // This field is immutable.
+ StorageClassName *string `json:"storageClassName,omitempty"`
+ // capacity is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // The semantic is currently (CSI spec 1.2) defined as:
+ // The available capacity, in bytes, of the storage that can be used
+ // to provision volumes. If not set, that information is currently
+ // unavailable.
+ Capacity *resource.Quantity `json:"capacity,omitempty"`
+ // maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse
+ // for a GetCapacityRequest with topology and parameters that match the
+ // previous fields.
+ //
+ // This is defined since CSI spec 1.4.0 as the largest size
+ // that may be used in a
+ // CreateVolumeRequest.capacity_range.required_bytes field to
+ // create a volume with the same parameters as those in
+ // GetCapacityRequest. The corresponding value in the Kubernetes
+ // API is ResourceRequirements.Requests in a volume claim.
+ MaximumVolumeSize *resource.Quantity `json:"maximumVolumeSize,omitempty"`
}
// CSIStorageCapacity constructs a declarative configuration of the CSIStorageCapacity type for use with
@@ -50,29 +113,14 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
return b
}
-// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
-// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
-// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractCSIStorageCapacityFrom extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
-// ExtractCSIStorageCapacity provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractCSIStorageCapacityFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
-}
-
-// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
- return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
-}
-
-func extractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
+func ExtractCSIStorageCapacityFrom(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
@@ -85,6 +133,21 @@ func extractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapa
b.WithAPIVersion("storage.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
+// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
+// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// cSIStorageCapacity must be an unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
+// ExtractCSIStorageCapacity provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractCSIStorageCapacity(cSIStorageCapacity *storagev1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return ExtractCSIStorageCapacityFrom(cSIStorageCapacity, fieldManager, "")
+}
+
func (b CSIStorageCapacityApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
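
A sketch of the extract/modify-in-place/apply workflow the comments above describe, using the retained ExtractCSIStorageCapacity wrapper (equivalent to ExtractCSIStorageCapacityFrom with an empty subresource) against a typed clientset. The namespace, object name, StorageClass name, quantities, and field manager below are placeholders.

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// refreshCapacity re-publishes the capacity a driver reported for one
// topology segment, touching only the fields owned by fieldManager.
func refreshCapacity(ctx context.Context, cs kubernetes.Interface) error {
	const ns, name, manager = "kube-system", "csisc-example", "example-capacity-controller" // placeholders

	// Read the live object and extract only the fields this manager owns.
	live, err := cs.StorageV1beta1().CSIStorageCapacities(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	cfg, err := storagev1beta1ac.ExtractCSIStorageCapacity(live, manager) // same as ExtractCSIStorageCapacityFrom(live, manager, "")
	if err != nil {
		return err
	}

	// Modify in place: the latest GetCapacity results for this StorageClass/segment.
	cfg.WithStorageClassName("standard").
		WithCapacity(resource.MustParse("1234Gi")).
		WithMaximumVolumeSize(resource.MustParse("512Gi"))

	// Apply it back; only the fields owned by this manager are sent.
	_, err = cs.StorageV1beta1().CSIStorageCapacities(ns).Apply(ctx, cfg,
		metav1.ApplyOptions{FieldManager: manager, Force: true})
	return err
}
```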
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
index 3eccf819..49bf87ac 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
@@ -31,16 +31,40 @@ import (
// StorageClassApplyConfiguration represents a declarative configuration of the StorageClass type for use
// with apply.
+//
+// StorageClass describes the parameters for a class of storage for
+// which PersistentVolumes can be dynamically provisioned.
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
type StorageClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Provisioner *string `json:"provisioner,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
- ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
- MountOptions []string `json:"mountOptions,omitempty"`
- AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
- VolumeBindingMode *storagev1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
- AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
+ // provisioner indicates the type of the provisioner.
+ Provisioner *string `json:"provisioner,omitempty"`
+ // parameters holds the parameters for the provisioner that should
+ // create volumes of this storage class.
+ Parameters map[string]string `json:"parameters,omitempty"`
+ // reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class.
+ // Defaults to Delete.
+ ReclaimPolicy *corev1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty"`
+ // mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class.
+ // e.g. ["ro", "soft"]. Not validated -
+ // mount of the PVs will simply fail if one is invalid.
+ MountOptions []string `json:"mountOptions,omitempty"`
+ // allowVolumeExpansion shows whether the storage class allows volume expansion.
+ AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty"`
+ // volumeBindingMode indicates how PersistentVolumeClaims should be
+ // provisioned and bound. When unset, VolumeBindingImmediate is used.
+ // This field is only honored by servers that enable the VolumeScheduling feature.
+ VolumeBindingMode *storagev1beta1.VolumeBindingMode `json:"volumeBindingMode,omitempty"`
+ // allowedTopologies restrict the node topologies where volumes can be dynamically provisioned.
+ // Each volume plugin defines its own supported topology specifications.
+ // An empty TopologySelectorTerm list means there is no topology restriction.
+ // This field is only honored by servers that enable the VolumeScheduling feature.
+ AllowedTopologies []applyconfigurationscorev1.TopologySelectorTermApplyConfiguration `json:"allowedTopologies,omitempty"`
}
// StorageClass constructs a declarative configuration of the StorageClass type for use with
@@ -53,29 +77,14 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
return b
}
-// ExtractStorageClass extracts the applied configuration owned by fieldManager from
-// storageClass. If no managedFields are found in storageClass for fieldManager, a
-// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractStorageClassFrom extracts the applied configuration owned by fieldManager from
+// storageClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// storageClass must be a unmodified StorageClass API object that was retrieved from the Kubernetes API.
-// ExtractStorageClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractStorageClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
- return extractStorageClass(storageClass, fieldManager, "")
-}
-
-// ExtractStorageClassStatus is the same as ExtractStorageClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractStorageClassStatus(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
- return extractStorageClass(storageClass, fieldManager, "status")
-}
-
-func extractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
+func ExtractStorageClassFrom(storageClass *storagev1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
b := &StorageClassApplyConfiguration{}
err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource)
if err != nil {
@@ -87,6 +96,21 @@ func extractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager
b.WithAPIVersion("storage.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractStorageClass extracts the applied configuration owned by fieldManager from
+// storageClass. If no managedFields are found in storageClass for fieldManager, a
+// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// storageClass must be an unmodified StorageClass API object that was retrieved from the Kubernetes API.
+// ExtractStorageClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStorageClass(storageClass *storagev1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return ExtractStorageClassFrom(storageClass, fieldManager, "")
+}
+
func (b StorageClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
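
As a usage sketch for the documented StorageClass fields, the following builds an apply configuration and pushes it with server-side apply. The class name, provisioner, parameters, mount options, and field manager are placeholders.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// ensureStorageClass declaratively owns a StorageClass via server-side apply.
func ensureStorageClass(ctx context.Context, cs kubernetes.Interface) error {
	sc := storagev1beta1ac.StorageClass("fast-local"). // placeholder class name
		WithProvisioner("csi.example.com").        // placeholder provisioner
		WithParameters(map[string]string{"type": "nvme"}).
		WithReclaimPolicy(corev1.PersistentVolumeReclaimDelete).
		WithAllowVolumeExpansion(true).
		WithVolumeBindingMode(storagev1beta1.VolumeBindingWaitForFirstConsumer).
		WithMountOptions("noatime")

	_, err := cs.StorageV1beta1().StorageClasses().Apply(ctx, sc, metav1.ApplyOptions{
		FieldManager: "example-storage-operator", // placeholder manager
		Force:        true,
	})
	return err
}
```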
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
index e0f2df28..6a8946e1 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/tokenrequest.go
@@ -20,9 +20,15 @@ package v1beta1
// TokenRequestApplyConfiguration represents a declarative configuration of the TokenRequest type for use
// with apply.
+//
+// TokenRequest contains parameters of a service account token.
type TokenRequestApplyConfiguration struct {
- Audience *string `json:"audience,omitempty"`
- ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
+ // audience is the intended audience of the token in "TokenRequestSpec".
+ // It will default to the audiences of kube apiserver.
+ Audience *string `json:"audience,omitempty"`
+ // expirationSeconds is the duration of validity of the token in "TokenRequestSpec".
+ // It has the same default value of "ExpirationSeconds" in "TokenRequestSpec"
+ ExpirationSeconds *int64 `json:"expirationSeconds,omitempty"`
}
// TokenRequestApplyConfiguration constructs a declarative configuration of the TokenRequest type for use with
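
A small sketch of where TokenRequest apply configurations are used: they hang off CSIDriverSpec.TokenRequests, which tells the kubelet which service-account tokens to pass to the driver. The audience, driver name, and expiry below are placeholders, and the CSIDriver/CSIDriverSpec constructors are assumed from this same package.

```go
package main

import (
	"fmt"

	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

func main() {
	// A service-account token the kubelet should pass to the CSI driver;
	// audience and validity are placeholders.
	tr := storagev1beta1ac.TokenRequest().
		WithAudience("vault.example.com").
		WithExpirationSeconds(3600)

	// TokenRequests hang off the CSIDriver spec in this same package.
	drv := storagev1beta1ac.CSIDriver("csi.example.com").
		WithSpec(storagev1beta1ac.CSIDriverSpec().WithTokenRequests(tr))

	fmt.Println(*drv.Name, *drv.Spec.TokenRequests[0].Audience)
}
```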
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
index 9e7fce4c..da938bd3 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
@@ -29,11 +29,23 @@ import (
// VolumeAttachmentApplyConfiguration represents a declarative configuration of the VolumeAttachment type for use
// with apply.
+//
+// VolumeAttachment captures the intent to attach or detach the specified volume
+// to/from the specified node.
+//
+// VolumeAttachment objects are non-namespaced.
type VolumeAttachmentApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
- Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
+ // spec represents specification of the desired attach/detach volume behavior.
+ // Populated by the Kubernetes system.
+ Spec *VolumeAttachmentSpecApplyConfiguration `json:"spec,omitempty"`
+ // status represents status of the VolumeAttachment request.
+ // Populated by the entity completing the attach or detach
+ // operation, i.e. the external-attacher.
+ Status *VolumeAttachmentStatusApplyConfiguration `json:"status,omitempty"`
}
// VolumeAttachment constructs a declarative configuration of the VolumeAttachment type for use with
@@ -46,6 +58,26 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
return b
}
+// ExtractVolumeAttachmentFrom extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// volumeAttachment must be an unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttachmentFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttachmentFrom(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
+ b := &VolumeAttachmentApplyConfiguration{}
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(volumeAttachment.Name)
+
+ b.WithKind("VolumeAttachment")
+ b.WithAPIVersion("storage.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +88,16 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
func ExtractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "")
}
-// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
-// that it extracts the status subresource applied configuration.
-// Experimental!
+// ExtractVolumeAttachmentStatus extracts the applied configuration owned by fieldManager from
+// volumeAttachment for the status subresource.
func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
- return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+ return ExtractVolumeAttachmentFrom(volumeAttachment, fieldManager, "status")
}
-func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
- b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(volumeAttachment.Name)
-
- b.WithKind("VolumeAttachment")
- b.WithAPIVersion("storage.k8s.io/v1beta1")
- return b, nil
-}
func (b VolumeAttachmentApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
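
The relationship between the retained wrappers and the new ExtractVolumeAttachmentFrom can be illustrated as follows: an empty subresource extracts the main resource, while "status" extracts the status subresource. The field manager name is a placeholder.

```go
package example

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
)

// extractBoth extracts the fields owned by one manager from a VolumeAttachment
// that was read from the API: "" selects the main resource, "status" the
// status subresource. The manager name is a placeholder.
func extractBoth(va *storagev1beta1.VolumeAttachment) error {
	const manager = "external-attacher"

	mainRes, err := storagev1beta1ac.ExtractVolumeAttachmentFrom(va, manager, "")
	if err != nil {
		return err
	}
	status, err := storagev1beta1ac.ExtractVolumeAttachmentFrom(va, manager, "status")
	if err != nil {
		return err
	}
	// Both configurations always carry name, kind, and apiVersion.
	fmt.Println(*mainRes.Name, *status.Name)
	return nil
}
```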
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
index b08dd314..5fcf33c8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentsource.go
@@ -24,9 +24,21 @@ import (
// VolumeAttachmentSourceApplyConfiguration represents a declarative configuration of the VolumeAttachmentSource type for use
// with apply.
+//
+// VolumeAttachmentSource represents a volume that should be attached.
+// Right now only PersistentVolumes can be attached via external attacher,
+// in the future we may allow also inline volumes in pods.
+// Exactly one member can be set.
type VolumeAttachmentSourceApplyConfiguration struct {
- PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
- InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
+ // persistentVolumeName represents the name of the persistent volume to attach.
+ PersistentVolumeName *string `json:"persistentVolumeName,omitempty"`
+ // inlineVolumeSpec contains all the information necessary to attach
+ // a persistent volume defined by a pod's inline VolumeSource. This field
+ // is populated only for the CSIMigration feature. It contains
+ // translated fields from a pod's inline VolumeSource to a
+ // PersistentVolumeSpec. This field is beta-level and is only
+ // honored by servers that enabled the CSIMigration feature.
+ InlineVolumeSpec *v1.PersistentVolumeSpecApplyConfiguration `json:"inlineVolumeSpec,omitempty"`
}
// VolumeAttachmentSourceApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSource type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
index 3bdaeb45..f018c2a7 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentspec.go
@@ -20,10 +20,16 @@ package v1beta1
// VolumeAttachmentSpecApplyConfiguration represents a declarative configuration of the VolumeAttachmentSpec type for use
// with apply.
+//
+// VolumeAttachmentSpec is the specification of a VolumeAttachment request.
type VolumeAttachmentSpecApplyConfiguration struct {
- Attacher *string `json:"attacher,omitempty"`
- Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
- NodeName *string `json:"nodeName,omitempty"`
+ // attacher indicates the name of the volume driver that MUST handle this
+ // request. This is the name returned by GetPluginName().
+ Attacher *string `json:"attacher,omitempty"`
+ // source represents the volume that should be attached.
+ Source *VolumeAttachmentSourceApplyConfiguration `json:"source,omitempty"`
+ // nodeName represents the node that the volume should be attached to.
+ NodeName *string `json:"nodeName,omitempty"`
}
// VolumeAttachmentSpecApplyConfiguration constructs a declarative configuration of the VolumeAttachmentSpec type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
index f7046cdb..a8ef137e 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachmentstatus.go
@@ -20,11 +20,27 @@ package v1beta1
// VolumeAttachmentStatusApplyConfiguration represents a declarative configuration of the VolumeAttachmentStatus type for use
// with apply.
+//
+// VolumeAttachmentStatus is the status of a VolumeAttachment request.
type VolumeAttachmentStatusApplyConfiguration struct {
- Attached *bool `json:"attached,omitempty"`
- AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
- AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
- DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
+ // attached indicates the volume is successfully attached.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ Attached *bool `json:"attached,omitempty"`
+ // attachmentMetadata is populated with any
+ // information returned by the attach operation, upon successful attach, that must be passed
+ // into subsequent WaitForAttach or Mount calls.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty"`
+ // attachError represents the last error encountered during attach operation, if any.
+ // This field must only be set by the entity completing the attach
+ // operation, i.e. the external-attacher.
+ AttachError *VolumeErrorApplyConfiguration `json:"attachError,omitempty"`
+ // detachError represents the last error encountered during detach operation, if any.
+ // This field must only be set by the entity completing the detach
+ // operation, i.e. the external-attacher.
+ DetachError *VolumeErrorApplyConfiguration `json:"detachError,omitempty"`
}
// VolumeAttachmentStatusApplyConfiguration constructs a declarative configuration of the VolumeAttachmentStatus type for use with
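
Putting the status fields together, here is a sketch of what an external attacher might do after a failed attach: build a VolumeAttachmentStatus with attachError and push it through ApplyStatus so only the status subresource is touched. The object name and field manager are placeholders, and the spec (attacher, source, nodeName) is normally populated by the controller that created the VolumeAttachment.

```go
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// reportAttachFailure records a failed attach on the status subresource, which
// is what an external attacher would own. Object name and manager are placeholders.
func reportAttachFailure(ctx context.Context, cs kubernetes.Interface, attachErr error) error {
	const name, manager = "csi-1234567890abcdef", "external-attacher"

	status := storagev1beta1ac.VolumeAttachmentStatus().
		WithAttached(false).
		WithAttachError(storagev1beta1ac.VolumeError().
			WithTime(metav1.NewTime(time.Now())).
			WithMessage(attachErr.Error()))
	// WithErrorCode could additionally record the gRPC code where the
	// MutableCSINodeAllocatableCount feature gate is enabled.

	va := storagev1beta1ac.VolumeAttachment(name).WithStatus(status)

	_, err := cs.StorageV1beta1().VolumeAttachments().ApplyStatus(ctx, va, metav1.ApplyOptions{
		FieldManager: manager,
		Force:        true,
	})
	return err
}
```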
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
index 7def1435..7d04a86c 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go
@@ -29,11 +29,32 @@ import (
// VolumeAttributesClassApplyConfiguration represents a declarative configuration of the VolumeAttributesClass type for use
// with apply.
+//
+// VolumeAttributesClass represents a specification of mutable volume attributes
+// defined by the CSI driver. The class can be specified during dynamic provisioning
+// of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.
type VolumeAttributesClassApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- DriverName *string `json:"driverName,omitempty"`
- Parameters map[string]string `json:"parameters,omitempty"`
+ // Name of the CSI driver
+ // This field is immutable.
+ DriverName *string `json:"driverName,omitempty"`
+ // parameters hold volume attributes defined by the CSI driver. These values
+ // are opaque to Kubernetes and are passed directly to the CSI driver.
+ // The underlying storage provider supports changing these attributes on an
+ // existing volume, however the parameters field itself is immutable. To
+ // invoke a volume update, a new VolumeAttributesClass should be created with
+ // new parameters, and the PersistentVolumeClaim should be updated to reference
+ // the new VolumeAttributesClass.
+ //
+ // This field is required and must contain at least one key/value pair.
+ // The keys cannot be empty, and the maximum number of parameters is 512, with
+ // a cumulative max size of 256K. If the CSI driver rejects invalid parameters,
+ // the target PersistentVolumeClaim will be set to an "Infeasible" state in the
+ // modifyVolumeStatus field.
+ Parameters map[string]string `json:"parameters,omitempty"`
}
// VolumeAttributesClass constructs a declarative configuration of the VolumeAttributesClass type for use with
@@ -46,29 +67,14 @@ func VolumeAttributesClass(name string) *VolumeAttributesClassApplyConfiguration
return b
}
-// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
-// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
-// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found for because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned fields any fields.
+// ExtractVolumeAttributesClassFrom extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
// volumeAttributesClass must be a unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
-// ExtractVolumeAttributesClass provides a way to perform a extract/modify-in-place/apply workflow.
+// ExtractVolumeAttributesClassFrom provides a way to perform an extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "")
-}
-
-// ExtractVolumeAttributesClassStatus is the same as ExtractVolumeAttributesClass except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractVolumeAttributesClassStatus(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
- return extractVolumeAttributesClass(volumeAttributesClass, fieldManager, "status")
-}
-
-func extractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
+func ExtractVolumeAttributesClassFrom(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string, subresource string) (*VolumeAttributesClassApplyConfiguration, error) {
b := &VolumeAttributesClassApplyConfiguration{}
err := managedfields.ExtractInto(volumeAttributesClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttributesClass"), fieldManager, b, subresource)
if err != nil {
@@ -80,6 +86,21 @@ func extractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAt
b.WithAPIVersion("storage.k8s.io/v1beta1")
return b, nil
}
+
+// ExtractVolumeAttributesClass extracts the applied configuration owned by fieldManager from
+// volumeAttributesClass. If no managedFields are found in volumeAttributesClass for fieldManager, a
+// VolumeAttributesClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// volumeAttributesClass must be an unmodified VolumeAttributesClass API object that was retrieved from the Kubernetes API.
+// ExtractVolumeAttributesClass provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractVolumeAttributesClass(volumeAttributesClass *storagev1beta1.VolumeAttributesClass, fieldManager string) (*VolumeAttributesClassApplyConfiguration, error) {
+ return ExtractVolumeAttributesClassFrom(volumeAttributesClass, fieldManager, "")
+}
+
func (b VolumeAttributesClassApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
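
A sketch of publishing a VolumeAttributesClass with server-side apply; it assumes the typed VolumeAttributesClasses accessor on the storage/v1beta1 client. The class name, driver name, parameter keys, and field manager are placeholders.

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1beta1ac "k8s.io/client-go/applyconfigurations/storage/v1beta1"
	"k8s.io/client-go/kubernetes"
)

// ensureVolumeAttributesClass publishes a class of mutable volume attributes.
// To change attributes later, a new class is created and the PVC is pointed at it.
func ensureVolumeAttributesClass(ctx context.Context, cs kubernetes.Interface) error {
	vac := storagev1beta1ac.VolumeAttributesClass("gold"). // placeholder class name
		WithDriverName("csi.example.com").             // placeholder driver
		WithParameters(map[string]string{
			"iops":       "8000",
			"throughput": "500Mi",
		})

	_, err := cs.StorageV1beta1().VolumeAttributesClasses().Apply(ctx, vac, metav1.ApplyOptions{
		FieldManager: "example-storage-operator", // placeholder manager
		Force:        true,
	})
	return err
}
```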
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
index 015bcd86..e3dca51b 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeerror.go
@@ -24,10 +24,19 @@ import (
// VolumeErrorApplyConfiguration represents a declarative configuration of the VolumeError type for use
// with apply.
+//
+// VolumeError captures an error encountered during a volume operation.
type VolumeErrorApplyConfiguration struct {
- Time *v1.Time `json:"time,omitempty"`
- Message *string `json:"message,omitempty"`
- ErrorCode *int32 `json:"errorCode,omitempty"`
+ // time represents the time the error was encountered.
+ Time *v1.Time `json:"time,omitempty"`
+ // message represents the error encountered during Attach or Detach operation.
+ // This string may be logged, so it should not contain sensitive
+ // information.
+ Message *string `json:"message,omitempty"`
+ // errorCode is a numeric gRPC code representing the error encountered during Attach or Detach operations.
+ //
+ // This is an optional, beta field that requires the MutableCSINodeAllocatableCount feature gate to be enabled in order to be set.
+ ErrorCode *int32 `json:"errorCode,omitempty"`
}
// VolumeErrorApplyConfiguration constructs a declarative configuration of the VolumeError type for use with
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
index b42c9dec..92522236 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumenoderesources.go
@@ -20,7 +20,13 @@ package v1beta1
// VolumeNodeResourcesApplyConfiguration represents a declarative configuration of the VolumeNodeResources type for use
// with apply.
+//
+// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResourcesApplyConfiguration struct {
+ // count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node.
+ // A volume that is both attached and mounted on a node is considered to be used once, not twice.
+ // The same rule applies for a unique volume that is shared among multiple pods on the same node.
+ // If this field is nil, then the supported number of volumes on this node is unbounded.
Count *int32 `json:"count,omitempty"`
}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
deleted file mode 100644
index 5ffd572e..00000000
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by applyconfiguration-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- v1 "k8s.io/api/core/v1"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// MigrationConditionApplyConfiguration represents a declarative configuration of the MigrationCondition type for use
-// with apply.
-type MigrationConditionApplyConfiguration struct {
- Type *storagemigrationv1alpha1.MigrationConditionType `json:"type,omitempty"`
- Status *v1.ConditionStatus `json:"status,omitempty"`
- LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Message *string `json:"message,omitempty"`
-}
-
-// MigrationConditionApplyConfiguration constructs a declarative configuration of the MigrationCondition type for use with
-// apply.
-func MigrationCondition() *MigrationConditionApplyConfiguration {
- return &MigrationConditionApplyConfiguration{}
-}
-
-// WithType sets the Type field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Type field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithType(value storagemigrationv1alpha1.MigrationConditionType) *MigrationConditionApplyConfiguration {
- b.Type = &value
- return b
-}
-
-// WithStatus sets the Status field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Status field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithStatus(value v1.ConditionStatus) *MigrationConditionApplyConfiguration {
- b.Status = &value
- return b
-}
-
-// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the LastUpdateTime field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *MigrationConditionApplyConfiguration {
- b.LastUpdateTime = &value
- return b
-}
-
-// WithReason sets the Reason field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Reason field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithReason(value string) *MigrationConditionApplyConfiguration {
- b.Reason = &value
- return b
-}
-
-// WithMessage sets the Message field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Message field is set to the value of the last call.
-func (b *MigrationConditionApplyConfiguration) WithMessage(value string) *MigrationConditionApplyConfiguration {
- b.Message = &value
- return b
-}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigration.go
similarity index 85%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigration.go
index e7963d55..e52fa5d8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigration.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
@@ -29,11 +29,18 @@ import (
// StorageVersionMigrationApplyConfiguration represents a declarative configuration of the StorageVersionMigration type for use
// with apply.
+//
+// StorageVersionMigration represents a migration of stored data to the latest
+// storage version.
type StorageVersionMigrationApplyConfiguration struct {
- v1.TypeMetaApplyConfiguration `json:",inline"`
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ // Standard object metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
- Spec *StorageVersionMigrationSpecApplyConfiguration `json:"spec,omitempty"`
- Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"`
+ // Specification of the migration.
+ Spec *StorageVersionMigrationSpecApplyConfiguration `json:"spec,omitempty"`
+ // Status of the migration.
+ Status *StorageVersionMigrationStatusApplyConfiguration `json:"status,omitempty"`
}
// StorageVersionMigration constructs a declarative configuration of the StorageVersionMigration type for use with
@@ -42,10 +49,30 @@ func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfigura
b := &StorageVersionMigrationApplyConfiguration{}
b.WithName(name)
b.WithKind("StorageVersionMigration")
- b.WithAPIVersion("storagemigration.k8s.io/v1alpha1")
+ b.WithAPIVersion("storagemigration.k8s.io/v1beta1")
return b
}
+// ExtractStorageVersionMigrationFrom extracts the applied configuration owned by fieldManager from
+// storageVersionMigration for the specified subresource. Pass an empty string for subresource to extract
+// the main resource. Common subresources include "status", "scale", etc.
+// storageVersionMigration must be an unmodified StorageVersionMigration API object that was retrieved from the Kubernetes API.
+// ExtractStorageVersionMigrationFrom provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+func ExtractStorageVersionMigrationFrom(storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, fieldManager string, subresource string) (*StorageVersionMigrationApplyConfiguration, error) {
+ b := &StorageVersionMigrationApplyConfiguration{}
+ err := managedfields.ExtractInto(storageVersionMigration, internal.Parser().Type("io.k8s.api.storagemigration.v1beta1.StorageVersionMigration"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(storageVersionMigration.Name)
+
+ b.WithKind("StorageVersionMigration")
+ b.WithAPIVersion("storagemigration.k8s.io/v1beta1")
+ return b, nil
+}
+
// ExtractStorageVersionMigration extracts the applied configuration owned by fieldManager from
// storageVersionMigration. If no managedFields are found in storageVersionMigration for fieldManager, a
// StorageVersionMigrationApplyConfiguration is returned with only the Name, Namespace (if applicable),
@@ -56,30 +83,16 @@ func StorageVersionMigration(name string) *StorageVersionMigrationApplyConfigura
// ExtractStorageVersionMigration provides a way to perform a extract/modify-in-place/apply workflow.
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) {
- return extractStorageVersionMigration(storageVersionMigration, fieldManager, "")
+func ExtractStorageVersionMigration(storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) {
+ return ExtractStorageVersionMigrationFrom(storageVersionMigration, fieldManager, "")
}
-// ExtractStorageVersionMigrationStatus is the same as ExtractStorageVersionMigration except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractStorageVersionMigrationStatus(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) {
- return extractStorageVersionMigration(storageVersionMigration, fieldManager, "status")
+// ExtractStorageVersionMigrationStatus extracts the applied configuration owned by fieldManager from
+// storageVersionMigration for the status subresource.
+func ExtractStorageVersionMigrationStatus(storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, fieldManager string) (*StorageVersionMigrationApplyConfiguration, error) {
+ return ExtractStorageVersionMigrationFrom(storageVersionMigration, fieldManager, "status")
}
-func extractStorageVersionMigration(storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, fieldManager string, subresource string) (*StorageVersionMigrationApplyConfiguration, error) {
- b := &StorageVersionMigrationApplyConfiguration{}
- err := managedfields.ExtractInto(storageVersionMigration, internal.Parser().Type("io.k8s.api.storagemigration.v1alpha1.StorageVersionMigration"), fieldManager, b, subresource)
- if err != nil {
- return nil, err
- }
- b.WithName(storageVersionMigration.Name)
-
- b.WithKind("StorageVersionMigration")
- b.WithAPIVersion("storagemigration.k8s.io/v1alpha1")
- return b, nil
-}
func (b StorageVersionMigrationApplyConfiguration) IsApplyConfiguration() {}
// WithKind sets the Kind field in the declarative configuration to the given value
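
To illustrate the move to v1beta1, the sketch below builds a StorageVersionMigration whose spec now references a metav1 GroupResource apply configuration (registered in the utils.go hunk further down) instead of the old GroupVersionResource. The object name and the group/resource pair are placeholders.

```go
package main

import (
	"fmt"

	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	storagemigrationv1beta1ac "k8s.io/client-go/applyconfigurations/storagemigration/v1beta1"
)

func main() {
	// Ask for the stored "deployments" data to be rewritten at the latest
	// storage version; the spec now names a group/resource pair only.
	svm := storagemigrationv1beta1ac.StorageVersionMigration("deployments-migration").
		WithSpec(storagemigrationv1beta1ac.StorageVersionMigrationSpec().
			WithResource(metav1ac.GroupResource().
				WithGroup("apps").
				WithResource("deployments")))

	fmt.Println(*svm.Name, *svm.Spec.Resource.Group, *svm.Spec.Resource.Resource)
}
```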
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationspec.go
similarity index 66%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationspec.go
index 02ddb540..57e013d6 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationspec.go
@@ -16,13 +16,21 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
// StorageVersionMigrationSpecApplyConfiguration represents a declarative configuration of the StorageVersionMigrationSpec type for use
// with apply.
+//
+// Spec of the storage version migration.
type StorageVersionMigrationSpecApplyConfiguration struct {
- Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"`
- ContinueToken *string `json:"continueToken,omitempty"`
+ // The resource that is being migrated. The migrator sends requests to
+ // the endpoint serving the resource.
+ // Immutable.
+ Resource *v1.GroupResourceApplyConfiguration `json:"resource,omitempty"`
}
// StorageVersionMigrationSpecApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationSpec type for use with
@@ -34,15 +42,7 @@ func StorageVersionMigrationSpec() *StorageVersionMigrationSpecApplyConfiguratio
// WithResource sets the Resource field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Resource field is set to the value of the last call.
-func (b *StorageVersionMigrationSpecApplyConfiguration) WithResource(value *GroupVersionResourceApplyConfiguration) *StorageVersionMigrationSpecApplyConfiguration {
+func (b *StorageVersionMigrationSpecApplyConfiguration) WithResource(value *v1.GroupResourceApplyConfiguration) *StorageVersionMigrationSpecApplyConfiguration {
b.Resource = value
return b
}
-
-// WithContinueToken sets the ContinueToken field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the ContinueToken field is set to the value of the last call.
-func (b *StorageVersionMigrationSpecApplyConfiguration) WithContinueToken(value string) *StorageVersionMigrationSpecApplyConfiguration {
- b.ContinueToken = &value
- return b
-}
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationstatus.go
similarity index 76%
rename from operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go
rename to operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationstatus.go
index fc957cb1..65f4ca72 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1beta1/storageversionmigrationstatus.go
@@ -16,13 +16,23 @@ limitations under the License.
// Code generated by applyconfiguration-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
+
+import (
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
// StorageVersionMigrationStatusApplyConfiguration represents a declarative configuration of the StorageVersionMigrationStatus type for use
// with apply.
+//
+// Status of the storage version migration.
type StorageVersionMigrationStatusApplyConfiguration struct {
- Conditions []MigrationConditionApplyConfiguration `json:"conditions,omitempty"`
- ResourceVersion *string `json:"resourceVersion,omitempty"`
+ // The latest available observations of the migration's current state.
+ Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
+ // ResourceVersion to compare with the GC cache for performing the migration.
+ // This is the current resource version of given group, version and resource when
+ // kube-controller-manager first observes this StorageVersionMigration resource.
+ ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// StorageVersionMigrationStatusApplyConfiguration constructs a declarative configuration of the StorageVersionMigrationStatus type for use with
@@ -34,7 +44,7 @@ func StorageVersionMigrationStatus() *StorageVersionMigrationStatusApplyConfigur
// WithConditions adds the given value to the Conditions field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Conditions field.
-func (b *StorageVersionMigrationStatusApplyConfiguration) WithConditions(values ...*MigrationConditionApplyConfiguration) *StorageVersionMigrationStatusApplyConfiguration {
+func (b *StorageVersionMigrationStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *StorageVersionMigrationStatusApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithConditions")
diff --git a/operator/vendor/k8s.io/client-go/applyconfigurations/utils.go b/operator/vendor/k8s.io/client-go/applyconfigurations/utils.go
index af434668..ef6f16e8 100644
--- a/operator/vendor/k8s.io/client-go/applyconfigurations/utils.go
+++ b/operator/vendor/k8s.io/client-go/applyconfigurations/utils.go
@@ -69,7 +69,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -126,7 +126,7 @@ import (
applyconfigurationsstoragev1 "k8s.io/client-go/applyconfigurations/storage/v1"
applyconfigurationsstoragev1alpha1 "k8s.io/client-go/applyconfigurations/storage/v1alpha1"
applyconfigurationsstoragev1beta1 "k8s.io/client-go/applyconfigurations/storage/v1beta1"
- applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
+ applyconfigurationsstoragemigrationv1beta1 "k8s.io/client-go/applyconfigurations/storagemigration/v1beta1"
)
// ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no
@@ -630,12 +630,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleApplyConfiguration{}
case certificatesv1alpha1.SchemeGroupVersion.WithKind("ClusterTrustBundleSpec"):
return &applyconfigurationscertificatesv1alpha1.ClusterTrustBundleSpecApplyConfiguration{}
- case certificatesv1alpha1.SchemeGroupVersion.WithKind("PodCertificateRequest"):
- return &applyconfigurationscertificatesv1alpha1.PodCertificateRequestApplyConfiguration{}
- case certificatesv1alpha1.SchemeGroupVersion.WithKind("PodCertificateRequestSpec"):
- return &applyconfigurationscertificatesv1alpha1.PodCertificateRequestSpecApplyConfiguration{}
- case certificatesv1alpha1.SchemeGroupVersion.WithKind("PodCertificateRequestStatus"):
- return &applyconfigurationscertificatesv1alpha1.PodCertificateRequestStatusApplyConfiguration{}
// Group=certificates.k8s.io, Version=v1beta1
case certificatesv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest"):
@@ -650,6 +644,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationscertificatesv1beta1.ClusterTrustBundleApplyConfiguration{}
case certificatesv1beta1.SchemeGroupVersion.WithKind("ClusterTrustBundleSpec"):
return &applyconfigurationscertificatesv1beta1.ClusterTrustBundleSpecApplyConfiguration{}
+ case certificatesv1beta1.SchemeGroupVersion.WithKind("PodCertificateRequest"):
+ return &applyconfigurationscertificatesv1beta1.PodCertificateRequestApplyConfiguration{}
+ case certificatesv1beta1.SchemeGroupVersion.WithKind("PodCertificateRequestSpec"):
+ return &applyconfigurationscertificatesv1beta1.PodCertificateRequestSpecApplyConfiguration{}
+ case certificatesv1beta1.SchemeGroupVersion.WithKind("PodCertificateRequestStatus"):
+ return &applyconfigurationscertificatesv1beta1.PodCertificateRequestStatusApplyConfiguration{}
// Group=coordination.k8s.io, Version=v1
case coordinationv1.SchemeGroupVersion.WithKind("Lease"):
@@ -1084,6 +1084,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationscorev1.WeightedPodAffinityTermApplyConfiguration{}
case corev1.SchemeGroupVersion.WithKind("WindowsSecurityContextOptions"):
return &applyconfigurationscorev1.WindowsSecurityContextOptionsApplyConfiguration{}
+ case corev1.SchemeGroupVersion.WithKind("WorkloadReference"):
+ return &applyconfigurationscorev1.WorkloadReferenceApplyConfiguration{}
// Group=discovery.k8s.io, Version=v1
case discoveryv1.SchemeGroupVersion.WithKind("Endpoint"):
@@ -1406,6 +1408,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationsmetav1.ConditionApplyConfiguration{}
case metav1.SchemeGroupVersion.WithKind("DeleteOptions"):
return &applyconfigurationsmetav1.DeleteOptionsApplyConfiguration{}
+ case metav1.SchemeGroupVersion.WithKind("GroupResource"):
+ return &applyconfigurationsmetav1.GroupResourceApplyConfiguration{}
case metav1.SchemeGroupVersion.WithKind("LabelSelector"):
return &applyconfigurationsmetav1.LabelSelectorApplyConfiguration{}
case metav1.SchemeGroupVersion.WithKind("LabelSelectorRequirement"):
@@ -1708,16 +1712,14 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationsresourcev1.ResourceSliceSpecApplyConfiguration{}
// Group=resource.k8s.io, Version=v1alpha3
- case v1alpha3.SchemeGroupVersion.WithKind("CELDeviceSelector"):
- return &resourcev1alpha3.CELDeviceSelectorApplyConfiguration{}
- case v1alpha3.SchemeGroupVersion.WithKind("DeviceSelector"):
- return &resourcev1alpha3.DeviceSelectorApplyConfiguration{}
case v1alpha3.SchemeGroupVersion.WithKind("DeviceTaint"):
return &resourcev1alpha3.DeviceTaintApplyConfiguration{}
case v1alpha3.SchemeGroupVersion.WithKind("DeviceTaintRule"):
return &resourcev1alpha3.DeviceTaintRuleApplyConfiguration{}
case v1alpha3.SchemeGroupVersion.WithKind("DeviceTaintRuleSpec"):
return &resourcev1alpha3.DeviceTaintRuleSpecApplyConfiguration{}
+ case v1alpha3.SchemeGroupVersion.WithKind("DeviceTaintRuleStatus"):
+ return &resourcev1alpha3.DeviceTaintRuleStatusApplyConfiguration{}
case v1alpha3.SchemeGroupVersion.WithKind("DeviceTaintSelector"):
return &resourcev1alpha3.DeviceTaintSelectorApplyConfiguration{}
@@ -1886,8 +1888,20 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationsschedulingv1.PriorityClassApplyConfiguration{}
// Group=scheduling.k8s.io, Version=v1alpha1
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("GangSchedulingPolicy"):
+ return &applyconfigurationsschedulingv1alpha1.GangSchedulingPolicyApplyConfiguration{}
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("PodGroup"):
+ return &applyconfigurationsschedulingv1alpha1.PodGroupApplyConfiguration{}
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("PodGroupPolicy"):
+ return &applyconfigurationsschedulingv1alpha1.PodGroupPolicyApplyConfiguration{}
case schedulingv1alpha1.SchemeGroupVersion.WithKind("PriorityClass"):
return &applyconfigurationsschedulingv1alpha1.PriorityClassApplyConfiguration{}
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("TypedLocalObjectReference"):
+ return &applyconfigurationsschedulingv1alpha1.TypedLocalObjectReferenceApplyConfiguration{}
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("Workload"):
+ return &applyconfigurationsschedulingv1alpha1.WorkloadApplyConfiguration{}
+ case schedulingv1alpha1.SchemeGroupVersion.WithKind("WorkloadSpec"):
+ return &applyconfigurationsschedulingv1alpha1.WorkloadSpecApplyConfiguration{}
// Group=scheduling.k8s.io, Version=v1beta1
case schedulingv1beta1.SchemeGroupVersion.WithKind("PriorityClass"):
@@ -1973,17 +1987,13 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
case storagev1beta1.SchemeGroupVersion.WithKind("VolumeNodeResources"):
return &applyconfigurationsstoragev1beta1.VolumeNodeResourcesApplyConfiguration{}
- // Group=storagemigration.k8s.io, Version=v1alpha1
- case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("GroupVersionResource"):
- return &applyconfigurationsstoragemigrationv1alpha1.GroupVersionResourceApplyConfiguration{}
- case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("MigrationCondition"):
- return &applyconfigurationsstoragemigrationv1alpha1.MigrationConditionApplyConfiguration{}
- case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"):
- return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration{}
- case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationSpec"):
- return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationSpecApplyConfiguration{}
- case storagemigrationv1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigrationStatus"):
- return &applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationStatusApplyConfiguration{}
+ // Group=storagemigration.k8s.io, Version=v1beta1
+ case storagemigrationv1beta1.SchemeGroupVersion.WithKind("StorageVersionMigration"):
+ return &applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationApplyConfiguration{}
+ case storagemigrationv1beta1.SchemeGroupVersion.WithKind("StorageVersionMigrationSpec"):
+ return &applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationSpecApplyConfiguration{}
+ case storagemigrationv1beta1.SchemeGroupVersion.WithKind("StorageVersionMigrationStatus"):
+ return &applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationStatusApplyConfiguration{}
}
return nil
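Note: ForKind is the lookup table used by server-side-apply tooling to map a GroupVersionKind to its generated apply configuration; unregistered kinds return nil. A minimal sketch against one of the kinds registered above (a hedged illustration, not part of this change):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/applyconfigurations"
)

func main() {
	// Look up the apply configuration registered for metav1 Condition;
	// ForKind returns nil for kinds that are not registered.
	gvk := metav1.SchemeGroupVersion.WithKind("Condition")
	ac := applyconfigurations.ForKind(gvk)
	fmt.Printf("%T\n", ac) // expected: *v1.ConditionApplyConfiguration
}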
diff --git a/operator/vendor/k8s.io/client-go/discovery/discovery_client.go b/operator/vendor/k8s.io/client-go/discovery/discovery_client.go
index 646820cb..aae94f9b 100644
--- a/operator/vendor/k8s.io/client-go/discovery/discovery_client.go
+++ b/operator/vendor/k8s.io/client-go/discovery/discovery_client.go
@@ -63,8 +63,9 @@ const (
// Aggregated discovery content-type (v2beta1). NOTE: content-type parameters
// MUST be ordered (g, v, as) for server in "Accept" header (BUT we are resilient
// to ordering when comparing returned values in "Content-Type" header).
- AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList"
- AcceptV2 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList"
+ AcceptV2Beta1 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2beta1;as=APIGroupDiscoveryList"
+ AcceptV2 = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList"
+ AcceptV2NoPeer = runtime.ContentTypeJSON + ";" + "g=apidiscovery.k8s.io;v=v2;as=APIGroupDiscoveryList;profile=nopeer"
// Prioritize aggregated discovery by placing first in the order of discovery accept types.
acceptDiscoveryFormats = AcceptV2 + "," + AcceptV2Beta1 + "," + AcceptV1
)
@@ -168,6 +169,11 @@ type DiscoveryClient struct {
LegacyPrefix string
// Forces the client to request only "unaggregated" (legacy) discovery.
UseLegacyDiscovery bool
+ // NoPeerDiscovery will request the "nopeer" profile of aggregated discovery.
+ // This allows a client to get just the discovery documents served by the single apiserver
+ // that it is talking to. This is useful for clients that need to understand the state
+ // of a single apiserver, for example, to validate that the apiserver is ready to serve traffic.
+ NoPeerDiscovery bool
}
var _ AggregatedDiscoveryInterface = &DiscoveryClient{}
@@ -241,10 +247,7 @@ func (d *DiscoveryClient) downloadLegacy() (
map[schema.GroupVersion]*metav1.APIResourceList,
map[schema.GroupVersion]error,
error) {
- accept := acceptDiscoveryFormats
- if d.UseLegacyDiscovery {
- accept = AcceptV1
- }
+ accept := selectDiscoveryAcceptHeader(d.UseLegacyDiscovery, d.NoPeerDiscovery)
var responseContentType string
body, err := d.restClient.Get().
AbsPath("/api").
@@ -307,10 +310,7 @@ func (d *DiscoveryClient) downloadAPIs() (
map[schema.GroupVersion]*metav1.APIResourceList,
map[schema.GroupVersion]error,
error) {
- accept := acceptDiscoveryFormats
- if d.UseLegacyDiscovery {
- accept = AcceptV1
- }
+ accept := selectDiscoveryAcceptHeader(d.UseLegacyDiscovery, d.NoPeerDiscovery)
var responseContentType string
body, err := d.restClient.Get().
AbsPath("/apis").
@@ -351,6 +351,16 @@ func (d *DiscoveryClient) downloadAPIs() (
return apiGroupList, resourcesByGV, failedGVs, nil
}
+func selectDiscoveryAcceptHeader(useLegacy, nopeer bool) string {
+ if useLegacy {
+ return AcceptV1
+ }
+ if nopeer {
+ return AcceptV2NoPeer + "," + acceptDiscoveryFormats
+ }
+ return acceptDiscoveryFormats
+}
+
// ContentTypeIsGVK checks if the content-type string is both
// "application/json" and matches the provided GVK. An error
// is returned if the content type string is malformed.
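Note: the new NoPeerDiscovery field asks only the connected apiserver for its own discovery document by prepending the "nopeer" profile to the aggregated-discovery Accept header. A hedged usage sketch of toggling it on a DiscoveryClient (the kubeconfig path is illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Ask only the apiserver we are connected to for its own discovery document.
	dc.NoPeerDiscovery = true
	groups, err := dc.ServerGroups()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(groups.Groups), "API groups served by this apiserver")
}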
diff --git a/operator/vendor/k8s.io/client-go/features/features.go b/operator/vendor/k8s.io/client-go/features/features.go
index 5ccdcc55..cabb7468 100644
--- a/operator/vendor/k8s.io/client-go/features/features.go
+++ b/operator/vendor/k8s.io/client-go/features/features.go
@@ -21,6 +21,7 @@ import (
"sync/atomic"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/version"
)
// NOTE: types Feature, FeatureSpec, prerelease (and its values)
@@ -49,8 +50,14 @@ type FeatureSpec struct {
LockToDefault bool
// PreRelease indicates the maturity level of the feature
PreRelease prerelease
+ // Version indicates the earliest version from which this FeatureSpec is valid.
+ // If multiple FeatureSpecs exist for a Feature, the one with the highest version that is less
+ // than or equal to the effective version of the component is used.
+ Version *version.Version
}
+type VersionedSpecs []FeatureSpec
+
// Gates indicates whether a given feature is enabled or not.
type Gates interface {
// Enabled returns true if the key is enabled.
@@ -66,6 +73,15 @@ type Registry interface {
Add(map[Feature]FeatureSpec) error
}
+// VersionedRegistry represents an external versioned feature gates registry.
+type VersionedRegistry interface {
+ // AddVersioned adds existing versioned feature gates to the provided registry.
+ //
+ // As of today, this method is used by AddVersionedFeaturesToExistingFeatureGates and
+ // ReplaceFeatureGates to take control of the features exposed by this library.
+ AddVersioned(in map[Feature]VersionedSpecs) error
+}
+
// FeatureGates returns the feature gates exposed by this library.
//
// By default, only the default features gates will be returned.
@@ -85,7 +101,15 @@ func FeatureGates() Gates {
// Usually this function is combined with ReplaceFeatureGates to take control of the
// features exposed by this library.
func AddFeaturesToExistingFeatureGates(registry Registry) error {
- return registry.Add(defaultKubernetesFeatureGates)
+ return registry.Add(unversionedFeatureGates(defaultVersionedKubernetesFeatureGates))
+}
+
+// AddVersionedFeaturesToExistingFeatureGates adds the default versioned feature gates to the provided registry.
+// Usually this function is combined with ReplaceFeatureGates to take control of the
+// features exposed by this library.
+// Generally only used by k/k.
+func AddVersionedFeaturesToExistingFeatureGates(registry VersionedRegistry) error {
+ return registry.AddVersioned(defaultVersionedKubernetesFeatureGates)
}
// ReplaceFeatureGates overwrites the default implementation of the feature gates
@@ -121,8 +145,23 @@ func replaceFeatureGatesWithWarningIndicator(newFeatureGates Gates) bool {
return shouldProduceWarning
}
+// unversionedFeatureGates takes the latest entry from the VersionedSpecs of each feature, and clears out the version information,
+// so that the result can be used with an unversioned feature gate.
+func unversionedFeatureGates(featureGates map[Feature]VersionedSpecs) map[Feature]FeatureSpec {
+ unversioned := map[Feature]FeatureSpec{}
+ for feature, specs := range featureGates {
+ if len(specs) == 0 {
+ continue
+ }
+ latestSpec := specs[len(specs)-1]
+ latestSpec.Version = nil // Clear version information.
+ unversioned[feature] = latestSpec
+ }
+ return unversioned
+}
+
func init() {
- envVarGates := newEnvVarFeatureGates(defaultKubernetesFeatureGates)
+ envVarGates := newEnvVarFeatureGates(unversionedFeatureGates(defaultVersionedKubernetesFeatureGates))
wrappedFeatureGates := &featureGatesWrapper{envVarGates}
featureGates.Store(wrappedFeatureGates)
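Note: client-go's unversioned defaults are now derived from the last entry of each feature's VersionedSpecs, and the default registry remains environment-variable driven. A hedged sketch of reading a gate, assuming the KUBE_FEATURE_<name> convention used by the env-var gate reader (gates are read once, so overrides must be set before the first lookup):

package main

import (
	"fmt"
	"os"

	clientfeatures "k8s.io/client-go/features"
)

func main() {
	// Assumption: the default env-var backed gates honor KUBE_FEATURE_<Feature>.
	// Override the 1.35 default (true) for WatchListClient before the first read.
	os.Setenv("KUBE_FEATURE_WatchListClient", "false")

	gates := clientfeatures.FeatureGates()
	fmt.Println("WatchListClient enabled:", gates.Enabled(clientfeatures.WatchListClient))
}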
diff --git a/operator/vendor/k8s.io/client-go/features/known_features.go b/operator/vendor/k8s.io/client-go/features/known_features.go
index 4aa8e40c..4b022c4b 100644
--- a/operator/vendor/k8s.io/client-go/features/known_features.go
+++ b/operator/vendor/k8s.io/client-go/features/known_features.go
@@ -16,6 +16,10 @@ limitations under the License.
package features
+import (
+ "k8s.io/apimachinery/pkg/util/version"
+)
+
// Every feature gate should have an entry here following this template:
//
// // owner: @username
@@ -54,30 +58,48 @@ const (
// Refactor informers to deliver watch stream events in order instead of out of order.
InOrderInformers Feature = "InOrderInformers"
- // owner: @nilekhc
+ // owner: @yue9944882
+ // beta: v1.35
+ //
+ // Allow InOrderInformers to process incoming events in batches to speed up processing.
+ InOrderInformersBatchProcess Feature = "InOrderInformersBatchProcess"
+
+ // owner: @enj, @michaelasp
// alpha: v1.30
+ // GA: v1.35
InformerResourceVersion Feature = "InformerResourceVersion"
// owner: @p0lyn0mial
// beta: v1.30
//
// Allow the client to get a stream of individual items instead of chunking from the server.
- //
- // NOTE:
- // The feature is disabled in Beta by default because
- // it will only be turned on for selected control plane component(s).
WatchListClient Feature = "WatchListClient"
)
-// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
+// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
//
// To add a new feature, define a key for it above and add it here.
// After registering with the binary, the features are, by default, controllable using environment variables.
// For more details, please see envVarFeatureGates implementation.
-var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{
- ClientsAllowCBOR: {Default: false, PreRelease: Alpha},
- ClientsPreferCBOR: {Default: false, PreRelease: Alpha},
- InOrderInformers: {Default: true, PreRelease: Beta},
- InformerResourceVersion: {Default: false, PreRelease: Alpha},
- WatchListClient: {Default: false, PreRelease: Beta},
+var defaultVersionedKubernetesFeatureGates = map[Feature]VersionedSpecs{
+ ClientsAllowCBOR: {
+ {Version: version.MustParse("1.32"), Default: false, PreRelease: Alpha},
+ },
+ ClientsPreferCBOR: {
+ {Version: version.MustParse("1.32"), Default: false, PreRelease: Alpha},
+ },
+ InOrderInformers: {
+ {Version: version.MustParse("1.33"), Default: true, PreRelease: Beta},
+ },
+ InOrderInformersBatchProcess: {
+ {Version: version.MustParse("1.35"), Default: true, PreRelease: Beta},
+ },
+ InformerResourceVersion: {
+ {Version: version.MustParse("1.30"), Default: false, PreRelease: Alpha},
+ {Version: version.MustParse("1.35"), Default: true, PreRelease: GA},
+ },
+ WatchListClient: {
+ {Version: version.MustParse("1.30"), Default: false, PreRelease: Beta},
+ {Version: version.MustParse("1.35"), Default: true, PreRelease: Beta},
+ },
}
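Note: when a feature carries multiple versioned specs, the effective one is the highest entry whose Version is at most the component's effective version, per the FeatureSpec.Version comment above. A standalone sketch of that selection using local stand-in types (not the library's internal resolver):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

// spec is a local stand-in for the fields of FeatureSpec that matter here.
type spec struct {
	Default bool
	Version *version.Version
}

// resolve picks the highest spec whose Version is <= effective,
// assuming specs are ordered from oldest to newest as in the table above.
func resolve(specs []spec, effective *version.Version) (spec, bool) {
	var out spec
	found := false
	for _, s := range specs {
		if !effective.LessThan(s.Version) {
			out, found = s, true
		}
	}
	return out, found
}

func main() {
	specs := []spec{
		{Version: version.MustParse("1.30"), Default: false}, // Alpha entry
		{Version: version.MustParse("1.35"), Default: true},  // GA entry
	}
	s, _ := resolve(specs, version.MustParse("1.33"))
	fmt.Println("default at 1.33:", s.Default) // false: the 1.30 entry applies
}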
diff --git a/operator/vendor/k8s.io/client-go/gentype/type.go b/operator/vendor/k8s.io/client-go/gentype/type.go
index e9f42e14..e91941d4 100644
--- a/operator/vendor/k8s.io/client-go/gentype/type.go
+++ b/operator/vendor/k8s.io/client-go/gentype/type.go
@@ -23,9 +23,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
- types "k8s.io/apimachinery/pkg/types"
- watch "k8s.io/apimachinery/pkg/watch"
- rest "k8s.io/client-go/rest"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/util/apply"
)
@@ -318,7 +318,7 @@ func (a *alsoApplier[T, C]) Apply(ctx context.Context, obj C, opts metav1.ApplyO
return result, err
}
-// Apply takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource.
+// ApplyStatus takes the given apply declarative configuration, applies it to the status subresource and returns the applied resource.
func (a *alsoApplier[T, C]) ApplyStatus(ctx context.Context, obj C, opts metav1.ApplyOptions) (T, error) {
if obj == *new(C) {
return *new(T), fmt.Errorf("object provided to Apply must not be nil")
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
index 7adafde2..77439c4b 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -56,7 +56,7 @@ func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface
}
return client.AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1.MutatingWebhookConfiguration{},
resyncPeriod,
indexers,
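Note: this and the following generated informer constructors all wrap their ListWatch in cache.ToListWatcherWithWatchListSemantics so the WatchListClient gate can stream initial state instead of paginated lists; callers of the shared informer factory are unaffected. A hedged usage sketch showing that the factory API is unchanged (kubeconfig path is illustrative):

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Callers keep using the factory as before; the watch-list wrapper is
	// applied inside the generated New*Informer constructors.
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Admissionregistration().V1().MutatingWebhookConfigurations().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	_ = informer.HasSynced()
}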
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go
index 92cfa1fa..89b86c09 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r
}
return client.AdmissionregistrationV1().ValidatingAdmissionPolicies().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1.ValidatingAdmissionPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go
index e0c35ec5..b318ed44 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter
}
return client.AdmissionregistrationV1().ValidatingAdmissionPolicyBindings().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1.ValidatingAdmissionPolicyBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
index 8ddeb049..d1d38407 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -56,7 +56,7 @@ func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resy
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa
}
return client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1.ValidatingWebhookConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
index 939eff98..2a0cb2cf 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go
@@ -56,7 +56,7 @@ func NewMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, res
}
return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicies().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
index a94f6d27..23ef580f 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go
@@ -56,7 +56,7 @@ func NewMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resy
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interfa
}
return client.AdmissionregistrationV1alpha1().MutatingAdmissionPolicyBindings().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
index 1a6f7d56..9a896b70 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicy.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r
}
return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicies().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
index 3afaa3be..630471e9 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/validatingadmissionpolicybinding.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter
}
return client.AdmissionregistrationV1alpha1().ValidatingAdmissionPolicyBindings().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1alpha1.ValidatingAdmissionPolicyBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go
index c2df805f..bfd1e424 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicy.go
@@ -56,7 +56,7 @@ func NewMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingAdmissionPolicyInformer(client kubernetes.Interface, res
}
return client.AdmissionregistrationV1beta1().MutatingAdmissionPolicies().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.MutatingAdmissionPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
index 0adc02f7..416590c0 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingadmissionpolicybinding.go
@@ -56,7 +56,7 @@ func NewMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resy
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingAdmissionPolicyBindingInformer(client kubernetes.Interfa
}
return client.AdmissionregistrationV1beta1().MutatingAdmissionPolicyBindings().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.MutatingAdmissionPolicyBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 697dae85..2f086798 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -56,7 +56,7 @@ func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface
}
return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.MutatingWebhookConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
index 31c3569d..18960ff9 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicy.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyInformer(client kubernetes.Interface, r
}
return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicies().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
index fb2c10e3..ab3febb1 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingadmissionpolicybinding.go
@@ -56,7 +56,7 @@ func NewValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingAdmissionPolicyBindingInformer(client kubernetes.Inter
}
return client.AdmissionregistrationV1beta1().ValidatingAdmissionPolicyBindings().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.ValidatingAdmissionPolicyBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index 2eb6991c..ae888605 100644
--- a/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -56,7 +56,7 @@ func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resy
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interfa
}
return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiadmissionregistrationv1beta1.ValidatingWebhookConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go b/operator/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go
index e8e1669d..1f1cfd50 100644
--- a/operator/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go
+++ b/operator/vendor/k8s.io/client-go/informers/apiserverinternal/v1alpha1/storageversion.go
@@ -56,7 +56,7 @@ func NewStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Du
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredStorageVersionInformer(client kubernetes.Interface, resyncPeriod
}
return client.InternalV1alpha1().StorageVersions().Watch(ctx, options)
},
- },
+ }, client),
&apiapiserverinternalv1alpha1.StorageVersion{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/operator/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
index 64eeddec..1d306123 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go
@@ -57,7 +57,7 @@ func NewControllerRevisionInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac
}
return client.AppsV1().ControllerRevisions(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1.ControllerRevision{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
index 4a3e95e1..658d6b9a 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go
@@ -57,7 +57,7 @@ func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncP
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string,
}
return client.AppsV1().DaemonSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1.DaemonSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/operator/vendor/k8s.io/client-go/informers/apps/v1/deployment.go
index 9c0c20c5..4a8a29c2 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1/deployment.go
@@ -57,7 +57,7 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string
}
return client.AppsV1().Deployments(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1.Deployment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
index 75c7a79e..d37292cb 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go
@@ -57,7 +57,7 @@ func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string
}
return client.AppsV1().ReplicaSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1.ReplicaSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go
index f759e046..70a5f9c0 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go
@@ -57,7 +57,7 @@ func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin
}
return client.AppsV1().StatefulSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1.StatefulSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go
index 79b2fb90..deaaa24c 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go
@@ -57,7 +57,7 @@ func NewControllerRevisionInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac
}
return client.AppsV1beta1().ControllerRevisions(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta1.ControllerRevision{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go
index 1334c03a..4dbad1e9 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go
@@ -57,7 +57,7 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string
}
return client.AppsV1beta1().Deployments(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta1.Deployment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go
index 2d52ae02..2c5aa846 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go
@@ -57,7 +57,7 @@ func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin
}
return client.AppsV1beta1().StatefulSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta1.StatefulSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go
index 0936ef7b..f6cca7d5 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go
@@ -57,7 +57,7 @@ func NewControllerRevisionInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespac
}
return client.AppsV1beta2().ControllerRevisions(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta2.ControllerRevision{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go
index d5c49d77..aaec4988 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go
@@ -57,7 +57,7 @@ func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncP
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string,
}
return client.AppsV1beta2().DaemonSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta2.DaemonSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go
index 575ddbfc..69f6e3fb 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go
@@ -57,7 +57,7 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string
}
return client.AppsV1beta2().Deployments(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta2.Deployment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go
index cfc4b328..a65d4516 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go
@@ -57,7 +57,7 @@ func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string
}
return client.AppsV1beta2().ReplicaSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta2.ReplicaSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go
index a514c5bb..5e90c39c 100644
--- a/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go
+++ b/operator/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go
@@ -57,7 +57,7 @@ func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace strin
}
return client.AppsV1beta2().StatefulSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiappsv1beta2.StatefulSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
index e92f7563..98aacb1e 100644
--- a/operator/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go
@@ -57,7 +57,7 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam
}
return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiautoscalingv1.HorizontalPodAutoscaler{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go
index b5d4123e..ce86ea87 100644
--- a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2/horizontalpodautoscaler.go
@@ -57,7 +57,7 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam
}
return client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiautoscalingv2.HorizontalPodAutoscaler{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
index 5a64e7ef..71368420 100644
--- a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -57,7 +57,7 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam
}
return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiautoscalingv2beta1.HorizontalPodAutoscaler{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go
index 2d4c3f1d..4ff8b103 100644
--- a/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/operator/vendor/k8s.io/client-go/informers/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -57,7 +57,7 @@ func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace s
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, nam
}
return client.AutoscalingV2beta2().HorizontalPodAutoscalers(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiautoscalingv2beta2.HorizontalPodAutoscaler{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go b/operator/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go
index ee4f8808..9875a88b 100644
--- a/operator/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go
+++ b/operator/vendor/k8s.io/client-go/informers/batch/v1/cronjob.go
@@ -57,7 +57,7 @@ func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r
}
return client.BatchV1().CronJobs(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apibatchv1.CronJob{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/batch/v1/job.go b/operator/vendor/k8s.io/client-go/informers/batch/v1/job.go
index d3965f53..a68178d6 100644
--- a/operator/vendor/k8s.io/client-go/informers/batch/v1/job.go
+++ b/operator/vendor/k8s.io/client-go/informers/batch/v1/job.go
@@ -57,7 +57,7 @@ func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyn
}
return client.BatchV1().Jobs(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apibatchv1.Job{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/operator/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go
index 1cf169d9..16644920 100644
--- a/operator/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go
+++ b/operator/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go
@@ -57,7 +57,7 @@ func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, r
}
return client.BatchV1beta1().CronJobs(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apibatchv1beta1.CronJob{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go
index 076da136..a16eb3f3 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1/certificatesigningrequest.go
@@ -56,7 +56,7 @@ func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r
}
return client.CertificatesV1().CertificateSigningRequests().Watch(ctx, options)
},
- },
+ }, client),
&apicertificatesv1.CertificateSigningRequest{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go
index ca5ee2c9..02d8800f 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/clustertrustbundle.go
@@ -56,7 +56,7 @@ func NewClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod tim
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPe
}
return client.CertificatesV1alpha1().ClusterTrustBundles().Watch(ctx, options)
},
- },
+ }, client),
&apicertificatesv1alpha1.ClusterTrustBundle{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go
index 870fa0cf..40ce8f42 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/interface.go
@@ -26,8 +26,6 @@ import (
type Interface interface {
// ClusterTrustBundles returns a ClusterTrustBundleInformer.
ClusterTrustBundles() ClusterTrustBundleInformer
- // PodCertificateRequests returns a PodCertificateRequestInformer.
- PodCertificateRequests() PodCertificateRequestInformer
}
type version struct {
@@ -45,8 +43,3 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
func (v *version) ClusterTrustBundles() ClusterTrustBundleInformer {
return &clusterTrustBundleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
-
-// PodCertificateRequests returns a PodCertificateRequestInformer.
-func (v *version) PodCertificateRequests() PodCertificateRequestInformer {
- return &podCertificateRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
-}
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go
index f93d435a..57097b7a 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go
@@ -56,7 +56,7 @@ func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, r
}
return client.CertificatesV1beta1().CertificateSigningRequests().Watch(ctx, options)
},
- },
+ }, client),
&apicertificatesv1beta1.CertificateSigningRequest{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go
index c4a69b22..93b5d631 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/clustertrustbundle.go
@@ -56,7 +56,7 @@ func NewClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod tim
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterTrustBundleInformer(client kubernetes.Interface, resyncPe
}
return client.CertificatesV1beta1().ClusterTrustBundles().Watch(ctx, options)
},
- },
+ }, client),
&apicertificatesv1beta1.ClusterTrustBundle{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go
index f13d5e66..b2d39e20 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go
@@ -28,6 +28,8 @@ type Interface interface {
CertificateSigningRequests() CertificateSigningRequestInformer
// ClusterTrustBundles returns a ClusterTrustBundleInformer.
ClusterTrustBundles() ClusterTrustBundleInformer
+ // PodCertificateRequests returns a PodCertificateRequestInformer.
+ PodCertificateRequests() PodCertificateRequestInformer
}
type version struct {
@@ -50,3 +52,8 @@ func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer
func (v *version) ClusterTrustBundles() ClusterTrustBundleInformer {
return &clusterTrustBundleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
+
+// PodCertificateRequests returns a PodCertificateRequestInformer.
+func (v *version) PodCertificateRequests() PodCertificateRequestInformer {
+ return &podCertificateRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/podcertificaterequest.go b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/podcertificaterequest.go
similarity index 77%
rename from operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/podcertificaterequest.go
rename to operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/podcertificaterequest.go
index 71382e24..2d41c76e 100644
--- a/operator/vendor/k8s.io/client-go/informers/certificates/v1alpha1/podcertificaterequest.go
+++ b/operator/vendor/k8s.io/client-go/informers/certificates/v1beta1/podcertificaterequest.go
@@ -16,19 +16,19 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
context "context"
time "time"
- apicertificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
+ apicertificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
- certificatesv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
+ certificatesv1beta1 "k8s.io/client-go/listers/certificates/v1beta1"
cache "k8s.io/client-go/tools/cache"
)
@@ -36,7 +36,7 @@ import (
// PodCertificateRequests.
type PodCertificateRequestInformer interface {
Informer() cache.SharedIndexInformer
- Lister() certificatesv1alpha1.PodCertificateRequestLister
+ Lister() certificatesv1beta1.PodCertificateRequestLister
}
type podCertificateRequestInformer struct {
@@ -57,33 +57,33 @@ func NewPodCertificateRequestInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodCertificateRequestInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.CertificatesV1alpha1().PodCertificateRequests(namespace).List(context.Background(), options)
+ return client.CertificatesV1beta1().PodCertificateRequests(namespace).List(context.Background(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.CertificatesV1alpha1().PodCertificateRequests(namespace).Watch(context.Background(), options)
+ return client.CertificatesV1beta1().PodCertificateRequests(namespace).Watch(context.Background(), options)
},
ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.CertificatesV1alpha1().PodCertificateRequests(namespace).List(ctx, options)
+ return client.CertificatesV1beta1().PodCertificateRequests(namespace).List(ctx, options)
},
WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.CertificatesV1alpha1().PodCertificateRequests(namespace).Watch(ctx, options)
+ return client.CertificatesV1beta1().PodCertificateRequests(namespace).Watch(ctx, options)
},
- },
- &apicertificatesv1alpha1.PodCertificateRequest{},
+ }, client),
+ &apicertificatesv1beta1.PodCertificateRequest{},
resyncPeriod,
indexers,
)
@@ -94,9 +94,9 @@ func (f *podCertificateRequestInformer) defaultInformer(client kubernetes.Interf
}
func (f *podCertificateRequestInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&apicertificatesv1alpha1.PodCertificateRequest{}, f.defaultInformer)
+ return f.factory.InformerFor(&apicertificatesv1beta1.PodCertificateRequest{}, f.defaultInformer)
}
-func (f *podCertificateRequestInformer) Lister() certificatesv1alpha1.PodCertificateRequestLister {
- return certificatesv1alpha1.NewPodCertificateRequestLister(f.Informer().GetIndexer())
+func (f *podCertificateRequestInformer) Lister() certificatesv1beta1.PodCertificateRequestLister {
+ return certificatesv1beta1.NewPodCertificateRequestLister(f.Informer().GetIndexer())
}
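
Taken together with the two interface.go hunks above, this rename moves the PodCertificateRequest informer from certificates/v1alpha1 to certificates/v1beta1: the accessor is dropped from the v1alpha1 Interface, added to the v1beta1 Interface, and the generated informer is regenerated against the v1beta1 API and lister types. A short sketch of where consumers now find it (illustrative only; assumes the usual generated factory wiring, which is not shown in this diff):

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func podCertificateRequests(client kubernetes.Interface) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Minute)

	// Before this bump: factory.Certificates().V1alpha1().PodCertificateRequests()
	pcr := factory.Certificates().V1beta1().PodCertificateRequests()

	_ = pcr.Informer() // cache.SharedIndexInformer for certificates/v1beta1 PodCertificateRequest
	_ = pcr.Lister()   // certificatesv1beta1.PodCertificateRequestLister, per the rename above
}
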
diff --git a/operator/vendor/k8s.io/client-go/informers/coordination/v1/lease.go b/operator/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
index 2d0c812d..1a34bf73 100644
--- a/operator/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
+++ b/operator/vendor/k8s.io/client-go/informers/coordination/v1/lease.go
@@ -57,7 +57,7 @@ func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res
}
return client.CoordinationV1().Leases(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicoordinationv1.Lease{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go b/operator/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go
index c220a9b2..f029be06 100644
--- a/operator/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go
+++ b/operator/vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go
@@ -57,7 +57,7 @@ func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace st
}
return client.CoordinationV1alpha2().LeaseCandidates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicoordinationv1alpha2.LeaseCandidate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go b/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go
index ef91381c..f254e30d 100644
--- a/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go
+++ b/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/lease.go
@@ -57,7 +57,7 @@ func NewLeaseInformer(client kubernetes.Interface, namespace string, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredLeaseInformer(client kubernetes.Interface, namespace string, res
}
return client.CoordinationV1beta1().Leases(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicoordinationv1beta1.Lease{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go b/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go
index 4315e50c..bdb3e1be 100644
--- a/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go
+++ b/operator/vendor/k8s.io/client-go/informers/coordination/v1beta1/leasecandidate.go
@@ -57,7 +57,7 @@ func NewLeaseCandidateInformer(client kubernetes.Interface, namespace string, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredLeaseCandidateInformer(client kubernetes.Interface, namespace st
}
return client.CoordinationV1beta1().LeaseCandidates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicoordinationv1beta1.LeaseCandidate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/operator/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go
index a7992bfd..9cf1a062 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go
@@ -56,7 +56,7 @@ func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.D
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPerio
}
return client.CoreV1().ComponentStatuses().Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.ComponentStatus{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/operator/vendor/k8s.io/client-go/informers/core/v1/configmap.go
index 014e55af..5772678b 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/configmap.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/configmap.go
@@ -57,7 +57,7 @@ func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncP
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string,
}
return client.CoreV1().ConfigMaps(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.ConfigMap{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/operator/vendor/k8s.io/client-go/informers/core/v1/endpoints.go
index 2d4412ad..6c55df98 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/endpoints.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/endpoints.go
@@ -57,7 +57,7 @@ func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncP
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string,
}
return client.CoreV1().Endpoints(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Endpoints{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/event.go b/operator/vendor/k8s.io/client-go/informers/core/v1/event.go
index 80a5cad8..63df621b 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/event.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/event.go
@@ -57,7 +57,7 @@ func NewEventInformer(client kubernetes.Interface, namespace string, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res
}
return client.CoreV1().Events(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Event{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/operator/vendor/k8s.io/client-go/informers/core/v1/limitrange.go
index cf8e1eb4..229adb2b 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/limitrange.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/limitrange.go
@@ -57,7 +57,7 @@ func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string
}
return client.CoreV1().LimitRanges(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.LimitRange{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/operator/vendor/k8s.io/client-go/informers/core/v1/namespace.go
index ae09888b..d45d2eee 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/namespace.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/namespace.go
@@ -56,7 +56,7 @@ func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duratio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time
}
return client.CoreV1().Namespaces().Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Namespace{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/node.go b/operator/vendor/k8s.io/client-go/informers/core/v1/node.go
index a036db03..a5224761 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/node.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/node.go
@@ -56,7 +56,7 @@ func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, in
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Dura
}
return client.CoreV1().Nodes().Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Node{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go
index 4d1d63ea..0458fc19 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go
@@ -56,7 +56,7 @@ func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeri
}
return client.CoreV1().PersistentVolumes().Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.PersistentVolume{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go
index 87a4cc03..dd75924d 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go
@@ -57,7 +57,7 @@ func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, names
}
return client.CoreV1().PersistentVolumeClaims(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.PersistentVolumeClaim{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/pod.go b/operator/vendor/k8s.io/client-go/informers/core/v1/pod.go
index e3a40729..b68ebc1d 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/pod.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/pod.go
@@ -57,7 +57,7 @@ func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyn
}
return client.CoreV1().Pods(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Pod{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/operator/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go
index 9d6e7048..c4b4cd12 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go
@@ -57,7 +57,7 @@ func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace strin
}
return client.CoreV1().PodTemplates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.PodTemplate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/operator/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go
index 89c216e8..58b66c2b 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go
@@ -57,7 +57,7 @@ func NewReplicationControllerInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredReplicationControllerInformer(client kubernetes.Interface, names
}
return client.CoreV1().ReplicationControllers(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.ReplicationController{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/operator/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go
index aa77e057..408c0245 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go
@@ -57,7 +57,7 @@ func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace str
}
return client.CoreV1().ResourceQuotas(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.ResourceQuota{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/secret.go b/operator/vendor/k8s.io/client-go/informers/core/v1/secret.go
index be5a80a9..7f7338d0 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/secret.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/secret.go
@@ -57,7 +57,7 @@ func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeri
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, re
}
return client.CoreV1().Secrets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Secret{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/service.go b/operator/vendor/k8s.io/client-go/informers/core/v1/service.go
index 10b05875..b4dcb75d 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/service.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/service.go
@@ -57,7 +57,7 @@ func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, r
}
return client.CoreV1().Services(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.Service{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/operator/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go
index 59443961..5ddc98b2 100644
--- a/operator/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go
+++ b/operator/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go
@@ -57,7 +57,7 @@ func NewServiceAccountInformer(client kubernetes.Interface, namespace string, re
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace st
}
return client.CoreV1().ServiceAccounts(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apicorev1.ServiceAccount{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go b/operator/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go
index 38f09183..12919128 100644
--- a/operator/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go
+++ b/operator/vendor/k8s.io/client-go/informers/discovery/v1/endpointslice.go
@@ -57,7 +57,7 @@ func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str
}
return client.DiscoveryV1().EndpointSlices(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apidiscoveryv1.EndpointSlice{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go b/operator/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go
index 23c51eb7..80147e24 100644
--- a/operator/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go
+++ b/operator/vendor/k8s.io/client-go/informers/discovery/v1beta1/endpointslice.go
@@ -57,7 +57,7 @@ func NewEndpointSliceInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEndpointSliceInformer(client kubernetes.Interface, namespace str
}
return client.DiscoveryV1beta1().EndpointSlices(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apidiscoveryv1beta1.EndpointSlice{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/events/v1/event.go b/operator/vendor/k8s.io/client-go/informers/events/v1/event.go
index 139cc155..68eafe31 100644
--- a/operator/vendor/k8s.io/client-go/informers/events/v1/event.go
+++ b/operator/vendor/k8s.io/client-go/informers/events/v1/event.go
@@ -57,7 +57,7 @@ func NewEventInformer(client kubernetes.Interface, namespace string, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res
}
return client.EventsV1().Events(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apieventsv1.Event{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/operator/vendor/k8s.io/client-go/informers/events/v1beta1/event.go
index 75818c0e..b26c2089 100644
--- a/operator/vendor/k8s.io/client-go/informers/events/v1beta1/event.go
+++ b/operator/vendor/k8s.io/client-go/informers/events/v1beta1/event.go
@@ -57,7 +57,7 @@ func NewEventInformer(client kubernetes.Interface, namespace string, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredEventInformer(client kubernetes.Interface, namespace string, res
}
return client.EventsV1beta1().Events(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apieventsv1beta1.Event{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go
index ce8612c1..fdfbb023 100644
--- a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go
+++ b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go
@@ -57,7 +57,7 @@ func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncP
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string,
}
return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiextensionsv1beta1.DaemonSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go
index 5dd957ba..64422662 100644
--- a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go
+++ b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go
@@ -57,7 +57,7 @@ func NewDeploymentInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string
}
return client.ExtensionsV1beta1().Deployments(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiextensionsv1beta1.Deployment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go
index 935f6868..bbd72868 100644
--- a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go
@@ -57,7 +57,7 @@ func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r
}
return client.ExtensionsV1beta1().Ingresses(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiextensionsv1beta1.Ingress{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
index f485f314..45eb5e95 100644
--- a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/networkpolicy.go
@@ -57,7 +57,7 @@ func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str
}
return client.ExtensionsV1beta1().NetworkPolicies(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiextensionsv1beta1.NetworkPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go
index 4b1be542..82a183b3 100644
--- a/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go
+++ b/operator/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go
@@ -57,7 +57,7 @@ func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resync
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string
}
return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiextensionsv1beta1.ReplicaSet{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/factory.go b/operator/vendor/k8s.io/client-go/informers/factory.go
index 86c24551..bd3d16c0 100644
--- a/operator/vendor/k8s.io/client-go/informers/factory.go
+++ b/operator/vendor/k8s.io/client-go/informers/factory.go
@@ -116,6 +116,7 @@ func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Du
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
+//
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
@@ -223,7 +224,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
//
// It is typically used like this:
//
-// ctx, cancel := context.Background()
+// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// factory := NewSharedInformerFactory(client, resyncPeriod)
// defer factory.WaitForStop() // Returns immediately if nothing was started.
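
The factory.go hunk above only touches documentation: a blank line before the Deprecated marker and a fix to the doc-comment example, which previously mis-used context.Background() where a cancellable context is needed. Spelled out as a runnable lifecycle sketch (illustrative only, not part of the patch; it uses the exported factory methods Start, WaitForCacheSync, and Shutdown rather than the shorthand in the comment):

package example

import (
	"context"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

func runInformers(client kubernetes.Interface) {
	// The doc fix above: the context must come from WithCancel so that cancel()
	// can actually stop the informers; context.Background() alone has no cancel.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	pods := factory.Core().V1().Pods().Informer()

	factory.Start(ctx.Done())            // starts every informer requested so far
	factory.WaitForCacheSync(ctx.Done()) // blocks until the initial LIST/WATCH has synced
	_ = pods.HasSynced()                 // per-informer check, if finer control is needed

	// ... read from listers, register event handlers ...

	cancel()           // ask all started informers to stop
	factory.Shutdown() // wait for them to exit before returning
}
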
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go
index f8918dcf..734ee0eb 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/flowschema.go
@@ -56,7 +56,7 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim
}
return client.FlowcontrolV1().FlowSchemas().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1.FlowSchema{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go
index 2ec4f398..a5f23218 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1/prioritylevelconfiguration.go
@@ -56,7 +56,7 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface,
}
return client.FlowcontrolV1().PriorityLevelConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1.PriorityLevelConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go
index 322fa318..5e88fe8c 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/flowschema.go
@@ -56,7 +56,7 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim
}
return client.FlowcontrolV1beta1().FlowSchemas().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta1.FlowSchema{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go
index aebc788f..f86a96e3 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta1/prioritylevelconfiguration.go
@@ -56,7 +56,7 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface,
}
return client.FlowcontrolV1beta1().PriorityLevelConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta1.PriorityLevelConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go
index 522e24b7..e17e4c9f 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/flowschema.go
@@ -56,7 +56,7 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim
}
return client.FlowcontrolV1beta2().FlowSchemas().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta2.FlowSchema{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go
index 0ee0506e..1db6efa6 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta2/prioritylevelconfiguration.go
@@ -56,7 +56,7 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface,
}
return client.FlowcontrolV1beta2().PriorityLevelConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta2.PriorityLevelConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go
index 3b0dca3c..626a20d6 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/flowschema.go
@@ -56,7 +56,7 @@ func NewFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Durati
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredFlowSchemaInformer(client kubernetes.Interface, resyncPeriod tim
}
return client.FlowcontrolV1beta3().FlowSchemas().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta3.FlowSchema{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go
index 77ff4e4e..43f69742 100644
--- a/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go
+++ b/operator/vendor/k8s.io/client-go/informers/flowcontrol/v1beta3/prioritylevelconfiguration.go
@@ -56,7 +56,7 @@ func NewPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPe
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityLevelConfigurationInformer(client kubernetes.Interface,
}
return client.FlowcontrolV1beta3().PriorityLevelConfigurations().Watch(ctx, options)
},
- },
+ }, client),
&apiflowcontrolv1beta3.PriorityLevelConfiguration{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/generic.go b/operator/vendor/k8s.io/client-go/informers/generic.go
index 980c9916..2b7efe7f 100644
--- a/operator/vendor/k8s.io/client-go/informers/generic.go
+++ b/operator/vendor/k8s.io/client-go/informers/generic.go
@@ -70,7 +70,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@@ -200,14 +200,14 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=certificates.k8s.io, Version=v1alpha1
case certificatesv1alpha1.SchemeGroupVersion.WithResource("clustertrustbundles"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().ClusterTrustBundles().Informer()}, nil
- case certificatesv1alpha1.SchemeGroupVersion.WithResource("podcertificaterequests"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().PodCertificateRequests().Informer()}, nil
// Group=certificates.k8s.io, Version=v1beta1
case certificatesv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil
case certificatesv1beta1.SchemeGroupVersion.WithResource("clustertrustbundles"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().ClusterTrustBundles().Informer()}, nil
+ case certificatesv1beta1.SchemeGroupVersion.WithResource("podcertificaterequests"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().PodCertificateRequests().Informer()}, nil
// Group=coordination.k8s.io, Version=v1
case coordinationv1.SchemeGroupVersion.WithResource("leases"):
@@ -426,6 +426,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=scheduling.k8s.io, Version=v1alpha1
case schedulingv1alpha1.SchemeGroupVersion.WithResource("priorityclasses"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil
+ case schedulingv1alpha1.SchemeGroupVersion.WithResource("workloads"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Workloads().Informer()}, nil
// Group=scheduling.k8s.io, Version=v1beta1
case schedulingv1beta1.SchemeGroupVersion.WithResource("priorityclasses"):
@@ -467,9 +469,9 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case storagev1beta1.SchemeGroupVersion.WithResource("volumeattributesclasses"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().VolumeAttributesClasses().Informer()}, nil
- // Group=storagemigration.k8s.io, Version=v1alpha1
- case storagemigrationv1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"):
- return &genericInformer{resource: resource.GroupResource(), informer: f.Storagemigration().V1alpha1().StorageVersionMigrations().Informer()}, nil
+ // Group=storagemigration.k8s.io, Version=v1beta1
+ case storagemigrationv1beta1.SchemeGroupVersion.WithResource("storageversionmigrations"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Storagemigration().V1beta1().StorageVersionMigrations().Informer()}, nil
}
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1/ingress.go b/operator/vendor/k8s.io/client-go/informers/networking/v1/ingress.go
index 6f1b0b78..fa7252df 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1/ingress.go
@@ -57,7 +57,7 @@ func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r
}
return client.NetworkingV1().Ingresses(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1.Ingress{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go b/operator/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go
index b0d4803d..bd6696a3 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1/ingressclass.go
@@ -56,7 +56,7 @@ func NewIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.NetworkingV1().IngressClasses().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1.IngressClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go b/operator/vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go
index e3459e72..8ab3ac5d 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1/ipaddress.go
@@ -56,7 +56,7 @@ func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duratio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time
}
return client.NetworkingV1().IPAddresses().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1.IPAddress{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/operator/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go
index 0dba59c5..1b39b0e6 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go
@@ -57,7 +57,7 @@ func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace str
}
return client.NetworkingV1().NetworkPolicies(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1.NetworkPolicy{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go b/operator/vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go
index 039cdc75..299e4061 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1/servicecidr.go
@@ -56,7 +56,7 @@ func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.NetworkingV1().ServiceCIDRs().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1.ServiceCIDR{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
index 6c616b90..3e279cb5 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingress.go
@@ -57,7 +57,7 @@ func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPer
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, r
}
return client.NetworkingV1beta1().Ingresses(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1beta1.Ingress{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go
index dd3a9aa7..296abfa8 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ingressclass.go
@@ -56,7 +56,7 @@ func NewIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredIngressClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.NetworkingV1beta1().IngressClasses().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1beta1.IngressClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go
index 32ce3c4a..94f785f1 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go
@@ -56,7 +56,7 @@ func NewIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duratio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredIPAddressInformer(client kubernetes.Interface, resyncPeriod time
}
return client.NetworkingV1beta1().IPAddresses().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1beta1.IPAddress{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go
index 25843d2f..67776463 100644
--- a/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go
+++ b/operator/vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go
@@ -56,7 +56,7 @@ func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.NetworkingV1beta1().ServiceCIDRs().Watch(ctx, options)
},
- },
+ }, client),
&apinetworkingv1beta1.ServiceCIDR{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go b/operator/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go
index 85625e3e..d794c569 100644
--- a/operator/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/node/v1/runtimeclass.go
@@ -56,7 +56,7 @@ func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.NodeV1().RuntimeClasses().Watch(ctx, options)
},
- },
+ }, client),
&apinodev1.RuntimeClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go b/operator/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
index b3ac2e2a..01718ac1 100644
--- a/operator/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/node/v1alpha1/runtimeclass.go
@@ -56,7 +56,7 @@ func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.NodeV1alpha1().RuntimeClasses().Watch(ctx, options)
},
- },
+ }, client),
&apinodev1alpha1.RuntimeClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go b/operator/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
index b562476d..09390f58 100644
--- a/operator/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/node/v1beta1/runtimeclass.go
@@ -56,7 +56,7 @@ func NewRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredRuntimeClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.NodeV1beta1().RuntimeClasses().Watch(ctx, options)
},
- },
+ }, client),
&apinodev1beta1.RuntimeClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go b/operator/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go
index f80d7dd9..10e45d85 100644
--- a/operator/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go
+++ b/operator/vendor/k8s.io/client-go/informers/policy/v1/poddisruptionbudget.go
@@ -57,7 +57,7 @@ func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace strin
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa
}
return client.PolicyV1().PodDisruptionBudgets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apipolicyv1.PodDisruptionBudget{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/operator/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go
index 92e37d0e..a45d753e 100644
--- a/operator/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go
+++ b/operator/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go
@@ -57,7 +57,7 @@ func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace strin
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespa
}
return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apipolicyv1beta1.PodDisruptionBudget{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go
index 4118bfff..02dab4f9 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go
@@ -56,7 +56,7 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.RbacV1().ClusterRoles().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1.ClusterRole{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go
index 67c06d60..182009da 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go
@@ -56,7 +56,7 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe
}
return client.RbacV1().ClusterRoleBindings().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1.ClusterRoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1/role.go
index e931d239..5a0a1f80 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1/role.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1/role.go
@@ -57,7 +57,7 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy
}
return client.RbacV1().Roles(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1.Role{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go
index 89b11eff..0f0eec60 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go
@@ -57,7 +57,7 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin
}
return client.RbacV1().RoleBindings(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1.RoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go
index ff95f62f..e1b25180 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go
@@ -56,7 +56,7 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.RbacV1alpha1().ClusterRoles().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1alpha1.ClusterRole{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go
index 1002f163..d61e8b29 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go
@@ -56,7 +56,7 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe
}
return client.RbacV1alpha1().ClusterRoleBindings().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1alpha1.ClusterRoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go
index ad7b1c0b..497bccbb 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go
@@ -57,7 +57,7 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy
}
return client.RbacV1alpha1().Roles(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1alpha1.Role{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go
index c5d915d2..0d8b390b 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go
@@ -57,7 +57,7 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin
}
return client.RbacV1alpha1().RoleBindings(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1alpha1.RoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go
index 24aad0b8..1cc1a0dd 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go
@@ -56,7 +56,7 @@ func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.RbacV1beta1().ClusterRoles().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1beta1.ClusterRole{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go
index 3506b797..a96ed088 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go
@@ -56,7 +56,7 @@ func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod tim
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPe
}
return client.RbacV1beta1().ClusterRoleBindings().Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1beta1.ClusterRoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go
index 119a601f..922ac5dc 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go
@@ -57,7 +57,7 @@ func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resy
}
return client.RbacV1beta1().Roles(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1beta1.Role{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go
index c36c295c..5fcd4884 100644
--- a/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go
+++ b/operator/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go
@@ -57,7 +57,7 @@ func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyn
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace strin
}
return client.RbacV1beta1().RoleBindings(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apirbacv1beta1.RoleBinding{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1/deviceclass.go b/operator/vendor/k8s.io/client-go/informers/resource/v1/deviceclass.go
index 2b7e6b54..867bb7b3 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1/deviceclass.go
@@ -56,7 +56,7 @@ func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.ResourceV1().DeviceClasses().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1.DeviceClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaim.go b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaim.go
index 19100c4d..64deaed6 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaim.go
@@ -57,7 +57,7 @@ func NewResourceClaimInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str
}
return client.ResourceV1().ResourceClaims(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1.ResourceClaim{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaimtemplate.go
index 99978526..131e159e 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceclaimtemplate.go
@@ -57,7 +57,7 @@ func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names
}
return client.ResourceV1().ResourceClaimTemplates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1.ResourceClaimTemplate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceslice.go b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceslice.go
index ec2099c7..a01abf07 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1/resourceslice.go
@@ -56,7 +56,7 @@ func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod
}
return client.ResourceV1().ResourceSlices().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1.ResourceSlice{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go b/operator/vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go
index 9a07c8f4..0fdc88ac 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1alpha3/devicetaintrule.go
@@ -56,7 +56,7 @@ func NewDeviceTaintRuleInformer(client kubernetes.Interface, resyncPeriod time.D
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeviceTaintRuleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredDeviceTaintRuleInformer(client kubernetes.Interface, resyncPerio
}
return client.ResourceV1alpha3().DeviceTaintRules().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1alpha3.DeviceTaintRule{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go
index bb0b2824..cb15fbc0 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go
@@ -56,7 +56,7 @@ func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.ResourceV1beta1().DeviceClasses().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta1.DeviceClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go
index 5e13b797..982c5985 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go
@@ -57,7 +57,7 @@ func NewResourceClaimInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str
}
return client.ResourceV1beta1().ResourceClaims(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta1.ResourceClaim{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go
index 86c13a8f..51a6381a 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaimtemplate.go
@@ -57,7 +57,7 @@ func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names
}
return client.ResourceV1beta1().ResourceClaimTemplates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta1.ResourceClaimTemplate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go
index 6cc3c65f..b6b1cc1f 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go
@@ -56,7 +56,7 @@ func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod
}
return client.ResourceV1beta1().ResourceSlices().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta1.ResourceSlice{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go
index 372d35d8..a940a214 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/deviceclass.go
@@ -56,7 +56,7 @@ func NewDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Durat
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredDeviceClassInformer(client kubernetes.Interface, resyncPeriod ti
}
return client.ResourceV1beta2().DeviceClasses().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta2.DeviceClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go
index e245d998..a5c085c2 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaim.go
@@ -57,7 +57,7 @@ func NewResourceClaimInformer(client kubernetes.Interface, namespace string, res
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimInformer(client kubernetes.Interface, namespace str
}
return client.ResourceV1beta2().ResourceClaims(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta2.ResourceClaim{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go
index 4b973bd9..7df9f74d 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceclaimtemplate.go
@@ -57,7 +57,7 @@ func NewResourceClaimTemplateInformer(client kubernetes.Interface, namespace str
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredResourceClaimTemplateInformer(client kubernetes.Interface, names
}
return client.ResourceV1beta2().ResourceClaimTemplates(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta2.ResourceClaimTemplate{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go
index c0cdc67a..83e6cc03 100644
--- a/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go
+++ b/operator/vendor/k8s.io/client-go/informers/resource/v1beta2/resourceslice.go
@@ -56,7 +56,7 @@ func NewResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredResourceSliceInformer(client kubernetes.Interface, resyncPeriod
}
return client.ResourceV1beta2().ResourceSlices().Watch(ctx, options)
},
- },
+ }, client),
&apiresourcev1beta2.ResourceSlice{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go b/operator/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
index df426366..8cce79ce 100644
--- a/operator/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/scheduling/v1/priorityclass.go
@@ -56,7 +56,7 @@ func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod
}
return client.SchedulingV1().PriorityClasses().Watch(ctx, options)
},
- },
+ }, client),
&apischedulingv1.PriorityClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go
index cd908d14..fdfc4e6a 100644
--- a/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go
+++ b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go
@@ -26,6 +26,8 @@ import (
type Interface interface {
// PriorityClasses returns a PriorityClassInformer.
PriorityClasses() PriorityClassInformer
+ // Workloads returns a WorkloadInformer.
+ Workloads() WorkloadInformer
}
type version struct {
@@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
func (v *version) PriorityClasses() PriorityClassInformer {
return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
+
+// Workloads returns a WorkloadInformer.
+func (v *version) Workloads() WorkloadInformer {
+ return &workloadInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go
index 228240af..88f99a7d 100644
--- a/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go
@@ -56,7 +56,7 @@ func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod
}
return client.SchedulingV1alpha1().PriorityClasses().Watch(ctx, options)
},
- },
+ }, client),
&apischedulingv1alpha1.PriorityClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/workload.go b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/workload.go
new file mode 100644
index 00000000..c58e7889
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/workload.go
@@ -0,0 +1,102 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ context "context"
+ time "time"
+
+ apischedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
+ kubernetes "k8s.io/client-go/kubernetes"
+ schedulingv1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// WorkloadInformer provides access to a shared informer and lister for
+// Workloads.
+type WorkloadInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() schedulingv1alpha1.WorkloadLister
+}
+
+type workloadInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewWorkloadInformer constructs a new informer for Workload type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewWorkloadInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredWorkloadInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredWorkloadInformer constructs a new informer for Workload type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredWorkloadInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SchedulingV1alpha1().Workloads(namespace).List(context.Background(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SchedulingV1alpha1().Workloads(namespace).Watch(context.Background(), options)
+ },
+ ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SchedulingV1alpha1().Workloads(namespace).List(ctx, options)
+ },
+ WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SchedulingV1alpha1().Workloads(namespace).Watch(ctx, options)
+ },
+ }, client),
+ &apischedulingv1alpha1.Workload{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *workloadInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredWorkloadInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *workloadInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&apischedulingv1alpha1.Workload{}, f.defaultInformer)
+}
+
+func (f *workloadInformer) Lister() schedulingv1alpha1.WorkloadLister {
+ return schedulingv1alpha1.NewWorkloadLister(f.Informer().GetIndexer())
+}
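
The new Workload informer is consumed through the shared informer factory like any other generated informer. A minimal sketch, under the assumption that the cluster actually serves scheduling.k8s.io/v1alpha1 Workloads (an alpha API that is typically feature-gated):

package workloadexample

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listWorkloads reaches the new scheduling/v1alpha1 Workload informer via the
// shared informer factory instead of constructing an independent informer.
func listWorkloads(cfg *rest.Config, stop <-chan struct{}) error {
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}

	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	// Lister() registers the shared informer with the factory (it calls
	// Informer() internally, as the generated file above shows).
	lister := factory.Scheduling().V1alpha1().Workloads().Lister()

	factory.Start(stop)
	for typ, synced := range factory.WaitForCacheSync(stop) {
		if !synced {
			return fmt.Errorf("informer cache for %v never synced", typ)
		}
	}

	// Reads are served from the informer's local indexed store.
	workloads, err := lister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d Workload objects\n", len(workloads))
	return nil
}
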
diff --git a/operator/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go b/operator/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go
index fd40bd08..e8057f92 100644
--- a/operator/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/scheduling/v1beta1/priorityclass.go
@@ -56,7 +56,7 @@ func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Dur
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod
}
return client.SchedulingV1beta1().PriorityClasses().Watch(ctx, options)
},
- },
+ }, client),
&apischedulingv1beta1.PriorityClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go
index b79a51ca..3a1922ed 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/csidriver.go
@@ -56,7 +56,7 @@ func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duratio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time
}
return client.StorageV1().CSIDrivers().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.CSIDriver{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/csinode.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/csinode.go
index 7a604079..350ab61e 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/csinode.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/csinode.go
@@ -56,7 +56,7 @@ func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration,
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D
}
return client.StorageV1().CSINodes().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.CSINode{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go
index 84ef70f2..7c88e6e0 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/csistoragecapacity.go
@@ -57,7 +57,7 @@ func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac
}
return client.StorageV1().CSIStorageCapacities(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.CSIStorageCapacity{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go
index 7f17ecf8..620c1d34 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go
@@ -56,7 +56,7 @@ func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.StorageV1().StorageClasses().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.StorageClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
index 3dee340d..3b7c4611 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattachment.go
@@ -56,7 +56,7 @@ func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri
}
return client.StorageV1().VolumeAttachments().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.VolumeAttachment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go
index a230ba56..34dc8b48 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1/volumeattributesclass.go
@@ -56,7 +56,7 @@ func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn
}
return client.StorageV1().VolumeAttributesClasses().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1.VolumeAttributesClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go
index 794de10d..3b275320 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/csistoragecapacity.go
@@ -57,7 +57,7 @@ func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac
}
return client.StorageV1alpha1().CSIStorageCapacities(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1alpha1.CSIStorageCapacity{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go
index dc68be23..10ac7e55 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go
@@ -56,7 +56,7 @@ func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri
}
return client.StorageV1alpha1().VolumeAttachments().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1alpha1.VolumeAttachment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go
index 5210ea79..312a443e 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattributesclass.go
@@ -56,7 +56,7 @@ func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn
}
return client.StorageV1alpha1().VolumeAttributesClasses().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1alpha1.VolumeAttributesClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
index a21dc94f..007caaab 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csidriver.go
@@ -56,7 +56,7 @@ func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duratio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time
}
return client.StorageV1beta1().CSIDrivers().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.CSIDriver{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
index e789fe30..ce518f0b 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csinode.go
@@ -56,7 +56,7 @@ func NewCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration,
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredCSINodeInformer(client kubernetes.Interface, resyncPeriod time.D
}
return client.StorageV1beta1().CSINodes().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.CSINode{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go
index fa75b0b4..88446b76 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/csistoragecapacity.go
@@ -57,7 +57,7 @@ func NewCSIStorageCapacityInformer(client kubernetes.Interface, namespace string
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -82,7 +82,7 @@ func NewFilteredCSIStorageCapacityInformer(client kubernetes.Interface, namespac
}
return client.StorageV1beta1().CSIStorageCapacities(namespace).Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.CSIStorageCapacity{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go
index 23d7ca4f..786d46d3 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go
@@ -56,7 +56,7 @@ func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Dura
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod t
}
return client.StorageV1beta1().StorageClasses().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.StorageClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go
index 691b2c6d..0f1bf216 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattachment.go
@@ -56,7 +56,7 @@ func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeri
}
return client.StorageV1beta1().VolumeAttachments().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.VolumeAttachment{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go
index 7d66c581..90e79cc7 100644
--- a/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go
+++ b/operator/vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go
@@ -56,7 +56,7 @@ func NewVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
@@ -81,7 +81,7 @@ func NewFilteredVolumeAttributesClassInformer(client kubernetes.Interface, resyn
}
return client.StorageV1beta1().VolumeAttributesClasses().Watch(ctx, options)
},
- },
+ }, client),
&apistoragev1beta1.VolumeAttributesClass{},
resyncPeriod,
indexers,
diff --git a/operator/vendor/k8s.io/client-go/informers/storagemigration/interface.go b/operator/vendor/k8s.io/client-go/informers/storagemigration/interface.go
index 1f7030fe..426e50fd 100644
--- a/operator/vendor/k8s.io/client-go/informers/storagemigration/interface.go
+++ b/operator/vendor/k8s.io/client-go/informers/storagemigration/interface.go
@@ -20,13 +20,13 @@ package storagemigration
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
- v1alpha1 "k8s.io/client-go/informers/storagemigration/v1alpha1"
+ v1beta1 "k8s.io/client-go/informers/storagemigration/v1beta1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
- // V1alpha1 provides access to shared informers for resources in V1alpha1.
- V1alpha1() v1alpha1.Interface
+ // V1beta1 provides access to shared informers for resources in V1beta1.
+ V1beta1() v1beta1.Interface
}
type group struct {
@@ -40,7 +40,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
-// V1alpha1 returns a new v1alpha1.Interface.
-func (g *group) V1alpha1() v1alpha1.Interface {
- return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+// V1beta1 returns a new v1beta1.Interface.
+func (g *group) V1beta1() v1beta1.Interface {
+ return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
}
diff --git a/operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go b/operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/interface.go
similarity index 98%
rename from operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go
rename to operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/interface.go
index 60724e7a..220e1f5c 100644
--- a/operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go
+++ b/operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/interface.go
@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
diff --git a/operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go b/operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/storageversionmigration.go
similarity index 77%
rename from operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go
rename to operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/storageversionmigration.go
index 4debb5ee..370d88f2 100644
--- a/operator/vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go
+++ b/operator/vendor/k8s.io/client-go/informers/storagemigration/v1beta1/storageversionmigration.go
@@ -16,19 +16,19 @@ limitations under the License.
// Code generated by informer-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
context "context"
time "time"
- apistoragemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ apistoragemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
- storagemigrationv1alpha1 "k8s.io/client-go/listers/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/client-go/listers/storagemigration/v1beta1"
cache "k8s.io/client-go/tools/cache"
)
@@ -36,7 +36,7 @@ import (
// StorageVersionMigrations.
type StorageVersionMigrationInformer interface {
Informer() cache.SharedIndexInformer
- Lister() storagemigrationv1alpha1.StorageVersionMigrationLister
+ Lister() storagemigrationv1beta1.StorageVersionMigrationLister
}
type storageVersionMigrationInformer struct {
@@ -56,33 +56,33 @@ func NewStorageVersionMigrationInformer(client kubernetes.Interface, resyncPerio
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredStorageVersionMigrationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
- &cache.ListWatch{
+ cache.ToListWatcherWithWatchListSemantics(&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.StoragemigrationV1alpha1().StorageVersionMigrations().List(context.Background(), options)
+ return client.StoragemigrationV1beta1().StorageVersionMigrations().List(context.Background(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(context.Background(), options)
+ return client.StoragemigrationV1beta1().StorageVersionMigrations().Watch(context.Background(), options)
},
ListWithContextFunc: func(ctx context.Context, options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.StoragemigrationV1alpha1().StorageVersionMigrations().List(ctx, options)
+ return client.StoragemigrationV1beta1().StorageVersionMigrations().List(ctx, options)
},
WatchFuncWithContext: func(ctx context.Context, options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
- return client.StoragemigrationV1alpha1().StorageVersionMigrations().Watch(ctx, options)
+ return client.StoragemigrationV1beta1().StorageVersionMigrations().Watch(ctx, options)
},
- },
- &apistoragemigrationv1alpha1.StorageVersionMigration{},
+ }, client),
+ &apistoragemigrationv1beta1.StorageVersionMigration{},
resyncPeriod,
indexers,
)
@@ -93,9 +93,9 @@ func (f *storageVersionMigrationInformer) defaultInformer(client kubernetes.Inte
}
func (f *storageVersionMigrationInformer) Informer() cache.SharedIndexInformer {
- return f.factory.InformerFor(&apistoragemigrationv1alpha1.StorageVersionMigration{}, f.defaultInformer)
+ return f.factory.InformerFor(&apistoragemigrationv1beta1.StorageVersionMigration{}, f.defaultInformer)
}
-func (f *storageVersionMigrationInformer) Lister() storagemigrationv1alpha1.StorageVersionMigrationLister {
- return storagemigrationv1alpha1.NewStorageVersionMigrationLister(f.Informer().GetIndexer())
+func (f *storageVersionMigrationInformer) Lister() storagemigrationv1beta1.StorageVersionMigrationLister {
+ return storagemigrationv1beta1.NewStorageVersionMigrationLister(f.Informer().GetIndexer())
}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/clientset.go b/operator/vendor/k8s.io/client-go/kubernetes/clientset.go
index 9a8b6f2a..1ef0ff4b 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/clientset.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/clientset.go
@@ -77,7 +77,7 @@ import (
storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1"
storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
- storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
@@ -138,7 +138,7 @@ type Interface interface {
StorageV1beta1() storagev1beta1.StorageV1beta1Interface
StorageV1() storagev1.StorageV1Interface
StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface
- StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface
+ StoragemigrationV1beta1() storagemigrationv1beta1.StoragemigrationV1beta1Interface
}
// Clientset contains the clients for groups.
@@ -198,7 +198,7 @@ type Clientset struct {
storageV1beta1 *storagev1beta1.StorageV1beta1Client
storageV1 *storagev1.StorageV1Client
storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client
- storagemigrationV1alpha1 *storagemigrationv1alpha1.StoragemigrationV1alpha1Client
+ storagemigrationV1beta1 *storagemigrationv1beta1.StoragemigrationV1beta1Client
}
// AdmissionregistrationV1 retrieves the AdmissionregistrationV1Client
@@ -471,9 +471,9 @@ func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface {
return c.storageV1alpha1
}
-// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client
-func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface {
- return c.storagemigrationV1alpha1
+// StoragemigrationV1beta1 retrieves the StoragemigrationV1beta1Client
+func (c *Clientset) StoragemigrationV1beta1() storagemigrationv1beta1.StoragemigrationV1beta1Interface {
+ return c.storagemigrationV1beta1
}
// Discovery retrieves the DiscoveryClient
@@ -736,7 +736,7 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
- cs.storagemigrationV1alpha1, err = storagemigrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ cs.storagemigrationV1beta1, err = storagemigrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
@@ -815,7 +815,7 @@ func New(c rest.Interface) *Clientset {
cs.storageV1beta1 = storagev1beta1.New(c)
cs.storageV1 = storagev1.New(c)
cs.storageV1alpha1 = storagev1alpha1.New(c)
- cs.storagemigrationV1alpha1 = storagemigrationv1alpha1.New(c)
+ cs.storagemigrationV1beta1 = storagemigrationv1beta1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
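
For consumers, the visible effect of this clientset change is that the storagemigration group is now reached through StoragemigrationV1beta1() instead of StoragemigrationV1alpha1(). A call-site sketch using the standard generated list method:

package migrationexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listMigrations shows the consumer-facing effect of the rename above: the
// storagemigration group is now served by the v1beta1 typed client.
func listMigrations(ctx context.Context, cs kubernetes.Interface) error {
	// Before this bump: cs.StoragemigrationV1alpha1().StorageVersionMigrations()
	migrations, err := cs.StoragemigrationV1beta1().StorageVersionMigrations().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("found %d StorageVersionMigration objects\n", len(migrations.Items))
	return nil
}
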
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/operator/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
index 973b8a71..f729718b 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
@@ -134,8 +134,8 @@ import (
fakestoragev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake"
storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1"
fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake"
- storagemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1"
- fakestoragemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake"
+ storagemigrationv1beta1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1"
+ fakestoragemigrationv1beta1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake"
"k8s.io/client-go/testing"
)
@@ -144,7 +144,7 @@ import (
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
-// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves
+// Deprecated: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
@@ -160,8 +160,8 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
var opts metav1.ListOptions
- if watchActcion, ok := action.(testing.WatchActionImpl); ok {
- opts = watchActcion.ListOptions
+ if watchAction, ok := action.(testing.WatchActionImpl); ok {
+ opts = watchAction.ListOptions
}
gvr := action.GetResource()
ns := action.GetNamespace()
@@ -192,6 +192,17 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
+// IsWatchListSemanticsUnSupported informs the reflector that this client
+// doesn't support WatchList semantics.
+//
+// This is a synthetic method whose sole purpose is to satisfy the optional
+// interface check performed by the reflector.
+// Returning true signals that WatchList can NOT be used.
+// No additional logic is implemented here.
+func (c *Clientset) IsWatchListSemanticsUnSupported() bool {
+ return true
+}
+
// NewClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
@@ -503,7 +514,7 @@ func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface {
return &fakestoragev1alpha1.FakeStorageV1alpha1{Fake: &c.Fake}
}
-// StoragemigrationV1alpha1 retrieves the StoragemigrationV1alpha1Client
-func (c *Clientset) StoragemigrationV1alpha1() storagemigrationv1alpha1.StoragemigrationV1alpha1Interface {
- return &fakestoragemigrationv1alpha1.FakeStoragemigrationV1alpha1{Fake: &c.Fake}
+// StoragemigrationV1beta1 retrieves the StoragemigrationV1beta1Client
+func (c *Clientset) StoragemigrationV1beta1() storagemigrationv1beta1.StoragemigrationV1beta1Interface {
+ return &fakestoragemigrationv1beta1.FakeStoragemigrationV1beta1{Fake: &c.Fake}
}
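
The new IsWatchListSemanticsUnSupported method exists only so the reflector's duck-typed probe can detect that the fake clientset cannot stream WatchList responses. A sketch of that kind of optional-interface check; the interface name below is illustrative, since the real assertion lives in client-go internals and may use a different type:

package main

import "fmt"

// watchListOptOut is an illustrative stand-in for the anonymous interface the
// reflector asserts against; the real internal type in client-go may differ.
type watchListOptOut interface {
	IsWatchListSemanticsUnSupported() bool
}

// supportsWatchList reports whether WatchList streaming may be attempted for
// the given client value, following the opt-out convention introduced above.
func supportsWatchList(client any) bool {
	if c, ok := client.(watchListOptOut); ok && c.IsWatchListSemanticsUnSupported() {
		return false // the fake Clientset returns true here, disabling WatchList
	}
	return true
}

func main() {
	// A value without the method never opts out, so streaming may be tried.
	fmt.Println(supportsWatchList(struct{}{})) // prints: true
}

Passed a *fake.Clientset, the same check would return false, so a reflector wired to the fake falls back to an ordinary LIST followed by WATCH against the object tracker.
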
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/fake/register.go b/operator/vendor/k8s.io/client-go/kubernetes/fake/register.go
index 3be5276f..7a2bacfb 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/fake/register.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/fake/register.go
@@ -73,7 +73,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -139,7 +139,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
storagev1beta1.AddToScheme,
storagev1.AddToScheme,
storagev1alpha1.AddToScheme,
- storagemigrationv1alpha1.AddToScheme,
+ storagemigrationv1beta1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/operator/vendor/k8s.io/client-go/kubernetes/scheme/register.go
index b96612ee..9557cba2 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/scheme/register.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/scheme/register.go
@@ -73,7 +73,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
storagev1alpha1 "k8s.io/api/storage/v1alpha1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -139,7 +139,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
storagev1beta1.AddToScheme,
storagev1.AddToScheme,
storagev1alpha1.AddToScheme,
- storagemigrationv1alpha1.AddToScheme,
+ storagemigrationv1beta1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
index 06d3ce56..163ddad0 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/certificates_client.go
@@ -29,7 +29,6 @@ import (
type CertificatesV1alpha1Interface interface {
RESTClient() rest.Interface
ClusterTrustBundlesGetter
- PodCertificateRequestsGetter
}
// CertificatesV1alpha1Client is used to interact with features provided by the certificates.k8s.io group.
@@ -41,10 +40,6 @@ func (c *CertificatesV1alpha1Client) ClusterTrustBundles() ClusterTrustBundleInt
return newClusterTrustBundles(c)
}
-func (c *CertificatesV1alpha1Client) PodCertificateRequests(namespace string) PodCertificateRequestInterface {
- return newPodCertificateRequests(c, namespace)
-}
-
// NewForConfig creates a new CertificatesV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go
index 7202eedd..491e3810 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go
@@ -32,10 +32,6 @@ func (c *FakeCertificatesV1alpha1) ClusterTrustBundles() v1alpha1.ClusterTrustBu
return newFakeClusterTrustBundles(c)
}
-func (c *FakeCertificatesV1alpha1) PodCertificateRequests(namespace string) v1alpha1.PodCertificateRequestInterface {
- return newFakePodCertificateRequests(c, namespace)
-}
-
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeCertificatesV1alpha1) RESTClient() rest.Interface {
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_podcertificaterequest.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_podcertificaterequest.go
deleted file mode 100644
index d41e7718..00000000
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/fake/fake_podcertificaterequest.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- v1alpha1 "k8s.io/api/certificates/v1alpha1"
- certificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
- gentype "k8s.io/client-go/gentype"
- typedcertificatesv1alpha1 "k8s.io/client-go/kubernetes/typed/certificates/v1alpha1"
-)
-
-// fakePodCertificateRequests implements PodCertificateRequestInterface
-type fakePodCertificateRequests struct {
- *gentype.FakeClientWithListAndApply[*v1alpha1.PodCertificateRequest, *v1alpha1.PodCertificateRequestList, *certificatesv1alpha1.PodCertificateRequestApplyConfiguration]
- Fake *FakeCertificatesV1alpha1
-}
-
-func newFakePodCertificateRequests(fake *FakeCertificatesV1alpha1, namespace string) typedcertificatesv1alpha1.PodCertificateRequestInterface {
- return &fakePodCertificateRequests{
- gentype.NewFakeClientWithListAndApply[*v1alpha1.PodCertificateRequest, *v1alpha1.PodCertificateRequestList, *certificatesv1alpha1.PodCertificateRequestApplyConfiguration](
- fake.Fake,
- namespace,
- v1alpha1.SchemeGroupVersion.WithResource("podcertificaterequests"),
- v1alpha1.SchemeGroupVersion.WithKind("PodCertificateRequest"),
- func() *v1alpha1.PodCertificateRequest { return &v1alpha1.PodCertificateRequest{} },
- func() *v1alpha1.PodCertificateRequestList { return &v1alpha1.PodCertificateRequestList{} },
- func(dst, src *v1alpha1.PodCertificateRequestList) { dst.ListMeta = src.ListMeta },
- func(list *v1alpha1.PodCertificateRequestList) []*v1alpha1.PodCertificateRequest {
- return gentype.ToPointerSlice(list.Items)
- },
- func(list *v1alpha1.PodCertificateRequestList, items []*v1alpha1.PodCertificateRequest) {
- list.Items = gentype.FromPointerSlice(items)
- },
- ),
- fake,
- }
-}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go
index 9e7382eb..43cc534b 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/generated_expansion.go
@@ -19,5 +19,3 @@ limitations under the License.
package v1alpha1
type ClusterTrustBundleExpansion interface{}
-
-type PodCertificateRequestExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
index 8de95609..68063070 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
@@ -30,6 +30,7 @@ type CertificatesV1beta1Interface interface {
RESTClient() rest.Interface
CertificateSigningRequestsGetter
ClusterTrustBundlesGetter
+ PodCertificateRequestsGetter
}
// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group.
@@ -45,6 +46,10 @@ func (c *CertificatesV1beta1Client) ClusterTrustBundles() ClusterTrustBundleInte
return newClusterTrustBundles(c)
}
+func (c *CertificatesV1beta1Client) PodCertificateRequests(namespace string) PodCertificateRequestInterface {
+ return newPodCertificateRequests(c, namespace)
+}
+
// NewForConfig creates a new CertificatesV1beta1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
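
PodCertificateRequests now hang off the certificates v1beta1 typed client rather than v1alpha1, so call sites change only in the group-version accessor. A minimal sketch, assuming the standard generated List method (the hunk below is truncated before it, but client-gen emits it for every resource):

package pcrexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listPodCertificateRequests reflects the accessor move: the getter now lives
// on the certificates v1beta1 typed client, not v1alpha1.
func listPodCertificateRequests(ctx context.Context, cs kubernetes.Interface, ns string) error {
	pcrs, err := cs.CertificatesV1beta1().PodCertificateRequests(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, pcr := range pcrs.Items {
		fmt.Println(pcr.Namespace + "/" + pcr.Name)
	}
	return nil
}
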
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
index fba168ea..b6cef6de 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go
@@ -36,6 +36,10 @@ func (c *FakeCertificatesV1beta1) ClusterTrustBundles() v1beta1.ClusterTrustBund
return newFakeClusterTrustBundles(c)
}
+func (c *FakeCertificatesV1beta1) PodCertificateRequests(namespace string) v1beta1.PodCertificateRequestInterface {
+ return newFakePodCertificateRequests(c, namespace)
+}
+
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeCertificatesV1beta1) RESTClient() rest.Interface {
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_podcertificaterequest.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_podcertificaterequest.go
new file mode 100644
index 00000000..23c56d9b
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_podcertificaterequest.go
@@ -0,0 +1,53 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/certificates/v1beta1"
+ certificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ typedcertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
+)
+
+// fakePodCertificateRequests implements PodCertificateRequestInterface
+type fakePodCertificateRequests struct {
+ *gentype.FakeClientWithListAndApply[*v1beta1.PodCertificateRequest, *v1beta1.PodCertificateRequestList, *certificatesv1beta1.PodCertificateRequestApplyConfiguration]
+ Fake *FakeCertificatesV1beta1
+}
+
+func newFakePodCertificateRequests(fake *FakeCertificatesV1beta1, namespace string) typedcertificatesv1beta1.PodCertificateRequestInterface {
+ return &fakePodCertificateRequests{
+ gentype.NewFakeClientWithListAndApply[*v1beta1.PodCertificateRequest, *v1beta1.PodCertificateRequestList, *certificatesv1beta1.PodCertificateRequestApplyConfiguration](
+ fake.Fake,
+ namespace,
+ v1beta1.SchemeGroupVersion.WithResource("podcertificaterequests"),
+ v1beta1.SchemeGroupVersion.WithKind("PodCertificateRequest"),
+ func() *v1beta1.PodCertificateRequest { return &v1beta1.PodCertificateRequest{} },
+ func() *v1beta1.PodCertificateRequestList { return &v1beta1.PodCertificateRequestList{} },
+ func(dst, src *v1beta1.PodCertificateRequestList) { dst.ListMeta = src.ListMeta },
+ func(list *v1beta1.PodCertificateRequestList) []*v1beta1.PodCertificateRequest {
+ return gentype.ToPointerSlice(list.Items)
+ },
+ func(list *v1beta1.PodCertificateRequestList, items []*v1beta1.PodCertificateRequest) {
+ list.Items = gentype.FromPointerSlice(items)
+ },
+ ),
+ fake,
+ }
+}
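For context only (not part of the vendored patch): a minimal sketch of how the new v1beta1 PodCertificateRequests fake client added above might be exercised in a unit test. The package name, test name, namespace, and use of fake.NewClientset are illustrative assumptions.

package example

import (
	"context"
	"testing"

	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestPodCertificateRequestsFake(t *testing.T) {
	// The fake clientset wires up the generated fake typed clients, including
	// the new CertificatesV1beta1().PodCertificateRequests(namespace) getter.
	clientset := fake.NewClientset()

	pcr := &certificatesv1beta1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	if _, err := clientset.CertificatesV1beta1().PodCertificateRequests("default").
		Create(context.Background(), pcr, metav1.CreateOptions{}); err != nil {
		t.Fatalf("create: %v", err)
	}

	list, err := clientset.CertificatesV1beta1().PodCertificateRequests("default").
		List(context.Background(), metav1.ListOptions{})
	if err != nil || len(list.Items) != 1 {
		t.Fatalf("list: %v (items=%d)", err, len(list.Items))
	}
}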
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
index 408936e0..3db79695 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go
@@ -19,3 +19,5 @@ limitations under the License.
package v1beta1
type ClusterTrustBundleExpansion interface{}
+
+type PodCertificateRequestExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/podcertificaterequest.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/podcertificaterequest.go
similarity index 58%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/podcertificaterequest.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/podcertificaterequest.go
index cc908efe..4ef7d339 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1alpha1/podcertificaterequest.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/podcertificaterequest.go
@@ -16,16 +16,16 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
context "context"
- certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- applyconfigurationscertificatesv1alpha1 "k8s.io/client-go/applyconfigurations/certificates/v1alpha1"
+ applyconfigurationscertificatesv1beta1 "k8s.io/client-go/applyconfigurations/certificates/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,42 +38,40 @@ type PodCertificateRequestsGetter interface {
// PodCertificateRequestInterface has methods to work with PodCertificateRequest resources.
type PodCertificateRequestInterface interface {
- Create(ctx context.Context, podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, opts v1.CreateOptions) (*certificatesv1alpha1.PodCertificateRequest, error)
- Update(ctx context.Context, podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, opts v1.UpdateOptions) (*certificatesv1alpha1.PodCertificateRequest, error)
+ Create(ctx context.Context, podCertificateRequest *certificatesv1beta1.PodCertificateRequest, opts v1.CreateOptions) (*certificatesv1beta1.PodCertificateRequest, error)
+ Update(ctx context.Context, podCertificateRequest *certificatesv1beta1.PodCertificateRequest, opts v1.UpdateOptions) (*certificatesv1beta1.PodCertificateRequest, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, podCertificateRequest *certificatesv1alpha1.PodCertificateRequest, opts v1.UpdateOptions) (*certificatesv1alpha1.PodCertificateRequest, error)
+ UpdateStatus(ctx context.Context, podCertificateRequest *certificatesv1beta1.PodCertificateRequest, opts v1.UpdateOptions) (*certificatesv1beta1.PodCertificateRequest, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1alpha1.PodCertificateRequest, error)
- List(ctx context.Context, opts v1.ListOptions) (*certificatesv1alpha1.PodCertificateRequestList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*certificatesv1beta1.PodCertificateRequest, error)
+ List(ctx context.Context, opts v1.ListOptions) (*certificatesv1beta1.PodCertificateRequestList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1alpha1.PodCertificateRequest, err error)
- Apply(ctx context.Context, podCertificateRequest *applyconfigurationscertificatesv1alpha1.PodCertificateRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1alpha1.PodCertificateRequest, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *certificatesv1beta1.PodCertificateRequest, err error)
+ Apply(ctx context.Context, podCertificateRequest *applyconfigurationscertificatesv1beta1.PodCertificateRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.PodCertificateRequest, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, podCertificateRequest *applyconfigurationscertificatesv1alpha1.PodCertificateRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1alpha1.PodCertificateRequest, err error)
+ ApplyStatus(ctx context.Context, podCertificateRequest *applyconfigurationscertificatesv1beta1.PodCertificateRequestApplyConfiguration, opts v1.ApplyOptions) (result *certificatesv1beta1.PodCertificateRequest, err error)
PodCertificateRequestExpansion
}
// podCertificateRequests implements PodCertificateRequestInterface
type podCertificateRequests struct {
- *gentype.ClientWithListAndApply[*certificatesv1alpha1.PodCertificateRequest, *certificatesv1alpha1.PodCertificateRequestList, *applyconfigurationscertificatesv1alpha1.PodCertificateRequestApplyConfiguration]
+ *gentype.ClientWithListAndApply[*certificatesv1beta1.PodCertificateRequest, *certificatesv1beta1.PodCertificateRequestList, *applyconfigurationscertificatesv1beta1.PodCertificateRequestApplyConfiguration]
}
// newPodCertificateRequests returns a PodCertificateRequests
-func newPodCertificateRequests(c *CertificatesV1alpha1Client, namespace string) *podCertificateRequests {
+func newPodCertificateRequests(c *CertificatesV1beta1Client, namespace string) *podCertificateRequests {
return &podCertificateRequests{
- gentype.NewClientWithListAndApply[*certificatesv1alpha1.PodCertificateRequest, *certificatesv1alpha1.PodCertificateRequestList, *applyconfigurationscertificatesv1alpha1.PodCertificateRequestApplyConfiguration](
+ gentype.NewClientWithListAndApply[*certificatesv1beta1.PodCertificateRequest, *certificatesv1beta1.PodCertificateRequestList, *applyconfigurationscertificatesv1beta1.PodCertificateRequestApplyConfiguration](
"podcertificaterequests",
c.RESTClient(),
scheme.ParameterCodec,
namespace,
- func() *certificatesv1alpha1.PodCertificateRequest {
- return &certificatesv1alpha1.PodCertificateRequest{}
+ func() *certificatesv1beta1.PodCertificateRequest { return &certificatesv1beta1.PodCertificateRequest{} },
+ func() *certificatesv1beta1.PodCertificateRequestList {
+ return &certificatesv1beta1.PodCertificateRequestList{}
},
- func() *certificatesv1alpha1.PodCertificateRequestList {
- return &certificatesv1alpha1.PodCertificateRequestList{}
- },
- gentype.PrefersProtobuf[*certificatesv1alpha1.PodCertificateRequest](),
+ gentype.PrefersProtobuf[*certificatesv1beta1.PodCertificateRequest](),
),
}
}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go
index 77e26b6e..dd2512c9 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/devicetaintrule.go
@@ -40,6 +40,8 @@ type DeviceTaintRulesGetter interface {
type DeviceTaintRuleInterface interface {
Create(ctx context.Context, deviceTaintRule *resourcev1alpha3.DeviceTaintRule, opts v1.CreateOptions) (*resourcev1alpha3.DeviceTaintRule, error)
Update(ctx context.Context, deviceTaintRule *resourcev1alpha3.DeviceTaintRule, opts v1.UpdateOptions) (*resourcev1alpha3.DeviceTaintRule, error)
+ // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+ UpdateStatus(ctx context.Context, deviceTaintRule *resourcev1alpha3.DeviceTaintRule, opts v1.UpdateOptions) (*resourcev1alpha3.DeviceTaintRule, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.DeviceTaintRule, error)
@@ -47,6 +49,8 @@ type DeviceTaintRuleInterface interface {
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.DeviceTaintRule, err error)
Apply(ctx context.Context, deviceTaintRule *applyconfigurationsresourcev1alpha3.DeviceTaintRuleApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.DeviceTaintRule, err error)
+ // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
+ ApplyStatus(ctx context.Context, deviceTaintRule *applyconfigurationsresourcev1alpha3.DeviceTaintRuleApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.DeviceTaintRule, err error)
DeviceTaintRuleExpansion
}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
index 34e8ad9b..2be14126 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
@@ -32,6 +32,10 @@ func (c *FakeSchedulingV1alpha1) PriorityClasses() v1alpha1.PriorityClassInterfa
return newFakePriorityClasses(c)
}
+func (c *FakeSchedulingV1alpha1) Workloads(namespace string) v1alpha1.WorkloadInterface {
+ return newFakeWorkloads(c, namespace)
+}
+
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface {
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_workload.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_workload.go
new file mode 100644
index 00000000..06a48d24
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_workload.go
@@ -0,0 +1,51 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ schedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
+ gentype "k8s.io/client-go/gentype"
+ typedschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1"
+)
+
+// fakeWorkloads implements WorkloadInterface
+type fakeWorkloads struct {
+ *gentype.FakeClientWithListAndApply[*v1alpha1.Workload, *v1alpha1.WorkloadList, *schedulingv1alpha1.WorkloadApplyConfiguration]
+ Fake *FakeSchedulingV1alpha1
+}
+
+func newFakeWorkloads(fake *FakeSchedulingV1alpha1, namespace string) typedschedulingv1alpha1.WorkloadInterface {
+ return &fakeWorkloads{
+ gentype.NewFakeClientWithListAndApply[*v1alpha1.Workload, *v1alpha1.WorkloadList, *schedulingv1alpha1.WorkloadApplyConfiguration](
+ fake.Fake,
+ namespace,
+ v1alpha1.SchemeGroupVersion.WithResource("workloads"),
+ v1alpha1.SchemeGroupVersion.WithKind("Workload"),
+ func() *v1alpha1.Workload { return &v1alpha1.Workload{} },
+ func() *v1alpha1.WorkloadList { return &v1alpha1.WorkloadList{} },
+ func(dst, src *v1alpha1.WorkloadList) { dst.ListMeta = src.ListMeta },
+ func(list *v1alpha1.WorkloadList) []*v1alpha1.Workload { return gentype.ToPointerSlice(list.Items) },
+ func(list *v1alpha1.WorkloadList, items []*v1alpha1.Workload) {
+ list.Items = gentype.FromPointerSlice(items)
+ },
+ ),
+ fake,
+ }
+}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
index 52f81d88..24946041 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
@@ -19,3 +19,5 @@ limitations under the License.
package v1alpha1
type PriorityClassExpansion interface{}
+
+type WorkloadExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
index b17b182f..0c53bc36 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
@@ -29,6 +29,7 @@ import (
type SchedulingV1alpha1Interface interface {
RESTClient() rest.Interface
PriorityClassesGetter
+ WorkloadsGetter
}
// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.k8s.io group.
@@ -40,6 +41,10 @@ func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface {
return newPriorityClasses(c)
}
+func (c *SchedulingV1alpha1Client) Workloads(namespace string) WorkloadInterface {
+ return newWorkloads(c, namespace)
+}
+
// NewForConfig creates a new SchedulingV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/workload.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/workload.go
new file mode 100644
index 00000000..9c906b5e
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/workload.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ context "context"
+
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ applyconfigurationsschedulingv1alpha1 "k8s.io/client-go/applyconfigurations/scheduling/v1alpha1"
+ gentype "k8s.io/client-go/gentype"
+ scheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+// WorkloadsGetter has a method to return a WorkloadInterface.
+// A group's client should implement this interface.
+type WorkloadsGetter interface {
+ Workloads(namespace string) WorkloadInterface
+}
+
+// WorkloadInterface has methods to work with Workload resources.
+type WorkloadInterface interface {
+ Create(ctx context.Context, workload *schedulingv1alpha1.Workload, opts v1.CreateOptions) (*schedulingv1alpha1.Workload, error)
+ Update(ctx context.Context, workload *schedulingv1alpha1.Workload, opts v1.UpdateOptions) (*schedulingv1alpha1.Workload, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*schedulingv1alpha1.Workload, error)
+ List(ctx context.Context, opts v1.ListOptions) (*schedulingv1alpha1.WorkloadList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *schedulingv1alpha1.Workload, err error)
+ Apply(ctx context.Context, workload *applyconfigurationsschedulingv1alpha1.WorkloadApplyConfiguration, opts v1.ApplyOptions) (result *schedulingv1alpha1.Workload, err error)
+ WorkloadExpansion
+}
+
+// workloads implements WorkloadInterface
+type workloads struct {
+ *gentype.ClientWithListAndApply[*schedulingv1alpha1.Workload, *schedulingv1alpha1.WorkloadList, *applyconfigurationsschedulingv1alpha1.WorkloadApplyConfiguration]
+}
+
+// newWorkloads returns a Workloads
+func newWorkloads(c *SchedulingV1alpha1Client, namespace string) *workloads {
+ return &workloads{
+ gentype.NewClientWithListAndApply[*schedulingv1alpha1.Workload, *schedulingv1alpha1.WorkloadList, *applyconfigurationsschedulingv1alpha1.WorkloadApplyConfiguration](
+ "workloads",
+ c.RESTClient(),
+ scheme.ParameterCodec,
+ namespace,
+ func() *schedulingv1alpha1.Workload { return &schedulingv1alpha1.Workload{} },
+ func() *schedulingv1alpha1.WorkloadList { return &schedulingv1alpha1.WorkloadList{} },
+ gentype.PrefersProtobuf[*schedulingv1alpha1.Workload](),
+ ),
+ }
+}
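For context only (not part of the vendored patch): a minimal sketch of consuming the new SchedulingV1alpha1 Workloads getter introduced above through a standard clientset. The kubeconfig handling, function name, and namespace are illustrative assumptions, and the alpha scheduling API must be enabled on the target cluster.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func listWorkloads(kubeconfig, namespace string) error {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// scheduling.k8s.io/v1alpha1 now exposes a namespaced Workloads client.
	workloads, err := clientset.SchedulingV1alpha1().Workloads(namespace).
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, w := range workloads.Items {
		fmt.Println(w.Name)
	}
	return nil
}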
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go
deleted file mode 100644
index 02de9f30..00000000
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
- v1alpha1 "k8s.io/api/storagemigration/v1alpha1"
- storagemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
- gentype "k8s.io/client-go/gentype"
- typedstoragemigrationv1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1"
-)
-
-// fakeStorageVersionMigrations implements StorageVersionMigrationInterface
-type fakeStorageVersionMigrations struct {
- *gentype.FakeClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]
- Fake *FakeStoragemigrationV1alpha1
-}
-
-func newFakeStorageVersionMigrations(fake *FakeStoragemigrationV1alpha1) typedstoragemigrationv1alpha1.StorageVersionMigrationInterface {
- return &fakeStorageVersionMigrations{
- gentype.NewFakeClientWithListAndApply[*v1alpha1.StorageVersionMigration, *v1alpha1.StorageVersionMigrationList, *storagemigrationv1alpha1.StorageVersionMigrationApplyConfiguration](
- fake.Fake,
- "",
- v1alpha1.SchemeGroupVersion.WithResource("storageversionmigrations"),
- v1alpha1.SchemeGroupVersion.WithKind("StorageVersionMigration"),
- func() *v1alpha1.StorageVersionMigration { return &v1alpha1.StorageVersionMigration{} },
- func() *v1alpha1.StorageVersionMigrationList { return &v1alpha1.StorageVersionMigrationList{} },
- func(dst, src *v1alpha1.StorageVersionMigrationList) { dst.ListMeta = src.ListMeta },
- func(list *v1alpha1.StorageVersionMigrationList) []*v1alpha1.StorageVersionMigration {
- return gentype.ToPointerSlice(list.Items)
- },
- func(list *v1alpha1.StorageVersionMigrationList, items []*v1alpha1.StorageVersionMigration) {
- list.Items = gentype.FromPointerSlice(items)
- },
- ),
- fake,
- }
-}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/doc.go
similarity index 97%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/doc.go
index df51baa4..77110195 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/doc.go
@@ -17,4 +17,4 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
-package v1alpha1
+package v1beta1
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/doc.go
similarity index 100%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/doc.go
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storagemigration_client.go
similarity index 75%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storagemigration_client.go
index c33a1c01..3c87c10a 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storagemigration_client.go
@@ -19,22 +19,22 @@ limitations under the License.
package fake
import (
- v1alpha1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1"
+ v1beta1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
-type FakeStoragemigrationV1alpha1 struct {
+type FakeStoragemigrationV1beta1 struct {
*testing.Fake
}
-func (c *FakeStoragemigrationV1alpha1) StorageVersionMigrations() v1alpha1.StorageVersionMigrationInterface {
+func (c *FakeStoragemigrationV1beta1) StorageVersionMigrations() v1beta1.StorageVersionMigrationInterface {
return newFakeStorageVersionMigrations(c)
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
-func (c *FakeStoragemigrationV1alpha1) RESTClient() rest.Interface {
+func (c *FakeStoragemigrationV1beta1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storageversionmigration.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storageversionmigration.go
new file mode 100644
index 00000000..21fc8dc8
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake/fake_storageversionmigration.go
@@ -0,0 +1,53 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "k8s.io/api/storagemigration/v1beta1"
+ storagemigrationv1beta1 "k8s.io/client-go/applyconfigurations/storagemigration/v1beta1"
+ gentype "k8s.io/client-go/gentype"
+ typedstoragemigrationv1beta1 "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1"
+)
+
+// fakeStorageVersionMigrations implements StorageVersionMigrationInterface
+type fakeStorageVersionMigrations struct {
+ *gentype.FakeClientWithListAndApply[*v1beta1.StorageVersionMigration, *v1beta1.StorageVersionMigrationList, *storagemigrationv1beta1.StorageVersionMigrationApplyConfiguration]
+ Fake *FakeStoragemigrationV1beta1
+}
+
+func newFakeStorageVersionMigrations(fake *FakeStoragemigrationV1beta1) typedstoragemigrationv1beta1.StorageVersionMigrationInterface {
+ return &fakeStorageVersionMigrations{
+ gentype.NewFakeClientWithListAndApply[*v1beta1.StorageVersionMigration, *v1beta1.StorageVersionMigrationList, *storagemigrationv1beta1.StorageVersionMigrationApplyConfiguration](
+ fake.Fake,
+ "",
+ v1beta1.SchemeGroupVersion.WithResource("storageversionmigrations"),
+ v1beta1.SchemeGroupVersion.WithKind("StorageVersionMigration"),
+ func() *v1beta1.StorageVersionMigration { return &v1beta1.StorageVersionMigration{} },
+ func() *v1beta1.StorageVersionMigrationList { return &v1beta1.StorageVersionMigrationList{} },
+ func(dst, src *v1beta1.StorageVersionMigrationList) { dst.ListMeta = src.ListMeta },
+ func(list *v1beta1.StorageVersionMigrationList) []*v1beta1.StorageVersionMigration {
+ return gentype.ToPointerSlice(list.Items)
+ },
+ func(list *v1beta1.StorageVersionMigrationList, items []*v1beta1.StorageVersionMigration) {
+ list.Items = gentype.FromPointerSlice(items)
+ },
+ ),
+ fake,
+ }
+}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/generated_expansion.go
similarity index 97%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/generated_expansion.go
index 89220c3c..87a244af 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/generated_expansion.go
@@ -16,6 +16,6 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
type StorageVersionMigrationExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storagemigration_client.go
similarity index 62%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storagemigration_client.go
index f7b5f5a1..01824079 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storagemigration_client.go
@@ -16,34 +16,34 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
http "net/http"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
-type StoragemigrationV1alpha1Interface interface {
+type StoragemigrationV1beta1Interface interface {
RESTClient() rest.Interface
StorageVersionMigrationsGetter
}
-// StoragemigrationV1alpha1Client is used to interact with features provided by the storagemigration.k8s.io group.
-type StoragemigrationV1alpha1Client struct {
+// StoragemigrationV1beta1Client is used to interact with features provided by the storagemigration.k8s.io group.
+type StoragemigrationV1beta1Client struct {
restClient rest.Interface
}
-func (c *StoragemigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface {
+func (c *StoragemigrationV1beta1Client) StorageVersionMigrations() StorageVersionMigrationInterface {
return newStorageVersionMigrations(c)
}
-// NewForConfig creates a new StoragemigrationV1alpha1Client for the given config.
+// NewForConfig creates a new StoragemigrationV1beta1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*StoragemigrationV1alpha1Client, error) {
+func NewForConfig(c *rest.Config) (*StoragemigrationV1beta1Client, error) {
config := *c
setConfigDefaults(&config)
httpClient, err := rest.HTTPClientFor(&config)
@@ -53,21 +53,21 @@ func NewForConfig(c *rest.Config) (*StoragemigrationV1alpha1Client, error) {
return NewForConfigAndClient(&config, httpClient)
}
-// NewForConfigAndClient creates a new StoragemigrationV1alpha1Client for the given config and http client.
+// NewForConfigAndClient creates a new StoragemigrationV1beta1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StoragemigrationV1alpha1Client, error) {
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*StoragemigrationV1beta1Client, error) {
config := *c
setConfigDefaults(&config)
client, err := rest.RESTClientForConfigAndClient(&config, h)
if err != nil {
return nil, err
}
- return &StoragemigrationV1alpha1Client{client}, nil
+ return &StoragemigrationV1beta1Client{client}, nil
}
-// NewForConfigOrDie creates a new StoragemigrationV1alpha1Client for the given config and
+// NewForConfigOrDie creates a new StoragemigrationV1beta1Client for the given config and
// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *StoragemigrationV1alpha1Client {
+func NewForConfigOrDie(c *rest.Config) *StoragemigrationV1beta1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
@@ -75,13 +75,13 @@ func NewForConfigOrDie(c *rest.Config) *StoragemigrationV1alpha1Client {
return client
}
-// New creates a new StoragemigrationV1alpha1Client for the given RESTClient.
-func New(c rest.Interface) *StoragemigrationV1alpha1Client {
- return &StoragemigrationV1alpha1Client{c}
+// New creates a new StoragemigrationV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *StoragemigrationV1beta1Client {
+ return &StoragemigrationV1beta1Client{c}
}
func setConfigDefaults(config *rest.Config) {
- gv := storagemigrationv1alpha1.SchemeGroupVersion
+ gv := storagemigrationv1beta1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
@@ -93,7 +93,7 @@ func setConfigDefaults(config *rest.Config) {
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
-func (c *StoragemigrationV1alpha1Client) RESTClient() rest.Interface {
+func (c *StoragemigrationV1beta1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
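For context only (not part of the vendored patch): a minimal sketch of building the renamed StoragemigrationV1beta1 group client directly from a rest.Config, using the NewForConfig constructor shown above. The package and function names are illustrative assumptions.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagemigrationv1beta1client "k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1"
	"k8s.io/client-go/rest"
)

func listStorageVersionMigrations(cfg *rest.Config) error {
	// NewForConfig returns a client scoped to the storagemigration.k8s.io/v1beta1 group.
	c, err := storagemigrationv1beta1client.NewForConfig(cfg)
	if err != nil {
		return err
	}
	_, err = c.StorageVersionMigrations().List(context.Background(), metav1.ListOptions{})
	return err
}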
diff --git a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storageversionmigration.go
similarity index 57%
rename from operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
rename to operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storageversionmigration.go
index 5c6981ec..67eba28e 100644
--- a/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go
+++ b/operator/vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/storageversionmigration.go
@@ -16,16 +16,16 @@ limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
context "context"
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
- applyconfigurationsstoragemigrationv1alpha1 "k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1"
+ applyconfigurationsstoragemigrationv1beta1 "k8s.io/client-go/applyconfigurations/storagemigration/v1beta1"
gentype "k8s.io/client-go/gentype"
scheme "k8s.io/client-go/kubernetes/scheme"
)
@@ -38,42 +38,42 @@ type StorageVersionMigrationsGetter interface {
// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources.
type StorageVersionMigrationInterface interface {
- Create(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
- Update(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
+ Create(ctx context.Context, storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, opts v1.CreateOptions) (*storagemigrationv1beta1.StorageVersionMigration, error)
+ Update(ctx context.Context, storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1beta1.StorageVersionMigration, error)
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
- UpdateStatus(ctx context.Context, storageVersionMigration *storagemigrationv1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
+ UpdateStatus(ctx context.Context, storageVersionMigration *storagemigrationv1beta1.StorageVersionMigration, opts v1.UpdateOptions) (*storagemigrationv1beta1.StorageVersionMigration, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
- Get(ctx context.Context, name string, opts v1.GetOptions) (*storagemigrationv1alpha1.StorageVersionMigration, error)
- List(ctx context.Context, opts v1.ListOptions) (*storagemigrationv1alpha1.StorageVersionMigrationList, error)
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*storagemigrationv1beta1.StorageVersionMigration, error)
+ List(ctx context.Context, opts v1.ListOptions) (*storagemigrationv1beta1.StorageVersionMigrationList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
- Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
- Apply(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *storagemigrationv1beta1.StorageVersionMigration, err error)
+ Apply(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1beta1.StorageVersionMigration, err error)
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
- ApplyStatus(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1alpha1.StorageVersionMigration, err error)
+ ApplyStatus(ctx context.Context, storageVersionMigration *applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationApplyConfiguration, opts v1.ApplyOptions) (result *storagemigrationv1beta1.StorageVersionMigration, err error)
StorageVersionMigrationExpansion
}
// storageVersionMigrations implements StorageVersionMigrationInterface
type storageVersionMigrations struct {
- *gentype.ClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration]
+ *gentype.ClientWithListAndApply[*storagemigrationv1beta1.StorageVersionMigration, *storagemigrationv1beta1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationApplyConfiguration]
}
// newStorageVersionMigrations returns a StorageVersionMigrations
-func newStorageVersionMigrations(c *StoragemigrationV1alpha1Client) *storageVersionMigrations {
+func newStorageVersionMigrations(c *StoragemigrationV1beta1Client) *storageVersionMigrations {
return &storageVersionMigrations{
- gentype.NewClientWithListAndApply[*storagemigrationv1alpha1.StorageVersionMigration, *storagemigrationv1alpha1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1alpha1.StorageVersionMigrationApplyConfiguration](
+ gentype.NewClientWithListAndApply[*storagemigrationv1beta1.StorageVersionMigration, *storagemigrationv1beta1.StorageVersionMigrationList, *applyconfigurationsstoragemigrationv1beta1.StorageVersionMigrationApplyConfiguration](
"storageversionmigrations",
c.RESTClient(),
scheme.ParameterCodec,
"",
- func() *storagemigrationv1alpha1.StorageVersionMigration {
- return &storagemigrationv1alpha1.StorageVersionMigration{}
+ func() *storagemigrationv1beta1.StorageVersionMigration {
+ return &storagemigrationv1beta1.StorageVersionMigration{}
},
- func() *storagemigrationv1alpha1.StorageVersionMigrationList {
- return &storagemigrationv1alpha1.StorageVersionMigrationList{}
+ func() *storagemigrationv1beta1.StorageVersionMigrationList {
+ return &storagemigrationv1beta1.StorageVersionMigrationList{}
},
- gentype.PrefersProtobuf[*storagemigrationv1alpha1.StorageVersionMigration](),
+ gentype.PrefersProtobuf[*storagemigrationv1beta1.StorageVersionMigration](),
),
}
}
diff --git a/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go b/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go
index fd8d9219..d77258cb 100644
--- a/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go
+++ b/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/expansion_generated.go
@@ -21,11 +21,3 @@ package v1alpha1
// ClusterTrustBundleListerExpansion allows custom methods to be added to
// ClusterTrustBundleLister.
type ClusterTrustBundleListerExpansion interface{}
-
-// PodCertificateRequestListerExpansion allows custom methods to be added to
-// PodCertificateRequestLister.
-type PodCertificateRequestListerExpansion interface{}
-
-// PodCertificateRequestNamespaceListerExpansion allows custom methods to be added to
-// PodCertificateRequestNamespaceLister.
-type PodCertificateRequestNamespaceListerExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go b/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go
index 12a2554d..13e0fd65 100644
--- a/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go
+++ b/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go
@@ -25,3 +25,11 @@ type CertificateSigningRequestListerExpansion interface{}
// ClusterTrustBundleListerExpansion allows custom methods to be added to
// ClusterTrustBundleLister.
type ClusterTrustBundleListerExpansion interface{}
+
+// PodCertificateRequestListerExpansion allows custom methods to be added to
+// PodCertificateRequestLister.
+type PodCertificateRequestListerExpansion interface{}
+
+// PodCertificateRequestNamespaceListerExpansion allows custom methods to be added to
+// PodCertificateRequestNamespaceLister.
+type PodCertificateRequestNamespaceListerExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/podcertificaterequest.go b/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/podcertificaterequest.go
similarity index 79%
rename from operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/podcertificaterequest.go
rename to operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/podcertificaterequest.go
index 0f90e8b1..94f7d0db 100644
--- a/operator/vendor/k8s.io/client-go/listers/certificates/v1alpha1/podcertificaterequest.go
+++ b/operator/vendor/k8s.io/client-go/listers/certificates/v1beta1/podcertificaterequest.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
- certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
+ certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
@@ -30,7 +30,7 @@ import (
type PodCertificateRequestLister interface {
// List lists all PodCertificateRequests in the indexer.
// Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*certificatesv1alpha1.PodCertificateRequest, err error)
+ List(selector labels.Selector) (ret []*certificatesv1beta1.PodCertificateRequest, err error)
// PodCertificateRequests returns an object that can list and get PodCertificateRequests.
PodCertificateRequests(namespace string) PodCertificateRequestNamespaceLister
PodCertificateRequestListerExpansion
@@ -38,17 +38,17 @@ type PodCertificateRequestLister interface {
// podCertificateRequestLister implements the PodCertificateRequestLister interface.
type podCertificateRequestLister struct {
- listers.ResourceIndexer[*certificatesv1alpha1.PodCertificateRequest]
+ listers.ResourceIndexer[*certificatesv1beta1.PodCertificateRequest]
}
// NewPodCertificateRequestLister returns a new PodCertificateRequestLister.
func NewPodCertificateRequestLister(indexer cache.Indexer) PodCertificateRequestLister {
- return &podCertificateRequestLister{listers.New[*certificatesv1alpha1.PodCertificateRequest](indexer, certificatesv1alpha1.Resource("podcertificaterequest"))}
+ return &podCertificateRequestLister{listers.New[*certificatesv1beta1.PodCertificateRequest](indexer, certificatesv1beta1.Resource("podcertificaterequest"))}
}
// PodCertificateRequests returns an object that can list and get PodCertificateRequests.
func (s *podCertificateRequestLister) PodCertificateRequests(namespace string) PodCertificateRequestNamespaceLister {
- return podCertificateRequestNamespaceLister{listers.NewNamespaced[*certificatesv1alpha1.PodCertificateRequest](s.ResourceIndexer, namespace)}
+ return podCertificateRequestNamespaceLister{listers.NewNamespaced[*certificatesv1beta1.PodCertificateRequest](s.ResourceIndexer, namespace)}
}
// PodCertificateRequestNamespaceLister helps list and get PodCertificateRequests.
@@ -56,15 +56,15 @@ func (s *podCertificateRequestLister) PodCertificateRequests(namespace string) P
type PodCertificateRequestNamespaceLister interface {
// List lists all PodCertificateRequests in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*certificatesv1alpha1.PodCertificateRequest, err error)
+ List(selector labels.Selector) (ret []*certificatesv1beta1.PodCertificateRequest, err error)
// Get retrieves the PodCertificateRequest from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
- Get(name string) (*certificatesv1alpha1.PodCertificateRequest, error)
+ Get(name string) (*certificatesv1beta1.PodCertificateRequest, error)
PodCertificateRequestNamespaceListerExpansion
}
// podCertificateRequestNamespaceLister implements the PodCertificateRequestNamespaceLister
// interface.
type podCertificateRequestNamespaceLister struct {
- listers.ResourceIndexer[*certificatesv1alpha1.PodCertificateRequest]
+ listers.ResourceIndexer[*certificatesv1beta1.PodCertificateRequest]
}
diff --git a/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go b/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go
index bde8b620..8617325d 100644
--- a/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go
+++ b/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go
@@ -21,3 +21,11 @@ package v1alpha1
// PriorityClassListerExpansion allows custom methods to be added to
// PriorityClassLister.
type PriorityClassListerExpansion interface{}
+
+// WorkloadListerExpansion allows custom methods to be added to
+// WorkloadLister.
+type WorkloadListerExpansion interface{}
+
+// WorkloadNamespaceListerExpansion allows custom methods to be added to
+// WorkloadNamespaceLister.
+type WorkloadNamespaceListerExpansion interface{}
diff --git a/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/workload.go b/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/workload.go
new file mode 100644
index 00000000..24f51a5e
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/workload.go
@@ -0,0 +1,70 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ listers "k8s.io/client-go/listers"
+ cache "k8s.io/client-go/tools/cache"
+)
+
+// WorkloadLister helps list Workloads.
+// All objects returned here must be treated as read-only.
+type WorkloadLister interface {
+ // List lists all Workloads in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*schedulingv1alpha1.Workload, err error)
+ // Workloads returns an object that can list and get Workloads.
+ Workloads(namespace string) WorkloadNamespaceLister
+ WorkloadListerExpansion
+}
+
+// workloadLister implements the WorkloadLister interface.
+type workloadLister struct {
+ listers.ResourceIndexer[*schedulingv1alpha1.Workload]
+}
+
+// NewWorkloadLister returns a new WorkloadLister.
+func NewWorkloadLister(indexer cache.Indexer) WorkloadLister {
+ return &workloadLister{listers.New[*schedulingv1alpha1.Workload](indexer, schedulingv1alpha1.Resource("workload"))}
+}
+
+// Workloads returns an object that can list and get Workloads.
+func (s *workloadLister) Workloads(namespace string) WorkloadNamespaceLister {
+ return workloadNamespaceLister{listers.NewNamespaced[*schedulingv1alpha1.Workload](s.ResourceIndexer, namespace)}
+}
+
+// WorkloadNamespaceLister helps list and get Workloads.
+// All objects returned here must be treated as read-only.
+type WorkloadNamespaceLister interface {
+ // List lists all Workloads in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*schedulingv1alpha1.Workload, err error)
+ // Get retrieves the Workload from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*schedulingv1alpha1.Workload, error)
+ WorkloadNamespaceListerExpansion
+}
+
+// workloadNamespaceLister implements the WorkloadNamespaceLister
+// interface.
+type workloadNamespaceLister struct {
+ listers.ResourceIndexer[*schedulingv1alpha1.Workload]
+}
diff --git a/operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go b/operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/expansion_generated.go
similarity index 97%
rename from operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go
rename to operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/expansion_generated.go
index 92eb5c65..9532c829 100644
--- a/operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go
+++ b/operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/expansion_generated.go
@@ -16,7 +16,7 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
// StorageVersionMigrationListerExpansion allows custom methods to be added to
// StorageVersionMigrationLister.
diff --git a/operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go b/operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/storageversionmigration.go
similarity index 78%
rename from operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go
rename to operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/storageversionmigration.go
index e7d164d0..f96e066a 100644
--- a/operator/vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go
+++ b/operator/vendor/k8s.io/client-go/listers/storagemigration/v1beta1/storageversionmigration.go
@@ -16,10 +16,10 @@ limitations under the License.
// Code generated by lister-gen. DO NOT EDIT.
-package v1alpha1
+package v1beta1
import (
- storagemigrationv1alpha1 "k8s.io/api/storagemigration/v1alpha1"
+ storagemigrationv1beta1 "k8s.io/api/storagemigration/v1beta1"
labels "k8s.io/apimachinery/pkg/labels"
listers "k8s.io/client-go/listers"
cache "k8s.io/client-go/tools/cache"
@@ -30,19 +30,19 @@ import (
type StorageVersionMigrationLister interface {
// List lists all StorageVersionMigrations in the indexer.
// Objects returned here must be treated as read-only.
- List(selector labels.Selector) (ret []*storagemigrationv1alpha1.StorageVersionMigration, err error)
+ List(selector labels.Selector) (ret []*storagemigrationv1beta1.StorageVersionMigration, err error)
// Get retrieves the StorageVersionMigration from the index for a given name.
// Objects returned here must be treated as read-only.
- Get(name string) (*storagemigrationv1alpha1.StorageVersionMigration, error)
+ Get(name string) (*storagemigrationv1beta1.StorageVersionMigration, error)
StorageVersionMigrationListerExpansion
}
// storageVersionMigrationLister implements the StorageVersionMigrationLister interface.
type storageVersionMigrationLister struct {
- listers.ResourceIndexer[*storagemigrationv1alpha1.StorageVersionMigration]
+ listers.ResourceIndexer[*storagemigrationv1beta1.StorageVersionMigration]
}
// NewStorageVersionMigrationLister returns a new StorageVersionMigrationLister.
func NewStorageVersionMigrationLister(indexer cache.Indexer) StorageVersionMigrationLister {
- return &storageVersionMigrationLister{listers.New[*storagemigrationv1alpha1.StorageVersionMigration](indexer, storagemigrationv1alpha1.Resource("storageversionmigration"))}
+ return &storageVersionMigrationLister{listers.New[*storagemigrationv1beta1.StorageVersionMigration](indexer, storagemigrationv1beta1.Resource("storageversionmigration"))}
}
diff --git a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
index e378b75c..0925da42 100644
--- a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
+++ b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.client-go.pkg.apis.clientauthentication.v1
// +groupName=client.authentication.k8s.io
diff --git a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.model_name.go b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.model_name.go
new file mode 100644
index 00000000..d8fb338e
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Cluster) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1.Cluster"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredential) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1.ExecCredential"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredentialSpec) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1.ExecCredentialSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredentialStatus) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1.ExecCredentialStatus"
+}
diff --git a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
index 6eb6a981..207ff5c9 100644
--- a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
+++ b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
@@ -18,6 +18,7 @@ limitations under the License.
// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
// +k8s:openapi-gen=true
// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-model-package=io.k8s.client-go.pkg.apis.clientauthentication.v1beta1
// +groupName=client.authentication.k8s.io
diff --git a/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.model_name.go b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.model_name.go
new file mode 100644
index 00000000..24b2c12f
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.model_name.go
@@ -0,0 +1,42 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by openapi-gen. DO NOT EDIT.
+
+package v1beta1
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in Cluster) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1beta1.Cluster"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredential) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1beta1.ExecCredential"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredentialSpec) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1beta1.ExecCredentialSpec"
+}
+
+// OpenAPIModelName returns the OpenAPI model name for this type.
+func (in ExecCredentialStatus) OpenAPIModelName() string {
+ return "io.k8s.client-go.pkg.apis.clientauthentication.v1beta1.ExecCredentialStatus"
+}
diff --git a/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
index b471f5cc..1af2afdb 100644
--- a/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ b/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -27,6 +27,7 @@ import (
"net/http"
"os"
"os/exec"
+ "path/filepath"
"reflect"
"strings"
"sync"
@@ -39,6 +40,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/dump"
utilnet "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/pkg/apis/clientauthentication"
"k8s.io/client-go/pkg/apis/clientauthentication/install"
clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
@@ -177,13 +179,29 @@ func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecC
connTracker,
)
+ if err := ValidatePluginPolicy(config.PluginPolicy); err != nil {
+ return nil, fmt.Errorf("invalid plugin policy: %w", err)
+ }
+
+ allowlistLookup := sets.New[string]()
+ for _, entry := range config.PluginPolicy.Allowlist {
+ if entry.Name != "" {
+ allowlistLookup.Insert(entry.Name)
+ }
+ }
+
a := &Authenticator{
- cmd: config.Command,
+ // Clean is called to normalize the path to facilitate comparison with
+ // the allowlist, when present
+ cmd: filepath.Clean(config.Command),
args: config.Args,
group: gv,
cluster: cluster,
provideClusterInfo: config.ProvideClusterInfo,
+ allowlistLookup: allowlistLookup,
+ execPluginPolicy: config.PluginPolicy,
+
installHint: config.InstallHint,
sometimes: &sometimes{
threshold: 10,
@@ -250,6 +268,9 @@ type Authenticator struct {
cluster *clientauthentication.Cluster
provideClusterInfo bool
+ allowlistLookup sets.Set[string]
+ execPluginPolicy api.PluginPolicy
+
// Used to avoid log spew by rate limiting install hint printing. We didn't do
// this by interval based rate limiting alone since that way may have prevented
// the install hint from showing up for kubectl users.
@@ -441,6 +462,12 @@ func (a *Authenticator) refreshCredsLocked() error {
cmd.Stdin = a.stdin
}
+ err = a.updateCommandAndCheckAllowlistLocked(cmd)
+ incrementPolicyMetric(err)
+ if err != nil {
+ return err
+ }
+
err = cmd.Run()
incrementCallsMetric(err)
if err != nil {
@@ -545,3 +572,131 @@ func (a *Authenticator) wrapCmdRunErrorLocked(err error) error {
return fmt.Errorf("exec: %v", err)
}
}
+
+// updateCommandAndCheckAllowlistLocked determines whether the specified executable may run
+// according to the credential plugin policy. If the plugin is allowed, nil is returned.
+// If the plugin is not allowed, an error explaining why is returned.
+func (a *Authenticator) updateCommandAndCheckAllowlistLocked(cmd *exec.Cmd) error {
+ switch a.execPluginPolicy.PolicyType {
+ case "", api.PluginPolicyAllowAll:
+ return nil
+ case api.PluginPolicyDenyAll:
+ return fmt.Errorf("plugin %q not allowed: policy set to %q", a.cmd, api.PluginPolicyDenyAll)
+ case api.PluginPolicyAllowlist:
+ return a.checkAllowlistLocked(cmd)
+ default:
+ return fmt.Errorf("unknown plugin policy %q", a.execPluginPolicy.PolicyType)
+ }
+}
+
+// checkAllowlistLocked checks the specified plugin against the allowlist,
+// and may update the Authenticator's allowlistLookup set.
+func (a *Authenticator) checkAllowlistLocked(cmd *exec.Cmd) error {
+ // a.cmd is the original command as specified in the configuration, then filepath.Clean().
+ // cmd.Path is the possibly-resolved command.
+ // If either are an exact match in the allowlist, return success.
+ if a.allowlistLookup.Has(a.cmd) || a.allowlistLookup.Has(cmd.Path) {
+ return nil
+ }
+
+ var cmdResolvedPath string
+ var cmdResolvedErr error
+ if cmd.Path != a.cmd {
+ // cmd.Path changed, use the already-resolved LookPath results
+ cmdResolvedPath = cmd.Path
+ cmdResolvedErr = cmd.Err
+ } else {
+ // cmd.Path is unchanged, do LookPath ourselves
+ cmdResolvedPath, cmdResolvedErr = exec.LookPath(cmd.Path)
+ // update cmd.Path to cmdResolvedPath so we only run the resolved path
+ if cmdResolvedPath != "" {
+ cmd.Path = cmdResolvedPath
+ }
+ }
+
+ if cmdResolvedErr != nil {
+ return fmt.Errorf("plugin path %q cannot be resolved for credential plugin allowlist check: %w", cmd.Path, cmdResolvedErr)
+ }
+
+ // cmdResolvedPath may have changed, and the changed value may be in the allowlist
+ if a.allowlistLookup.Has(cmdResolvedPath) {
+ return nil
+ }
+
+ // There is no verbatim match
+ a.resolveAllowListEntriesLocked(cmd.Path)
+
+ // allowlistLookup may have changed, recheck
+ if a.allowlistLookup.Has(cmdResolvedPath) {
+ return nil
+ }
+
+ return fmt.Errorf("plugin path %q is not permitted by the credential plugin allowlist", cmd.Path)
+}
+
+// resolveAllowListEntriesLocked tries to resolve allowlist entries with LookPath,
+// and adds successfully resolved entries to allowlistLookup.
+// The optional commandHint can be used to limit which entries are resolved to ones which match the hint basename.
+func (a *Authenticator) resolveAllowListEntriesLocked(commandHint string) {
+ hintName := filepath.Base(commandHint)
+ for _, entry := range a.execPluginPolicy.Allowlist {
+ entryBasename := filepath.Base(entry.Name)
+ if hintName != "" && hintName != entryBasename {
+ // we got a hint, and this allowlist entry does not match it
+ continue
+ }
+ entryResolvedPath, err := exec.LookPath(entry.Name)
+ if err != nil {
+ klog.V(5).ErrorS(err, "resolving credential plugin allowlist", "name", entry.Name)
+ continue
+ }
+ if entryResolvedPath != "" {
+ a.allowlistLookup.Insert(entryResolvedPath)
+ }
+ }
+}
+
+func ValidatePluginPolicy(policy api.PluginPolicy) error {
+ switch policy.PolicyType {
+ // "" is equivalent to "AllowAll"
+ case "", api.PluginPolicyAllowAll, api.PluginPolicyDenyAll:
+ if policy.Allowlist != nil {
+ return fmt.Errorf("misconfigured credential plugin allowlist: plugin policy is %q but allowlist is non-nil", policy.PolicyType)
+ }
+ return nil
+ case api.PluginPolicyAllowlist:
+ return validateAllowlist(policy.Allowlist)
+ default:
+ return fmt.Errorf("unknown plugin policy: %q", policy.PolicyType)
+ }
+}
+
+var emptyAllowlistEntry api.AllowlistEntry
+
+func validateAllowlist(list []api.AllowlistEntry) error {
+ // This will be the case if the user has misspelled the field name for the
+ // allowlist. Because this is a security knob, fail immediately rather than
+ // proceed when the user has made a mistake.
+ if list == nil {
+ return fmt.Errorf("credential plugin policy set to %q, but allowlist is unspecified", api.PluginPolicyAllowlist)
+ }
+
+ if len(list) == 0 {
+ return fmt.Errorf("credential plugin policy set to %q, but allowlist is empty; use %q policy instead", api.PluginPolicyAllowlist, api.PluginPolicyDenyAll)
+ }
+
+ for i, item := range list {
+ if item == emptyAllowlistEntry {
+ return fmt.Errorf("misconfigured credential plugin allowlist: empty allowlist entry #%d", i+1)
+ }
+
+ if cleaned := filepath.Clean(item.Name); cleaned != item.Name {
+ return fmt.Errorf("non-normalized file path: %q vs %q", item.Name, cleaned)
+ } else if item.Name == "" {
+ return fmt.Errorf("empty file path: %q", item.Name)
+ }
+ }
+
+ return nil
+}
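
Reviewer note (not part of the vendored patch): a minimal sketch of how the plugin policy surface added above (api.PluginPolicy, api.AllowlistEntry, exec.ValidatePluginPolicy) could be exercised; the plugin path is hypothetical.

package main

import (
	"fmt"

	"k8s.io/client-go/plugin/pkg/client/auth/exec"
	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// Allow a single, already-normalized plugin path; any other command is
	// denied by the allowlist check at credential refresh time.
	policy := api.PluginPolicy{
		PolicyType: api.PluginPolicyAllowlist,
		Allowlist: []api.AllowlistEntry{
			{Name: "/usr/local/bin/kubelogin"}, // hypothetical plugin path
		},
	}

	// ValidatePluginPolicy rejects misconfigurations such as a non-nil
	// allowlist under AllowAll/DenyAll, empty entries, or non-normalized paths.
	if err := exec.ValidatePluginPolicy(policy); err != nil {
		fmt.Println("invalid policy:", err)
		return
	}
	fmt.Println("policy accepted")
}
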
diff --git a/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go b/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
index 51210975..fb300ae1 100644
--- a/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
+++ b/operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
@@ -49,6 +49,13 @@ const (
// used in some failure modes (e.g., plugin not found, client internal error) so that someone
// can more easily monitor all unsuccessful invocations.
failureExitCode = 1
+
+ // pluginAllowed represents an exec plugin invocation that was allowed by
+ // the plugin policy and/or the allowlist
+ pluginAllowed = "allowed"
+ // pluginDenied represents an exec plugin invocation that was denied by
+ // the plugin policy and/or the allowlist
+ pluginDenied = "denied"
)
type certificateExpirationTracker struct {
@@ -109,3 +116,12 @@ func incrementCallsMetric(err error) {
metrics.ExecPluginCalls.Increment(failureExitCode, clientInternalError)
}
}
+
+func incrementPolicyMetric(err error) {
+ if err != nil {
+ metrics.ExecPluginPolicyCalls.Increment(pluginDenied)
+ return
+ }
+
+ metrics.ExecPluginPolicyCalls.Increment(pluginAllowed)
+}
diff --git a/operator/vendor/k8s.io/client-go/rest/.mockery.yaml b/operator/vendor/k8s.io/client-go/rest/.mockery.yaml
index e21d7b5b..b4df9ca8 100644
--- a/operator/vendor/k8s.io/client-go/rest/.mockery.yaml
+++ b/operator/vendor/k8s.io/client-go/rest/.mockery.yaml
@@ -1,10 +1,11 @@
---
dir: .
-filename: "mock_{{.InterfaceName | snakecase}}_test.go"
-boilerplate-file: ../../../../../hack/boilerplate/boilerplate.generatego.txt
-outpkg: rest
-with-expecter: true
+pkgname: rest
+template: testify
+template-data:
+ boilerplate-file: ../../../../../hack/boilerplate/boilerplate.generatego.txt
+ unroll-variadic: true
packages:
k8s.io/client-go/rest:
interfaces:
- BackoffManager:
+ BackoffManager: {}
diff --git a/operator/vendor/k8s.io/client-go/rest/urlbackoff.go b/operator/vendor/k8s.io/client-go/rest/urlbackoff.go
index 5b7b4e21..a6510e80 100644
--- a/operator/vendor/k8s.io/client-go/rest/urlbackoff.go
+++ b/operator/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -141,8 +141,8 @@ func (b *URLBackoff) CalculateBackoff(actualURL *url.URL) time.Duration {
// CalculateBackoffWithContext takes a url and back's off exponentially,
// based on its knowledge of existing failures.
-func (b *URLBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
- return b.Backoff.Get(b.baseUrlKey(actualURL))
+func (b *URLBackoff) CalculateBackoffWithContext(_ context.Context, actualURL *url.URL) time.Duration {
+ return b.CalculateBackoff(actualURL)
}
func (b *URLBackoff) Sleep(d time.Duration) {
diff --git a/operator/vendor/k8s.io/client-go/rest/warnings.go b/operator/vendor/k8s.io/client-go/rest/warnings.go
index 713b2d64..62bbdcee 100644
--- a/operator/vendor/k8s.io/client-go/rest/warnings.go
+++ b/operator/vendor/k8s.io/client-go/rest/warnings.go
@@ -96,11 +96,8 @@ var _ WarningHandlerWithContext = NoWarnings{}
// WarningLogger is an implementation of [WarningHandler] and [WarningHandlerWithContext] that logs code 299 warnings
type WarningLogger struct{}
-func (WarningLogger) HandleWarningHeader(code int, agent string, message string) {
- if code != 299 || len(message) == 0 {
- return
- }
- klog.Background().Info("Warning: " + message)
+func (w WarningLogger) HandleWarningHeader(code int, agent string, message string) {
+ w.HandleWarningHeaderWithContext(context.Background(), code, agent, message)
}
func (WarningLogger) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
diff --git a/operator/vendor/k8s.io/client-go/testing/fixture.go b/operator/vendor/k8s.io/client-go/testing/fixture.go
index 65c96a47..152a5c1b 100644
--- a/operator/vendor/k8s.io/client-go/testing/fixture.go
+++ b/operator/vendor/k8s.io/client-go/testing/fixture.go
@@ -511,6 +511,17 @@ func (t *tracker) Apply(gvr schema.GroupVersionResource, applyConfiguration runt
return t.add(gvr, obj, ns, true)
}
+// IsWatchListSemanticsUnSupported informs the reflector that this client
+// doesn't support WatchList semantics.
+//
+// This is a synthetic method whose sole purpose is to satisfy the optional
+// interface check performed by the reflector.
+// Returning true signals that WatchList can NOT be used.
+// No additional logic is implemented here.
+func (t *tracker) IsWatchListSemanticsUnSupported() bool {
+ return true
+}
+
func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher {
watches := []*watch.RaceFreeFakeWatcher{}
if t.watchers[gvr] != nil {
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/controller.go b/operator/vendor/k8s.io/client-go/tools/cache/controller.go
index 5f983b6b..51e0a465 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -19,13 +19,14 @@ package cache
import (
"context"
"errors"
- clientgofeaturegate "k8s.io/client-go/features"
+ "fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
+ clientgofeaturegate "k8s.io/client-go/features"
"k8s.io/utils/clock"
)
@@ -48,9 +49,20 @@ type Config struct {
// Something that can list and watch your objects.
ListerWatcher
- // Something that can process a popped Deltas.
+ // Process can process a popped Deltas.
Process ProcessFunc
+ // ProcessBatch can process a batch of popped Deltas; it should return a TransactionError if not all
+ // items in the batch were successfully processed.
+ //
+ // For batch processing to be used:
+ // * ProcessBatch must be non-nil
+ // * Queue must implement QueueWithBatch
+ // * The client InOrderInformersBatchProcess feature gate must be enabled
+ //
+ // If any of those are false, Process is used and no batch processing is done.
+ ProcessBatch ProcessBatchFunc
+
// ObjectType is an example object of the type this controller is
// expected to handle.
ObjectType runtime.Object
@@ -94,6 +106,10 @@ type ShouldResyncFunc func() bool
// ProcessFunc processes a single object.
type ProcessFunc func(obj interface{}, isInInitialList bool) error
+// ProcessBatchFunc processes multiple objects in batch.
+// The deltas must not contain multiple entries for the same object.
+type ProcessBatchFunc func(deltas []Delta, isInInitialList bool) error
+
// `*controller` implements Controller
type controller struct {
config Config
@@ -203,12 +219,23 @@ func (c *controller) LastSyncResourceVersion() string {
// to make sure that we don't end up processing the same object multiple times
// concurrently.
func (c *controller) processLoop(ctx context.Context) {
+ useBatchProcess := false
+ batchQueue, ok := c.config.Queue.(QueueWithBatch)
+ if ok && c.config.ProcessBatch != nil && clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformersBatchProcess) {
+ useBatchProcess = true
+ }
for {
select {
case <-ctx.Done():
return
default:
- _, err := c.config.Pop(PopProcessFunc(c.config.Process))
+ var err error
+ if useBatchProcess {
+ err = batchQueue.PopBatch(c.config.ProcessBatch)
+ } else {
+ // otherwise fallback to non-batch process behavior
+ _, err = c.config.Pop(PopProcessFunc(c.config.Process))
+ }
if err != nil {
if errors.Is(err, ErrFIFOClosed) {
return
@@ -585,6 +612,91 @@ func processDeltas(
return nil
}
+// processDeltasInBatch applies a batch of Delta objects to the given Store and
+// notifies the ResourceEventHandler of add, update, or delete events.
+//
+// If the Store supports transactions (TransactionStore), all Deltas are applied
+// atomically in a single transaction and corresponding handler callbacks are
+// executed afterward. Otherwise, each Delta is processed individually.
+//
+// Returns an error if any Delta or transaction fails. For TransactionError,
+// only successful operations trigger callbacks.
+func processDeltasInBatch(
+ handler ResourceEventHandler,
+ clientState Store,
+ deltas []Delta,
+ isInInitialList bool,
+) error {
+ // from oldest to newest
+ txns := make([]Transaction, 0)
+ callbacks := make([]func(), 0)
+ txnStore, txnSupported := clientState.(TransactionStore)
+ if !txnSupported {
+ var errs []error
+ for _, delta := range deltas {
+ if err := processDeltas(handler, clientState, Deltas{delta}, isInInitialList); err != nil {
+ errs = append(errs, err)
+ }
+ }
+ if len(errs) > 0 {
+ return fmt.Errorf("unexpected error when handling deltas: %v", errs)
+ }
+ return nil
+ }
+ // deltasList is a list of unique objects
+ for _, d := range deltas {
+ obj := d.Object
+ switch d.Type {
+ case Sync, Replaced, Added, Updated:
+ // it will only return one old object for each because items are unique
+ if old, exists, err := clientState.Get(obj); err == nil && exists {
+ txn := Transaction{
+ Type: TransactionTypeUpdate,
+ Object: obj,
+ }
+ txns = append(txns, txn)
+ callbacks = append(callbacks, func() {
+ handler.OnUpdate(old, obj)
+ })
+ } else {
+ txn := Transaction{
+ Type: TransactionTypeAdd,
+ Object: obj,
+ }
+ txns = append(txns, txn)
+ callbacks = append(callbacks, func() {
+ handler.OnAdd(obj, isInInitialList)
+ })
+ }
+ case Deleted:
+ txn := Transaction{
+ Type: TransactionTypeDelete,
+ Object: obj,
+ }
+ txns = append(txns, txn)
+ callbacks = append(callbacks, func() {
+ handler.OnDelete(obj)
+ })
+ }
+ }
+
+ err := txnStore.Transaction(txns...)
+ if err != nil {
+ // if txn had error, only execute the callbacks for the successful ones
+ for _, i := range err.SuccessfulIndices {
+ if i < len(callbacks) {
+ callbacks[i]()
+ }
+ }
+ // Format the error so txns doesn't escape and stays allocated on the stack.
+ return fmt.Errorf("not all items in the batch successfully processed: %s", err.Error())
+ }
+ for _, callback := range callbacks {
+ callback()
+ }
+ return nil
+}
+
// newInformer returns a controller for populating the store while also
// providing event notifications.
//
@@ -596,16 +708,7 @@ func newInformer(clientState Store, options InformerOptions) Controller {
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
- var fifo Queue
- if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
- fifo = NewRealFIFO(MetaNamespaceKeyFunc, clientState, options.Transform)
- } else {
- fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
- KnownObjects: clientState,
- EmitDeltaTypeReplaced: true,
- Transformer: options.Transform,
- })
- }
+ fifo := newQueueFIFO(clientState, options.Transform)
cfg := &Config{
Queue: fifo,
@@ -620,6 +723,25 @@ func newInformer(clientState Store, options InformerOptions) Controller {
}
return errors.New("object given as Process argument is not Deltas")
},
+ ProcessBatch: func(deltaList []Delta, isInInitialList bool) error {
+ return processDeltasInBatch(options.Handler, clientState, deltaList, isInInitialList)
+ },
}
return New(cfg)
}
+
+func newQueueFIFO(clientState Store, transform TransformFunc) Queue {
+ if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
+ return NewRealFIFOWithOptions(RealFIFOOptions{
+ KeyFunction: MetaNamespaceKeyFunc,
+ KnownObjects: clientState,
+ Transformer: transform,
+ })
+ } else {
+ return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+ KnownObjects: clientState,
+ EmitDeltaTypeReplaced: true,
+ Transformer: transform,
+ })
+ }
+}
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/operator/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index 9d9e238c..217bcf8b 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -270,7 +270,8 @@ func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
}
var (
- _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+ _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue
+ _ = TransformingStore(&DeltaFIFO{}) // DeltaFIFO implements TransformingStore to allow memory optimizations
)
var (
@@ -554,7 +555,7 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
f.lock.Lock()
defer f.lock.Unlock()
- keys := make(sets.String, len(list))
+ keys := make(sets.Set[string], len(list))
// keep backwards compat for old clients
action := Sync
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/operator/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
index a16f4735..d716b232 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go
@@ -35,7 +35,7 @@ func (c *fakeThreadSafeMap) Delete(key string) {
// FakeExpirationPolicy keeps the list for keys which never expires.
type FakeExpirationPolicy struct {
- NeverExpire sets.String
+ NeverExpire sets.Set[string]
RetrieveKeyFunc KeyFunc
}
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/fifo.go b/operator/vendor/k8s.io/client-go/tools/cache/fifo.go
index 5c2ca900..eb3ef589 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/fifo.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/fifo.go
@@ -60,6 +60,23 @@ type Queue interface {
Close()
}
+// QueueWithBatch extends the Queue interface with support for batch processing.
+//
+// In addition to the standard single-item Pop method, QueueWithBatch provides
+// PopBatch, which allows multiple items to be popped and processed together as
+// a batch. This can be used to improve processing efficiency when it is
+// beneficial to handle multiple queued keys or accumulators in a single
+// operation.
+// TODO: Consider merging this interface into Queue after feature gate GA
+type QueueWithBatch interface {
+ Queue
+
+ // PopBatch behaves similarly to Queue#Pop, but processes multiple keys
+ // as a batch. The implementation determines the batching strategy,
+ // such as the number of keys to include per batch.
+ PopBatch(ProcessBatchFunc) error
+}
+
// Pop is helper function for popping from Queue.
// WARNING: Do NOT use this function in non-test code to avoid races
// unless you really really really really know what you are doing.
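
Reviewer note (not part of the vendored patch): a sketch of preferring PopBatch when a Queue also satisfies the new QueueWithBatch interface, mirroring the processLoop change earlier in this diff; handleBatch is a hypothetical handler.

package informerbatch

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// handleBatch is a hypothetical handler matching cache.ProcessBatchFunc.
func handleBatch(deltas []cache.Delta, isInInitialList bool) error {
	fmt.Printf("processing %d deltas (initial list: %v)\n", len(deltas), isInInitialList)
	return nil
}

// popOnce processes one batch when the queue supports batching, otherwise a single item.
func popOnce(q cache.Queue) error {
	if bq, ok := q.(cache.QueueWithBatch); ok {
		return bq.PopBatch(handleBatch)
	}
	_, err := q.Pop(func(obj interface{}, isInInitialList bool) error {
		// non-batch fallback: handle one popped object at a time
		return nil
	})
	return err
}
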
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/index.go b/operator/vendor/k8s.io/client-go/tools/cache/index.go
index c5819fb6..395268f6 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/index.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/index.go
@@ -91,10 +91,10 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) {
}
// Index maps the indexed value to a set of keys in the store that match on that value
-type Index map[string]sets.String
+type index map[string]sets.Set[string]
// Indexers maps a name to an IndexFunc
type Indexers map[string]IndexFunc
// Indices maps a name to an Index
-type Indices map[string]Index
+type Indices map[string]index
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/listwatch.go b/operator/vendor/k8s.io/client-go/tools/cache/listwatch.go
index f5b04a19..2c4065f0 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/listwatch.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/listwatch.go
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
+ "k8s.io/client-go/util/watchlist"
)
// Lister is any object that knows how to perform an initial list.
@@ -130,6 +131,35 @@ type listerWatcherWrapper struct {
ListerWithContext
WatcherWithContext
}
+type listWatcherWithWatchListSemanticsWrapper struct {
+ *ListWatch
+
+ // unsupportedWatchListSemantics indicates whether a client explicitly does NOT support
+ // WatchList semantics.
+ //
+ // Over the years, unit tests in kube have been written in many different ways.
+ // After enabling the WatchListClient feature by default, existing tests started failing.
+ // To avoid breaking lots of existing client-go users after upgrade,
+ // we introduced this field as an opt-in.
+ //
+ // When true, the reflector disables WatchList even if the feature gate is enabled.
+ unsupportedWatchListSemantics bool
+}
+
+func (lw *listWatcherWithWatchListSemanticsWrapper) IsWatchListSemanticsUnSupported() bool {
+ return lw.unsupportedWatchListSemantics
+}
+
+// ToListWatcherWithWatchListSemantics returns a ListerWatcher
+// that knows whether the provided client explicitly
+// does NOT support the WatchList semantics. This allows Reflectors
+// to adapt their behavior based on client capabilities.
+func ToListWatcherWithWatchListSemantics(lw *ListWatch, client any) ListerWatcher {
+ return &listWatcherWithWatchListSemanticsWrapper{
+ lw,
+ watchlist.DoesClientNotSupportWatchListSemantics(client),
+ }
+}
// ListFunc knows how to list resources
//
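
Reviewer note (not part of the vendored patch): a sketch of wiring the new wrapper so a reflector can detect clients that opt out of WatchList semantics; passing the clientset as the client argument is an assumption about what watchlist.DoesClientNotSupportWatchListSemantics expects.

package watchlistaware

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodListerWatcher wraps a plain ListWatch so reflectors can call
// IsWatchListSemanticsUnSupported() before enabling the WatchList path.
func newPodListerWatcher(cs kubernetes.Interface) cache.ListerWatcher {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	return cache.ToListWatcherWithWatchListSemantics(lw, cs)
}
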
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/reflector.go b/operator/vendor/k8s.io/client-go/tools/cache/reflector.go
index ee9be772..af2c7a22 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -41,6 +41,7 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientfeatures "k8s.io/client-go/features"
"k8s.io/client-go/tools/pager"
+ "k8s.io/client-go/util/watchlist"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
"k8s.io/utils/ptr"
@@ -49,11 +50,9 @@ import (
const defaultExpectedTypeName = ""
-var (
- // We try to spread the load on apiserver by setting timeouts for
- // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
- defaultMinWatchTimeout = 5 * time.Minute
-)
+// We try to spread the load on apiserver by setting timeouts for
+// watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout].
+var defaultMinWatchTimeout = 5 * time.Minute
// ReflectorStore is the subset of cache.Store that the reflector uses
type ReflectorStore interface {
@@ -80,7 +79,7 @@ type ReflectorStore interface {
// TransformingStore is an optional interface that can be implemented by the provided store.
// If implemented on the provided store reflector will use the same transformer in its internal stores.
type TransformingStore interface {
- Store
+ ReflectorStore
Transformer() TransformFunc
}
@@ -299,6 +298,16 @@ func NewReflectorWithOptions(lw ListerWatcher, expectedType interface{}, store R
}
r.useWatchList = clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient)
+ if r.useWatchList && watchlist.DoesClientNotSupportWatchListSemantics(lw) {
+ // Using klog.TODO() here because switching to a caller-provided contextual logger
+ // would require an API change and updating all existing call sites.
+ klog.TODO().V(2).Info(
+ "The provided ListWatcher doesn't support WatchList semantics. The feature will be disabled. If you are using a custom client, check the documentation of watchlist.DoesClientNotSupportWatchListSemantics() method",
+ "listWatcherType", fmt.Sprintf("%T", lw),
+ "feature", clientfeatures.WatchListClient,
+ )
+ r.useWatchList = false
+ }
return r
}
@@ -365,9 +374,6 @@ func (r *Reflector) RunWithContext(ctx context.Context) {
}
var (
- // nothing will ever be sent down this channel
- neverExitWatch <-chan time.Time = make(chan time.Time)
-
// Used to indicate that watching stopped because of a signal from the stop
// channel passed in from a client of the reflector.
errorStopRequested = errors.New("stop requested")
@@ -377,7 +383,8 @@ var (
// required, and a cleanup function.
func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
if r.resyncPeriod == 0 {
- return neverExitWatch, func() bool { return false }
+ // nothing will ever be sent down this channel
+ return nil, func() bool { return false }
}
// The cleanup function is required: imagine the scenario where watches
// always fail so we end up listing frequently. Then, if we don't
@@ -419,7 +426,10 @@ func (r *Reflector) ListAndWatchWithContext(ctx context.Context) error {
return nil
}
if err != nil {
- logger.Error(err, "The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking")
+ logger.V(4).Info(
+ "Data couldn't be fetched in watchlist mode. Falling back to regular list. This is expected if watchlist is not supported or disabled in kube-apiserver.",
+ "err", err,
+ )
fallbackToList = true
// ensure that we won't accidentally pass some garbage down the watch.
w = nil
@@ -726,9 +736,11 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
return false
}
+ var transformer TransformFunc
storeOpts := []StoreOption{}
if tr, ok := r.store.(TransformingStore); ok && tr.Transformer() != nil {
- storeOpts = append(storeOpts, WithTransformer(tr.Transformer()))
+ transformer = tr.Transformer()
+ storeOpts = append(storeOpts, WithTransformer(transformer))
}
initTrace := trace.New("Reflector WatchList", trace.Field{Key: "name", Value: r.name})
@@ -788,7 +800,7 @@ func (r *Reflector) watchList(ctx context.Context) (watch.Interface, error) {
// we utilize the temporaryStore to ensure independence from the current store implementation.
// as of today, the store is implemented as a queue and will be drained by the higher-level
// component as soon as it finishes replacing the content.
- checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, temporaryStore.List)
+ checkWatchListDataConsistencyIfRequested(ctx, r.name, resourceVersion, r.listerWatcher.ListWithContext, transformer, temporaryStore.List)
if err := r.store.Replace(temporaryStore.List(), resourceVersion); err != nil {
return nil, fmt.Errorf("unable to sync watch-list result: %w", err)
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go b/operator/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
index a7e0d9c4..4119c78a 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/reflector_data_consistency_detector.go
@@ -33,11 +33,11 @@ import (
//
// Note that this function will panic when data inconsistency is detected.
// This is intentional because we want to catch it in the CI.
-func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
+func checkWatchListDataConsistencyIfRequested[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn consistencydetector.ListFunc[T], listItemTransformFunc func(interface{}) (interface{}, error), retrieveItemsFn consistencydetector.RetrieveItemsFunc[U]) {
if !consistencydetector.IsDataConsistencyDetectionForWatchListEnabled() {
return
}
// for informers we pass an empty ListOptions because
// listFn might be wrapped for filtering during informer construction.
- consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, metav1.ListOptions{}, retrieveItemsFn)
+ consistencydetector.CheckDataConsistency(ctx, identity, lastSyncedResourceVersion, listFn, listItemTransformFunc, metav1.ListOptions{}, retrieveItemsFn)
}
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/operator/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index 99e5fcd1..8973a33e 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -539,16 +539,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) {
s.startedLock.Lock()
defer s.startedLock.Unlock()
- var fifo Queue
- if clientgofeaturegate.FeatureGates().Enabled(clientgofeaturegate.InOrderInformers) {
- fifo = NewRealFIFO(MetaNamespaceKeyFunc, s.indexer, s.transform)
- } else {
- fifo = NewDeltaFIFOWithOptions(DeltaFIFOOptions{
- KnownObjects: s.indexer,
- EmitDeltaTypeReplaced: true,
- Transformer: s.transform,
- })
- }
+ fifo := newQueueFIFO(s.indexer, s.transform)
cfg := &Config{
Queue: fifo,
@@ -559,6 +550,7 @@ func (s *sharedIndexInformer) RunWithContext(ctx context.Context) {
ShouldResync: s.processor.shouldResync,
Process: s.HandleDeltas,
+ ProcessBatch: s.HandleBatchDeltas,
WatchErrorHandlerWithContext: s.watchErrorHandler,
}
@@ -731,6 +723,12 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}, isInInitialList bool
return errors.New("object given as Process argument is not Deltas")
}
+func (s *sharedIndexInformer) HandleBatchDeltas(deltas []Delta, isInInitialList bool) error {
+ s.blockDeltas.Lock()
+ defer s.blockDeltas.Unlock()
+ return processDeltasInBatch(s, s.indexer, deltas, isInInitialList)
+}
+
// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnAdd(obj interface{}, isInInitialList bool) {
// Invocation of this function is locked under s.blockDeltas, so it is
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/store.go b/operator/vendor/k8s.io/client-go/tools/cache/store.go
index 1d068580..a412fd70 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/store.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/store.go
@@ -17,6 +17,7 @@ limitations under the License.
package cache
import (
+ "errors"
"fmt"
"strings"
@@ -71,6 +72,42 @@ type Store interface {
Resync() error
}
+// TransactionType defines the type of a transaction operation. It is used to indicate whether
+// an object is being added, updated, or deleted.
+type TransactionType string
+
+const (
+ TransactionTypeAdd TransactionType = "Add"
+ TransactionTypeUpdate TransactionType = "Update"
+ TransactionTypeDelete TransactionType = "Delete"
+)
+
+// Transaction represents a single operation or event in a process. It holds a generic Object
+// associated with the transaction and a Type indicating the kind of transaction being performed.
+type Transaction struct {
+ Object interface{}
+ Type TransactionType
+}
+
+type TransactionStore interface {
+ // Transaction allows multiple operations to occur within a single lock acquisition to
+ // ensure progress can be made when there is contention.
+ Transaction(txns ...Transaction) *TransactionError
+}
+
+var _ error = &TransactionError{}
+
+type TransactionError struct {
+ SuccessfulIndices []int
+ TotalTransactions int
+ Errors []error
+}
+
+func (t *TransactionError) Error() string {
+ return fmt.Sprintf("failed to execute (%d/%d) transactions failed due to: %v",
+ t.TotalTransactions-len(t.SuccessfulIndices), t.TotalTransactions, t.Errors)
+}
+
// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
type KeyFunc func(obj interface{}) (string, error)
@@ -167,6 +204,40 @@ type cache struct {
var _ Store = &cache{}
+func (c *cache) Transaction(txns ...Transaction) *TransactionError {
+ txnStore, ok := c.cacheStorage.(ThreadSafeStoreWithTransaction)
+ if !ok {
+ return &TransactionError{
+ TotalTransactions: len(txns),
+ Errors: []error{
+ errors.New("transaction not supported"),
+ },
+ }
+ }
+ keyedTxns := make([]ThreadSafeStoreTransaction, 0, len(txns))
+ successfulIndices := make([]int, 0, len(txns))
+ errs := make([]error, 0)
+ for i := range txns {
+ txn := txns[i]
+ key, err := c.keyFunc(txn.Object)
+ if err != nil {
+ errs = append(errs, KeyError{txn.Object, err})
+ continue
+ }
+ successfulIndices = append(successfulIndices, i)
+ keyedTxns = append(keyedTxns, ThreadSafeStoreTransaction{txn, key})
+ }
+ txnStore.Transaction(keyedTxns...)
+ if len(errs) > 0 {
+ return &TransactionError{
+ SuccessfulIndices: successfulIndices,
+ TotalTransactions: len(txns),
+ Errors: errs,
+ }
+ }
+ return nil
+}
+
// Add inserts an item into the cache.
func (c *cache) Add(obj interface{}) error {
key, err := c.keyFunc(obj)
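
Reviewer note (not part of the vendored patch): a sketch of driving the new TransactionStore surface; applyAdds is a hypothetical helper and assumes the Store is backed by a transaction-capable ThreadSafeStore, as in the cache change above.

package storetxn

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// applyAdds adds a batch of pods under a single lock acquisition when the store
// implements TransactionStore, surfacing partial failures via TransactionError.
func applyAdds(store cache.Store, pods []*v1.Pod) error {
	ts, ok := store.(cache.TransactionStore)
	if !ok {
		return fmt.Errorf("store does not support transactions")
	}
	txns := make([]cache.Transaction, 0, len(pods))
	for _, p := range pods {
		txns = append(txns, cache.Transaction{Type: cache.TransactionTypeAdd, Object: p})
	}
	if terr := ts.Transaction(txns...); terr != nil {
		return fmt.Errorf("only %d/%d transactions succeeded: %w",
			len(terr.SuccessfulIndices), terr.TotalTransactions, terr)
	}
	return nil
}
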
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go b/operator/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
index ef322bea..933e41bb 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/the_real_fifo.go
@@ -26,6 +26,31 @@ import (
utiltrace "k8s.io/utils/trace"
)
+// RealFIFOOptions is the configuration parameters for RealFIFO.
+type RealFIFOOptions struct {
+ // KeyFunction is used to figure out what key an object should have. (It's
+ // exposed in the returned RealFIFO's keyOf() method, with additional
+ // handling around deleted objects and queue state).
+ // Optional, the default is MetaNamespaceKeyFunc.
+ KeyFunction KeyFunc
+
+ // KnownObjects is expected to return a list of keys that the consumer of
+ // this queue "knows about". It is used to decide which items are missing
+ // when Replace() is called; 'Deleted' deltas are produced for the missing items.
+ // KnownObjects is required.
+ KnownObjects KeyListerGetter
+
+ // If set, will be called for objects before enqueueing them. Please
+ // see the comment on TransformFunc for details.
+ Transformer TransformFunc
+}
+
+const (
+ defaultBatchSize = 1000
+)
+
+var _ QueueWithBatch = &RealFIFO{}
+
// RealFIFO is a Queue in which every notification from the Reflector is passed
// in order to the Queue via Pop.
// This means that it
@@ -58,10 +83,14 @@ type RealFIFO struct {
// Called with every object if non-nil.
transformer TransformFunc
+
+ // batchSize determines the maximum number of objects we can combine into a batch.
+ batchSize int
}
var (
- _ = Queue(&RealFIFO{}) // RealFIFO is a Queue
+ _ = Queue(&RealFIFO{}) // RealFIFO is a Queue
+ _ = TransformingStore(&RealFIFO{}) // RealFIFO implements TransformingStore to allow memory optimizations
)
// Close the queue.
@@ -235,6 +264,74 @@ func (f *RealFIFO) Pop(process PopProcessFunc) (interface{}, error) {
return Deltas{item}, err
}
+func (f *RealFIFO) PopBatch(process ProcessBatchFunc) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ for len(f.items) == 0 {
+ // When the queue is empty, PopBatch() blocks until a new item is enqueued.
+ // When Close() is called, f.closed is set and the condition is broadcast,
+ // which causes this loop to continue and return from PopBatch().
+ if f.closed {
+ return ErrFIFOClosed
+ }
+
+ f.cond.Wait()
+ }
+
+ isInInitialList := !f.hasSynced_locked()
+ unique := sets.NewString()
+ deltas := make([]Delta, 0, min(len(f.items), f.batchSize))
+ // only bundle unique items into a batch
+ for i := 0; i < f.batchSize && i < len(f.items); i++ {
+ if f.initialPopulationCount > 0 && i >= f.initialPopulationCount {
+ break
+ }
+ item := f.items[i]
+ id, err := f.keyOf(item)
+ if err != nil {
+ // Close the batch here if an error happens.
+ // TODO: log the error once RealFIFOOptions supports passing a klog instance like the deprecated DeltaFIFO.
+ // Still pop the broken item off the queue to stay compatible with the non-batch behavior. This is safe
+ // when the first element is broken; for the Nth broken element there is a risk that the broken item
+ // is still processed and unexpectedly breaks the uniqueness of the batch.
+ deltas = append(deltas, item)
+ // The underlying array still exists and references this object, so the object will not be garbage collected unless we zero the reference.
+ f.items[i] = Delta{}
+ break
+ }
+ if unique.Has(id) {
+ break
+ }
+ unique.Insert(id)
+ deltas = append(deltas, item)
+ // The underlying array still exists and references this object, so the object will not be garbage collected unless we zero the reference.
+ f.items[i] = Delta{}
+ }
+ if f.initialPopulationCount > 0 {
+ f.initialPopulationCount -= len(deltas)
+ }
+ f.items = f.items[len(deltas):]
+
+ // Only log traces if the queue depth is greater than 10 and it takes more than
+ // 100 milliseconds to process one item from the queue (with a max of 1 second for the whole batch).
+ // Queue depth never goes high because processing an item locks the queue,
+ // and new items can't be added until processing finishes.
+ // https://github.com/kubernetes/kubernetes/issues/103789
+ if len(f.items) > 10 {
+ id, _ := f.keyOf(deltas[0])
+ trace := utiltrace.New("RealFIFO PopBatch Process",
+ utiltrace.Field{Key: "ID", Value: id},
+ utiltrace.Field{Key: "Depth", Value: len(f.items)},
+ utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"},
+ utiltrace.Field{Key: "BatchSize", Value: len(deltas)})
+ defer trace.LogIfLong(min(100*time.Millisecond*time.Duration(len(deltas)), time.Second))
+ }
+
+ err := process(deltas, isInInitialList)
+ return err
+}
+
// Replace
// 1. finds those items in f.items that are not in newItems and creates synthetic deletes for them
// 2. finds items in knownObjects that are not in newItems and creates synthetic deletes for them
@@ -398,16 +495,32 @@ func (f *RealFIFO) Transformer() TransformFunc {
// NewRealFIFO returns a Store which can be used to queue up items to
// process.
func NewRealFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter, transformer TransformFunc) *RealFIFO {
- if knownObjects == nil {
+ return NewRealFIFOWithOptions(RealFIFOOptions{
+ KeyFunction: keyFunc,
+ KnownObjects: knownObjects,
+ Transformer: transformer,
+ })
+}
+
+// NewRealFIFOWithOptions returns a Queue which can be used to process changes to
+// items. See also the comment on RealFIFO.
+func NewRealFIFOWithOptions(opts RealFIFOOptions) *RealFIFO {
+ if opts.KeyFunction == nil {
+ opts.KeyFunction = MetaNamespaceKeyFunc
+ }
+
+ if opts.KnownObjects == nil {
panic("coding error: knownObjects must be provided")
}
f := &RealFIFO{
items: make([]Delta, 0, 10),
- keyFunc: keyFunc,
- knownObjects: knownObjects,
- transformer: transformer,
+ keyFunc: opts.KeyFunction,
+ knownObjects: opts.KnownObjects,
+ transformer: opts.Transformer,
+ batchSize: defaultBatchSize,
}
+
f.cond.L = &f.lock
return f
}
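
Reviewer note (not part of the vendored patch): a sketch of building a batch-capable FIFO through the new RealFIFOOptions; the Indexer parameter is only a stand-in for whatever KnownObjects source an informer would supply.

package fifoopts

import "k8s.io/client-go/tools/cache"

// newBatchCapableFIFO constructs a RealFIFO (which also satisfies QueueWithBatch)
// via NewRealFIFOWithOptions; KeyFunction defaults to MetaNamespaceKeyFunc.
func newBatchCapableFIFO(knownObjects cache.Indexer) cache.QueueWithBatch {
	return cache.NewRealFIFOWithOptions(cache.RealFIFOOptions{
		KnownObjects: knownObjects, // required: the constructor panics when nil
		Transformer:  nil,          // optional TransformFunc applied before enqueueing
	})
}
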
diff --git a/operator/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/operator/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
index 7a4df0e1..ef3a599a 100644
--- a/operator/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
+++ b/operator/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
@@ -19,8 +19,10 @@ package cache
import (
"fmt"
"sync"
+ "time"
"k8s.io/apimachinery/pkg/util/sets"
+ utiltrace "k8s.io/utils/trace"
)
// ThreadSafeStore is an interface that allows concurrent indexed
@@ -58,6 +60,19 @@ type ThreadSafeStore interface {
Resync() error
}
+// ThreadSafeStoreWithTransaction is a store that can batch execute multiple transactions.
+type ThreadSafeStoreWithTransaction interface {
+ ThreadSafeStore
+ // Transaction allows performing multiple writes in one call.
+ Transaction(fns ...ThreadSafeStoreTransaction)
+}
+
+// ThreadSafeStoreTransaction embeds a Transaction and includes the specific Key identifying the affected object.
+type ThreadSafeStoreTransaction struct {
+ Transaction
+ Key string
+}
+
// storeIndex implements the indexing functionality for Store interface
type storeIndex struct {
// indexers maps a name to an IndexFunc
@@ -70,7 +85,7 @@ func (i *storeIndex) reset() {
i.indices = Indices{}
}
-func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) {
+func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.Set[string], error) {
indexFunc := i.indexers[indexName]
if indexFunc == nil {
return nil, fmt.Errorf("Index with name %s does not exist", indexName)
@@ -82,7 +97,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
}
index := i.indices[indexName]
- var storeKeySet sets.String
+ var storeKeySet sets.Set[string]
if len(indexedValues) == 1 {
// In majority of cases, there is exactly one value matching.
// Optimize the most common path - deduping is not needed here.
@@ -90,7 +105,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
} else {
// Need to de-dupe the return list.
// Since multiple keys are allowed, this can happen.
- storeKeySet = sets.String{}
+ storeKeySet = sets.Set[string]{}
for _, indexedValue := range indexedValues {
for key := range index[indexedValue] {
storeKeySet.Insert(key)
@@ -101,7 +116,7 @@ func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.S
return storeKeySet, nil
}
-func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) {
+func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.Set[string], error) {
indexFunc := i.indexers[indexName]
if indexFunc == nil {
return nil, fmt.Errorf("Index with name %s does not exist", indexName)
@@ -121,10 +136,10 @@ func (i *storeIndex) getIndexValues(indexName string) []string {
}
func (i *storeIndex) addIndexers(newIndexers Indexers) error {
- oldKeys := sets.StringKeySet(i.indexers)
- newKeys := sets.StringKeySet(newIndexers)
+ oldKeys := sets.KeySet(i.indexers)
+ newKeys := sets.KeySet(newIndexers)
- if oldKeys.HasAny(newKeys.List()...) {
+ if oldKeys.HasAny(sets.List(newKeys)...) {
return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
}
@@ -167,10 +182,10 @@ func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj i
indexValues = indexValues[:0]
}
- index := i.indices[name]
- if index == nil {
- index = Index{}
- i.indices[name] = index
+ idx := i.indices[name]
+ if idx == nil {
+ idx = index{}
+ i.indices[name] = idx
}
if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
@@ -179,10 +194,10 @@ func (i *storeIndex) updateSingleIndex(name string, oldObj interface{}, newObj i
}
for _, value := range oldIndexValues {
- i.deleteKeyFromIndex(key, value, index)
+ i.deleteKeyFromIndex(key, value, idx)
}
for _, value := range indexValues {
- i.addKeyToIndex(key, value, index)
+ i.addKeyToIndex(key, value, idx)
}
}
@@ -197,16 +212,16 @@ func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key s
}
}
-func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) {
+func (i *storeIndex) addKeyToIndex(key, indexValue string, index index) {
set := index[indexValue]
if set == nil {
- set = sets.String{}
+ set = sets.Set[string]{}
index[indexValue] = set
}
set.Insert(key)
}
-func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) {
+func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index index) {
set := index[indexValue]
if set == nil {
return
@@ -229,13 +244,41 @@ type threadSafeMap struct {
index *storeIndex
}
+func (c *threadSafeMap) Transaction(txns ...ThreadSafeStoreTransaction) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ trace := utiltrace.New("ThreadSafeMap Transaction Process",
+ utiltrace.Field{Key: "Size", Value: len(txns)},
+ utiltrace.Field{Key: "Reason", Value: "Slow batch process due to too many items"})
+ defer trace.LogIfLong(min(500*time.Millisecond*time.Duration(len(txns)), 5*time.Second))
+
+ for _, txn := range txns {
+ switch txn.Type {
+ case TransactionTypeAdd:
+ c.addLocked(txn.Key, txn.Object)
+ case TransactionTypeUpdate:
+ c.updateLocked(txn.Key, txn.Object)
+ case TransactionTypeDelete:
+ c.deleteLocked(txn.Key)
+ }
+ }
+}
+
func (c *threadSafeMap) Add(key string, obj interface{}) {
c.Update(key, obj)
}
+func (c *threadSafeMap) addLocked(key string, obj interface{}) {
+ c.updateLocked(key, obj)
+}
+
func (c *threadSafeMap) Update(key string, obj interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
+ c.updateLocked(key, obj)
+}
+
+func (c *threadSafeMap) updateLocked(key string, obj interface{}) {
oldObject := c.items[key]
c.items[key] = obj
c.index.updateIndices(oldObject, obj, key)
@@ -244,6 +287,10 @@ func (c *threadSafeMap) Update(key string, obj interface{}) {
func (c *threadSafeMap) Delete(key string) {
c.lock.Lock()
defer c.lock.Unlock()
+ c.deleteLocked(key)
+}
+
+func (c *threadSafeMap) deleteLocked(key string) {
if obj, exists := c.items[key]; exists {
c.index.updateIndices(obj, nil, key)
delete(c.items, key)
@@ -336,7 +383,7 @@ func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, err
if err != nil {
return nil, err
}
- return set.List(), nil
+ return sets.List(set), nil
}
func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string {
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 8c64adb8..cb21c040 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -283,8 +283,57 @@ type ExecConfig struct {
// read user instructions might set this to "used by my-program to read user instructions".
// +k8s:conversion-gen=false
StdinUnavailableMessage string `json:"-"`
+
+ // PluginPolicy is the policy governing whether or not the configured
+ // `Command` may run.
+ // +k8s:conversion-gen=false
+ PluginPolicy PluginPolicy `json:"-"`
+}
+
+// AllowlistEntry is an entry in the allowlist. For each allowlist item, at
+// least one field must be nonempty. A struct with all empty fields is
+// considered a misconfiguration error. Each field is a criterion for
+// execution. If multiple fields are specified, then the criteria of all
+// specified fields must be met. That is, the result of an individual entry is
+// the logical AND of all checks corresponding to the specified fields within
+// the entry.
+type AllowlistEntry struct {
+ // Name matching is performed by first resolving the absolute path of both
+ // the plugin and the name in the allowlist entry using `exec.LookPath`. It
+ // will be called on both, and the resulting strings must be equal. If
+ // either call to `exec.LookPath` results in an error, the `Name` check
+ // will be considered a failure.
+ Name string `json:"-"`
+}
+
+// PluginPolicy describes the policy type and allowlist (if any) for client-go
+// credential plugins.
+type PluginPolicy struct {
+ // PolicyType specifies the policy governing which, if any, client-go
+ // credential plugins may be executed. It MUST be one of { "", "AllowAll", "DenyAll", "Allowlist" }.
+ // If the policy is "", then it falls back to "AllowAll" (this is required
+ // to maintain backward compatibility). If the policy is DenyAll, no
+ // credential plugins may run. If the policy is Allowlist, only those
+ // plugins meeting the criteria specified in the `credentialPluginAllowlist`
+ // field may run. If the policy is not `Allowlist` but one is provided, it
+ // is considered a configuration error.
+ PolicyType PolicyType `json:"-"`
+
+ // Allowlist is a slice of allowlist entries. If any of them is a match,
+ // then the executable in question may execute. That is, the result is the
+ // logical OR of all entries in the allowlist. This list MUST be nil
+ // whenever the policy is not "Allowlist".
+ Allowlist []AllowlistEntry `json:"-"`
}
+type PolicyType string
+
+const (
+ PluginPolicyAllowAll PolicyType = "AllowAll"
+ PluginPolicyDenyAll PolicyType = "DenyAll"
+ PluginPolicyAllowlist PolicyType = "Allowlist"
+)
+
var _ fmt.Stringer = new(ExecConfig)
var _ fmt.GoStringer = new(ExecConfig)
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
index bdedc166..ef1e9b8e 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
@@ -401,6 +401,7 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo
out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode)
// INFO: in.StdinUnavailable opted out of conversion generation
// INFO: in.StdinUnavailableMessage opted out of conversion generation
+ // INFO: in.PluginPolicy opted out of conversion generation
return nil
}
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
index 86e4ddef..aba8add9 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -25,6 +25,22 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllowlistEntry) DeepCopyInto(out *AllowlistEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowlistEntry.
+func (in *AllowlistEntry) DeepCopy() *AllowlistEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(AllowlistEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
*out = *in
@@ -271,6 +287,7 @@ func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
if in.Config != nil {
out.Config = in.Config.DeepCopyObject()
}
+ in.PluginPolicy.DeepCopyInto(&out.PluginPolicy)
return
}
@@ -300,6 +317,27 @@ func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginPolicy) DeepCopyInto(out *PluginPolicy) {
+ *out = *in
+ if in.Allowlist != nil {
+ in, out := &in.Allowlist, &out.Allowlist
+ *out = make([]AllowlistEntry, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginPolicy.
+func (in *PluginPolicy) DeepCopy() *PluginPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Preferences) DeepCopyInto(out *Preferences) {
*out = *in
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
index cd0a8649..ed35891e 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -533,6 +533,21 @@ func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
return clientcmdapi.AuthInfo{}, err
}
+
+ // Handle the ClientKey/ClientKeyData conflict: if the override sets ClientKey, also use the override's ClientKeyData;
+ // otherwise, if the original config has ClientKeyData set, validation returns the error
+ // "client-key-data and client-key are both specified "
+ if len(config.overrides.AuthInfo.ClientKey) > 0 || len(config.overrides.AuthInfo.ClientKeyData) > 0 {
+ mergedAuthInfo.ClientKey = config.overrides.AuthInfo.ClientKey
+ mergedAuthInfo.ClientKeyData = config.overrides.AuthInfo.ClientKeyData
+ }
+ // Handle the ClientCertificate/ClientCertificateData conflict: if the override sets ClientCertificate, also use the override's ClientCertificateData;
+ // otherwise, if the original config has ClientCertificateData set, validation returns the error
+ // "client-cert-data and client-cert are both specified "
+ if len(config.overrides.AuthInfo.ClientCertificate) > 0 || len(config.overrides.AuthInfo.ClientCertificateData) > 0 {
+ mergedAuthInfo.ClientCertificate = config.overrides.AuthInfo.ClientCertificate
+ mergedAuthInfo.ClientCertificateData = config.overrides.AuthInfo.ClientCertificateData
+ }
}
return *mergedAuthInfo, nil
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/loader.go
index c900e5fd..b127e2e0 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/loader.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -331,7 +331,10 @@ func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
return []string{rules.ExplicitPath}
}
- return rules.Precedence
+ // Create a copy in case something tries to sort the returned slice.
+ precedence := make([]string, len(rules.Precedence))
+ copy(precedence, rules.Precedence)
+ return precedence
}
// GetStartingConfig implements ConfigAccess
diff --git a/operator/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/operator/vendor/k8s.io/client-go/tools/clientcmd/validation.go
index 088972ef..0389ad6d 100644
--- a/operator/vendor/k8s.io/client-go/tools/clientcmd/validation.go
+++ b/operator/vendor/k8s.io/client-go/tools/clientcmd/validation.go
@@ -25,6 +25,7 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/validation"
+ authexec "k8s.io/client-go/plugin/pkg/client/auth/exec"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@@ -327,6 +328,10 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err
default:
validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode))
}
+
+ if err := authexec.ValidatePluginPolicy(authInfo.Exec.PluginPolicy); err != nil {
+ validationErrors = append(validationErrors, fmt.Errorf("allowlist misconfiguration: %w", err))
+ }
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
diff --git a/operator/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/operator/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
index 07180630..29d34c4e 100644
--- a/operator/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
+++ b/operator/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
@@ -209,7 +209,7 @@ type LeaderElector struct {
// before leader election loop is stopped by ctx or it has
// stopped holding the leader lease
func (le *LeaderElector) Run(ctx context.Context) {
- defer runtime.HandleCrash()
+ defer runtime.HandleCrashWithContext(ctx)
defer le.config.Callbacks.OnStoppedLeading()
if !le.acquire(ctx) {
@@ -254,7 +254,8 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
defer cancel()
succeeded := false
desc := le.config.Lock.Describe()
- klog.Infof("attempting to acquire leader lease %v...", desc)
+ logger := klog.FromContext(ctx)
+ logger.Info("Attempting to acquire leader lease...", "lock", desc)
wait.JitterUntil(func() {
if !le.config.Coordinated {
succeeded = le.tryAcquireOrRenew(ctx)
@@ -263,12 +264,12 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
}
le.maybeReportTransition()
if !succeeded {
- klog.V(4).Infof("failed to acquire lease %v", desc)
+ logger.V(4).Info("Failed to acquire lease", "lock", desc)
return
}
le.config.Lock.RecordEvent("became leader")
le.metrics.leaderOn(le.config.Name)
- klog.Infof("successfully acquired lease %v", desc)
+ logger.Info("Successfully acquired lease", "lock", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
return succeeded
@@ -279,6 +280,7 @@ func (le *LeaderElector) renew(ctx context.Context) {
defer le.config.Lock.RecordEvent("stopped leading")
ctx, cancel := context.WithCancel(ctx)
defer cancel()
+ logger := klog.FromContext(ctx)
wait.Until(func() {
err := wait.PollUntilContextTimeout(ctx, le.config.RetryPeriod, le.config.RenewDeadline, true, func(ctx context.Context) (done bool, err error) {
if !le.config.Coordinated {
@@ -290,22 +292,22 @@ func (le *LeaderElector) renew(ctx context.Context) {
le.maybeReportTransition()
desc := le.config.Lock.Describe()
if err == nil {
- klog.V(5).Infof("successfully renewed lease %v", desc)
+ logger.V(5).Info("Successfully renewed lease", "lock", desc)
return
}
le.metrics.leaderOff(le.config.Name)
- klog.Infof("failed to renew lease %v: %v", desc, err)
+ logger.Info("Failed to renew lease", "lock", desc, "err", err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
// if we hold the lease, give it up
if le.config.ReleaseOnCancel {
- le.release()
+ le.release(logger)
}
}
// release attempts to release the leader lease if we have acquired it.
-func (le *LeaderElector) release() bool {
+func (le *LeaderElector) release(logger klog.Logger) bool {
ctx := context.Background()
timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
defer timeoutCancel()
@@ -313,10 +315,10 @@ func (le *LeaderElector) release() bool {
oldLeaderElectionRecord, _, err := le.config.Lock.Get(timeoutCtx)
if err != nil {
if !errors.IsNotFound(err) {
- klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
+ logger.Error(err, "error retrieving resource lock", "lock", le.config.Lock.Describe())
return false
}
- klog.Infof("lease lock not found: %v", le.config.Lock.Describe())
+ logger.Info("lease lock not found", "lock", le.config.Lock.Describe())
return false
}
@@ -331,7 +333,7 @@ func (le *LeaderElector) release() bool {
AcquireTime: now,
}
if err := le.config.Lock.Update(timeoutCtx, leaderElectionRecord); err != nil {
- klog.Errorf("Failed to release lock: %v", err)
+ logger.Error(err, "Failed to release lease", "lock", le.config.Lock.Describe())
return false
}
@@ -343,6 +345,7 @@ func (le *LeaderElector) release() bool {
// lease if it has already been acquired. Returns true on success else returns
// false.
func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
+ logger := klog.FromContext(ctx)
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
@@ -355,10 +358,10 @@ func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
if err != nil {
if !errors.IsNotFound(err) {
- klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
+ logger.Error(err, "Error retrieving lease lock", "lock", le.config.Lock.Describe())
return false
}
- klog.Infof("lease lock not found: %v", le.config.Lock.Describe())
+ logger.Info("Lease lock not found", "lock", le.config.Lock.Describe(), "err", err)
return false
}
@@ -371,18 +374,18 @@ func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
hasExpired := le.observedTime.Add(time.Second * time.Duration(oldLeaderElectionRecord.LeaseDurationSeconds)).Before(now.Time)
if hasExpired {
- klog.Infof("lock has expired: %v", le.config.Lock.Describe())
+ logger.Info("Lease has expired", "lock", le.config.Lock.Describe())
return false
}
if !le.IsLeader() {
- klog.V(6).Infof("lock is held by %v and has not yet expired: %v", oldLeaderElectionRecord.HolderIdentity, le.config.Lock.Describe())
+ logger.V(6).Info("Lease is held and has not yet expired", "lock", le.config.Lock.Describe(), "holder", oldLeaderElectionRecord.HolderIdentity)
return false
}
// 2b. If the lease has been marked as "end of term", don't renew it
if le.IsLeader() && oldLeaderElectionRecord.PreferredHolder != "" {
- klog.V(4).Infof("lock is marked as 'end of term': %v", le.config.Lock.Describe())
+ logger.V(4).Info("Lease is marked as 'end of term'", "lock", le.config.Lock.Describe())
// TODO: Instead of letting lease expire, the holder may deleted it directly
// This will not be compatible with all controllers, so it needs to be opt-in behavior.
// We must ensure all code guarded by this lease has successfully completed
@@ -406,7 +409,7 @@ func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
// update the lock itself
if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
- klog.Errorf("Failed to update lock: %v", err)
+ logger.Error(err, "Failed to update lock", "lock", le.config.Lock.Describe())
return false
}
@@ -418,6 +421,7 @@ func (le *LeaderElector) tryCoordinatedRenew(ctx context.Context) bool {
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
+ logger := klog.FromContext(ctx)
now := metav1.NewTime(le.clock.Now())
leaderElectionRecord := rl.LeaderElectionRecord{
HolderIdentity: le.config.Lock.Identity(),
@@ -438,18 +442,18 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
le.setObservedRecord(&leaderElectionRecord)
return true
}
- klog.Errorf("Failed to update lock optimistically: %v, falling back to slow path", err)
+ logger.Error(err, "Failed to update lease optimistically, falling back to slow path", "lock", le.config.Lock.Describe())
}
// 2. obtain or create the ElectionRecord
oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
if err != nil {
if !errors.IsNotFound(err) {
- klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
+ logger.Error(err, "Error retrieving lease lock", "lock", le.config.Lock.Describe())
return false
}
if err = le.config.Lock.Create(ctx, leaderElectionRecord); err != nil {
- klog.Errorf("error initially creating leader election record: %v", err)
+ logger.Error(err, "Error initially creating lease lock", "lock", le.config.Lock.Describe())
return false
}
@@ -465,7 +469,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
le.observedRawRecord = oldLeaderElectionRawRecord
}
if len(oldLeaderElectionRecord.HolderIdentity) > 0 && le.isLeaseValid(now.Time) && !le.IsLeader() {
- klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
+ logger.V(4).Info("Lease is held by and has not yet expired", "lock", le.config.Lock.Describe(), "holder", oldLeaderElectionRecord.HolderIdentity)
return false
}
@@ -481,7 +485,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
// update the lock itself
if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
- klog.Errorf("Failed to update lock: %v", err)
+ logger.Error(err, "Failed to update lease", "lock", le.config.Lock.Describe())
return false
}
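A short, hedged sketch of what the contextual-logging migration above means for callers: the leader elector now pulls its logger from the context via klog.FromContext, so attaching a named logger to the context controls how these messages are emitted. The logger name and lock value here are illustrative.

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	// Attach a named logger to the context; the leader election code now
	// retrieves it with klog.FromContext instead of calling the global klog
	// functions, so "leader-election" appears on every message.
	logger := klog.LoggerWithName(klog.Background(), "leader-election")
	ctx := klog.NewContext(context.Background(), logger)

	// Pass ctx to LeaderElector.Run; shown here only as a log round-trip.
	klog.FromContext(ctx).Info("Attempting to acquire leader lease...", "lock", "skyhook/leader")
}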
diff --git a/operator/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go b/operator/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go
index 9aaf779e..b2fa14a5 100644
--- a/operator/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go
+++ b/operator/vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go
@@ -120,8 +120,12 @@ func NewCandidate(clientset kubernetes.Interface,
func (c *LeaseCandidate) Run(ctx context.Context) {
defer c.queue.ShutDown()
+ logger := klog.FromContext(ctx)
+ logger = klog.LoggerWithName(logger, "leasecandidate")
+ ctx = klog.NewContext(ctx, logger)
+
c.informerFactory.Start(ctx.Done())
- if !cache.WaitForNamedCacheSync("leasecandidateclient", ctx.Done(), c.hasSynced) {
+ if !cache.WaitForNamedCacheSyncWithContext(ctx, c.hasSynced) {
return
}
@@ -148,7 +152,7 @@ func (c *LeaseCandidate) processNextWorkItem(ctx context.Context) bool {
return true
}
- utilruntime.HandleError(err)
+ utilruntime.HandleErrorWithContext(ctx, err, "Ensuring lease failed")
c.queue.AddRateLimited(key)
return true
@@ -161,20 +165,21 @@ func (c *LeaseCandidate) enqueueLease() {
// ensureLease creates the lease if it does not exist and renew it if it exists. Returns the lease and
// a bool (true if this call created the lease), or any error that occurs.
func (c *LeaseCandidate) ensureLease(ctx context.Context) error {
+ logger := klog.FromContext(ctx)
lease, err := c.leaseClient.Get(ctx, c.name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
- klog.V(2).Infof("Creating lease candidate")
+ logger.V(2).Info("Creating lease candidate")
// lease does not exist, create it.
leaseToCreate := c.newLeaseCandidate()
if _, err := c.leaseClient.Create(ctx, leaseToCreate, metav1.CreateOptions{}); err != nil {
return err
}
- klog.V(2).Infof("Created lease candidate")
+ logger.V(2).Info("Created lease candidate")
return nil
} else if err != nil {
return err
}
- klog.V(2).Infof("lease candidate exists. Renewing.")
+ logger.V(2).Info("Lease candidate exists. Renewing.")
clone := lease.DeepCopy()
clone.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
_, err = c.leaseClient.Update(ctx, clone, metav1.UpdateOptions{})
diff --git a/operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
index 5d205415..79a748b7 100644
--- a/operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
+++ b/operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go
@@ -77,6 +77,9 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error
ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
if ll.Labels != nil {
+ if ll.lease.Labels == nil {
+ ll.lease.Labels = map[string]string{}
+ }
// Only overwrite the labels that are specifically set
for k, v := range ll.Labels {
ll.lease.Labels[k] = v
diff --git a/operator/vendor/k8s.io/client-go/tools/metrics/metrics.go b/operator/vendor/k8s.io/client-go/tools/metrics/metrics.go
index 99d3d8e2..e364b7e1 100644
--- a/operator/vendor/k8s.io/client-go/tools/metrics/metrics.go
+++ b/operator/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -62,6 +62,12 @@ type CallsMetric interface {
Increment(exitCode int, callStatus string)
}
+// PolicyCallsMetric counts allowed and denied exec plugin policy checks.
+type PolicyCallsMetric interface {
+ // Increment increments a counter per status { "allowed", "denied" }
+ Increment(status string)
+}
+
// RetryMetric counts the number of retries sent to the server
// partitioned by code, method, and host.
type RetryMetric interface {
@@ -99,6 +105,9 @@ var (
// ExecPluginCalls is the number of calls made to an exec plugin, partitioned by
// exit code and call status.
ExecPluginCalls CallsMetric = noopCalls{}
+ // ExecPluginPolicyCalls is the number of plugin policy check calls, partitioned
+ // by {"allowed", "denied"}
+ ExecPluginPolicyCalls PolicyCallsMetric = noopPolicy{}
// RequestRetry is the retry metric that tracks the number of
// retries sent to the server.
RequestRetry RetryMetric = noopRetry{}
@@ -121,6 +130,7 @@ type RegisterOpts struct {
RateLimiterLatency LatencyMetric
RequestResult ResultMetric
ExecPluginCalls CallsMetric
+ ExecPluginPolicyCalls PolicyCallsMetric
RequestRetry RetryMetric
TransportCacheEntries TransportCacheMetric
TransportCreateCalls TransportCreateCallsMetric
@@ -157,6 +167,9 @@ func Register(opts RegisterOpts) {
if opts.ExecPluginCalls != nil {
ExecPluginCalls = opts.ExecPluginCalls
}
+ if opts.ExecPluginPolicyCalls != nil {
+ ExecPluginPolicyCalls = opts.ExecPluginPolicyCalls
+ }
if opts.RequestRetry != nil {
RequestRetry = opts.RequestRetry
}
@@ -198,6 +211,10 @@ type noopCalls struct{}
func (noopCalls) Increment(int, string) {}
+type noopPolicy struct{}
+
+func (noopPolicy) Increment(string) {}
+
type noopRetry struct{}
func (noopRetry) IncrementRetry(context.Context, string, string, string) {}
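A sketch, under the assumption that the new PolicyCallsMetric would be backed by Prometheus, of wiring a counter into RegisterOpts; the metric name and adapter type are made up for illustration, not something client-go defines.

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	clientmetrics "k8s.io/client-go/tools/metrics"
)

// policyCalls adapts a Prometheus counter to the PolicyCallsMetric interface.
type policyCalls struct {
	counter *prometheus.CounterVec
}

func (p policyCalls) Increment(status string) {
	p.counter.WithLabelValues(status).Inc()
}

func main() {
	// Metric name is illustrative; register it once alongside the other
	// client-go metrics. Register is guarded by a sync.Once internally.
	c := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "exec_plugin_policy_calls_total",
		Help: "Exec plugin policy checks partitioned by allowed/denied status.",
	}, []string{"status"})
	prometheus.DefaultRegisterer.MustRegister(c)

	clientmetrics.Register(clientmetrics.RegisterOpts{
		ExecPluginPolicyCalls: policyCalls{counter: c},
	})
}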
diff --git a/operator/vendor/k8s.io/client-go/tools/record/event.go b/operator/vendor/k8s.io/client-go/tools/record/event.go
index f97c5d61..58322d44 100644
--- a/operator/vendor/k8s.io/client-go/tools/record/event.go
+++ b/operator/vendor/k8s.io/client-go/tools/record/event.go
@@ -334,7 +334,7 @@ func recordEvent(ctx context.Context, sink EventSink, event *v1.Event, patch []b
newEvent, err = sink.Patch(event, patch)
}
// Update can fail because the event may have been removed and it no longer exists.
- if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) {
+ if !updateExistingEvent || util.IsKeyNotFoundError(err) {
// Making sure that ResourceVersion is empty on creation
event.ResourceVersion = ""
newEvent, err = sink.Create(event)
diff --git a/operator/vendor/k8s.io/client-go/tools/record/events_cache.go b/operator/vendor/k8s.io/client-go/tools/record/events_cache.go
index 170074d4..9eae6971 100644
--- a/operator/vendor/k8s.io/client-go/tools/record/events_cache.go
+++ b/operator/vendor/k8s.io/client-go/tools/record/events_cache.go
@@ -223,7 +223,7 @@ func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messag
type aggregateRecord struct {
// we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate
// if the size of this set exceeds the max, we know we need to aggregate
- localKeys sets.String
+ localKeys sets.Set[string]
// The last time at which the aggregate was recorded
lastTimestamp metav1.Time
}
@@ -257,7 +257,7 @@ func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string)
maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
interval := now.Time.Sub(record.lastTimestamp.Time)
if interval > maxInterval {
- record = aggregateRecord{localKeys: sets.NewString()}
+ record = aggregateRecord{localKeys: sets.New[string]()}
}
// Write the new event into the aggregation record and put it on the cache
diff --git a/operator/vendor/k8s.io/client-go/tools/reference/ref.go b/operator/vendor/k8s.io/client-go/tools/reference/ref.go
index 5d4ec374..7ba73924 100644
--- a/operator/vendor/k8s.io/client-go/tools/reference/ref.go
+++ b/operator/vendor/k8s.io/client-go/tools/reference/ref.go
@@ -20,7 +20,7 @@ import (
"errors"
"fmt"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -63,6 +63,9 @@ func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReferen
//
// TODO: This doesn't work for CRDs, which are not registered in scheme.
if gvk.Empty() {
+ if scheme == nil {
+ return nil, errors.New("scheme is required to look up gvk")
+ }
gvks, _, err := scheme.ObjectKinds(obj)
if err != nil {
return nil, err
diff --git a/operator/vendor/k8s.io/client-go/util/cert/cert.go b/operator/vendor/k8s.io/client-go/util/cert/cert.go
index 12204612..48c78b59 100644
--- a/operator/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/operator/vendor/k8s.io/client-go/util/cert/cert.go
@@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
- DNSNames: []string{cfg.CommonName},
NotBefore: notBefore,
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
IsCA: true,
}
+ if len(cfg.CommonName) > 0 {
+ tmpl.DNSNames = []string{cfg.CommonName}
+ }
certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
if err != nil {
diff --git a/operator/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go b/operator/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
index 06f172d8..72c0124a 100644
--- a/operator/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
+++ b/operator/vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go
@@ -45,16 +45,28 @@ func IsDataConsistencyDetectionForWatchListEnabled() bool {
return dataConsistencyDetectionForWatchListEnabled
}
+// SetDataConsistencyDetectionForWatchListEnabledForTest allows enabling or disabling data consistency detection for testing purposes.
+// It returns a function that restores the original value.
+func SetDataConsistencyDetectionForWatchListEnabledForTest(enabled bool) func() {
+ original := dataConsistencyDetectionForWatchListEnabled
+ dataConsistencyDetectionForWatchListEnabled = enabled
+ return func() {
+ dataConsistencyDetectionForWatchListEnabled = original
+ }
+}
+
type RetrieveItemsFunc[U any] func() []U
type ListFunc[T runtime.Object] func(ctx context.Context, options metav1.ListOptions) (T, error)
+type TransformFunc func(interface{}) (interface{}, error)
+
// CheckDataConsistency exists solely for testing purposes.
// we cannot use checkWatchListDataConsistencyIfRequested because
// it is guarded by an environmental variable.
// we cannot manipulate the environmental variable because
// it will affect other tests in this package.
-func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
+func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity string, lastSyncedResourceVersion string, listFn ListFunc[T], listItemTransformFunc TransformFunc, listOptions metav1.ListOptions, retrieveItemsFn RetrieveItemsFunc[U]) {
if !canFormAdditionalListCall(lastSyncedResourceVersion, listOptions) {
klog.V(4).Infof("data consistency check for %s is enabled but the parameters (RV, ListOptions) doesn't allow for creating a valid LIST request. Skipping the data consistency check.", identity)
return
@@ -84,6 +96,15 @@ func CheckDataConsistency[T runtime.Object, U any](ctx context.Context, identity
if err != nil {
panic(err) // this should never happen
}
+ if listItemTransformFunc != nil {
+ for i := range rawListItems {
+ obj, err := listItemTransformFunc(rawListItems[i])
+ if err != nil {
+ panic(err)
+ }
+ rawListItems[i] = obj.(runtime.Object)
+ }
+ }
listItems := toMetaObjectSliceOrDie(rawListItems)
sort.Sort(byUID(listItems))
diff --git a/operator/vendor/k8s.io/client-go/util/watchlist/watch_list.go b/operator/vendor/k8s.io/client-go/util/watchlist/watch_list.go
new file mode 100644
index 00000000..1551a49f
--- /dev/null
+++ b/operator/vendor/k8s.io/client-go/util/watchlist/watch_list.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watchlist
+
+import (
+ metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+ metainternalversionvalidation "k8s.io/apimachinery/pkg/apis/meta/internalversion/validation"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ clientfeatures "k8s.io/client-go/features"
+ "k8s.io/utils/ptr"
+)
+
+var scheme = runtime.NewScheme()
+
+func init() {
+ utilruntime.Must(metainternalversion.AddToScheme(scheme))
+}
+
+// PrepareWatchListOptionsFromListOptions creates a new ListOptions
+// that can be used for a watch-list request from the given listOptions.
+//
+// This function also determines if the given listOptions can be used to form a watch-list request,
+// which would result in streaming semantically equivalent data from the server.
+func PrepareWatchListOptionsFromListOptions(listOptions metav1.ListOptions) (metav1.ListOptions, bool, error) {
+ if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
+ return metav1.ListOptions{}, false, nil
+ }
+
+ internalListOptions := &metainternalversion.ListOptions{}
+ if err := scheme.Convert(&listOptions, internalListOptions, nil); err != nil {
+ return metav1.ListOptions{}, false, err
+ }
+ if errs := metainternalversionvalidation.ValidateListOptions(internalListOptions, true); len(errs) > 0 {
+ return metav1.ListOptions{}, false, nil
+ }
+
+ watchListOptions := listOptions
+ // this is our legacy case, the cache ignores LIMIT for
+ // ResourceVersion == 0 and RVM=unset|NotOlderThan
+ if listOptions.Limit > 0 && listOptions.ResourceVersion != "0" {
+ return metav1.ListOptions{}, false, nil
+ }
+ watchListOptions.Limit = 0
+
+ // to ensure that we can create a watch-list request that returns
+ // semantically equivalent data for the given listOptions,
+ // we need to validate that the RVM for the list is supported by watch-list requests.
+ if listOptions.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
+ return metav1.ListOptions{}, false, nil
+ }
+ watchListOptions.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan
+
+ watchListOptions.Watch = true
+ watchListOptions.AllowWatchBookmarks = true
+ watchListOptions.SendInitialEvents = ptr.To(true)
+
+ internalWatchListOptions := &metainternalversion.ListOptions{}
+ if err := scheme.Convert(&watchListOptions, internalWatchListOptions, nil); err != nil {
+ return metav1.ListOptions{}, false, err
+ }
+ if errs := metainternalversionvalidation.ValidateListOptions(internalWatchListOptions, true); len(errs) > 0 {
+ return metav1.ListOptions{}, false, nil
+ }
+
+ return watchListOptions, true, nil
+}
+
+type unSupportedWatchListSemantics interface {
+ IsWatchListSemanticsUnSupported() bool
+}
+
+// DoesClientNotSupportWatchListSemantics reports whether the given client
+// does NOT support WatchList semantics.
+//
+// A client does NOT support WatchList only if
+// it implements `IsWatchListSemanticsUnSupported` and that returns true.
+func DoesClientNotSupportWatchListSemantics(client any) bool {
+ lw, ok := client.(unSupportedWatchListSemantics)
+ if !ok {
+ return false
+ }
+ return lw.IsWatchListSemanticsUnSupported()
+}
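A hedged usage sketch of PrepareWatchListOptionsFromListOptions, following the function's own contract: when the second return value is false, the caller falls back to a plain LIST. The helper function and namespace handling are assumptions for illustration.

package watchlistexample

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/watchlist"
)

// listPods prefers a watch-list request when the options (and the
// WatchListClient feature gate) allow it, and falls back to a plain LIST.
func listPods(ctx context.Context, cs kubernetes.Interface, ns string, opts metav1.ListOptions) (*v1.PodList, error) {
	watchOpts, ok, err := watchlist.PrepareWatchListOptionsFromListOptions(opts)
	if err != nil {
		return nil, err
	}
	if !ok {
		return cs.CoreV1().Pods(ns).List(ctx, opts)
	}
	w, err := cs.CoreV1().Pods(ns).Watch(ctx, watchOpts)
	if err != nil {
		return nil, err
	}
	defer w.Stop()
	// In real code the caller would consume w.ResultChan() until the
	// initial-events-end bookmark; elided here to keep the sketch short.
	return &v1.PodList{}, nil
}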
diff --git a/operator/vendor/k8s.io/client-go/util/workqueue/queue.go b/operator/vendor/k8s.io/client-go/util/workqueue/queue.go
index 78b072da..9bffddd6 100644
--- a/operator/vendor/k8s.io/client-go/util/workqueue/queue.go
+++ b/operator/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -169,12 +169,13 @@ func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMet
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
+ stopCh: make(chan struct{}),
}
// Don't start the goroutine for a type of noMetrics so we don't consume
// resources unnecessarily
if _, ok := metrics.(noMetrics[T]); !ok {
- go t.updateUnfinishedWorkLoop()
+ t.wg.Go(t.updateUnfinishedWorkLoop)
}
return t
@@ -210,6 +211,14 @@ type Typed[t comparable] struct {
unfinishedWorkUpdatePeriod time.Duration
clock clock.WithTicker
+
+ // wg manages goroutines started by the queue to allow graceful shutdown
+ // ShutDown() will wait for goroutines to exit before returning.
+ wg sync.WaitGroup
+
+ stopCh chan struct{}
+ // stopOnce guarantees we only signal shutdown a single time
+ stopOnce sync.Once
}
// Add marks item as needing processing. When the queue is shutdown new
@@ -296,6 +305,11 @@ func (q *Typed[T]) Done(item T) {
// goroutines will continue processing items in the queue until it is
// empty and then receive the shutdown signal.
func (q *Typed[T]) ShutDown() {
+ defer q.wg.Wait()
+ q.stopOnce.Do(func() {
+ defer close(q.stopCh)
+ })
+
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -311,6 +325,10 @@ func (q *Typed[T]) ShutDown() {
// Workers must call Done on an item after processing it, otherwise
// ShutDownWithDrain will block indefinitely.
func (q *Typed[T]) ShutDownWithDrain() {
+ defer q.wg.Wait()
+ q.stopOnce.Do(func() {
+ defer close(q.stopCh)
+ })
q.cond.L.Lock()
defer q.cond.L.Unlock()
@@ -330,20 +348,22 @@ func (q *Typed[T]) ShuttingDown() bool {
return q.shuttingDown
}
+func (q *Typed[T]) updateUnfinishedWork() {
+ q.cond.L.Lock()
+ defer q.cond.L.Unlock()
+ if !q.shuttingDown {
+ q.metrics.updateUnfinishedWork()
+ }
+}
+
func (q *Typed[T]) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
- for range t.C() {
- if !func() bool {
- q.cond.L.Lock()
- defer q.cond.L.Unlock()
- if !q.shuttingDown {
- q.metrics.updateUnfinishedWork()
- return true
- }
- return false
-
- }() {
+ for {
+ select {
+ case <-t.C():
+ q.updateUnfinishedWork()
+ case <-q.stopCh:
return
}
}
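A small sketch of the workqueue from the caller's side: usage is unchanged by this patch, but ShutDown now also signals stopCh and waits on the WaitGroup, so any unfinished-work metrics goroutine has exited by the time it returns. Item names are illustrative.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTyped[string]()
	q.Add("node-1")

	item, shutdown := q.Get()
	if !shutdown {
		fmt.Println("processing", item)
		q.Done(item)
	}

	// Returns only after any internal metrics goroutine has stopped.
	q.ShutDown()
}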
diff --git a/operator/vendor/k8s.io/kube-openapi/pkg/util/trie.go b/operator/vendor/k8s.io/kube-openapi/pkg/util/trie.go
new file mode 100644
index 00000000..a9a76c17
--- /dev/null
+++ b/operator/vendor/k8s.io/kube-openapi/pkg/util/trie.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+// A simple trie implementation with Add and HasPrefix methods only.
+type Trie struct {
+ children map[byte]*Trie
+ wordTail bool
+ word string
+}
+
+// NewTrie creates a Trie and adds all strings in the provided list to it.
+func NewTrie(list []string) Trie {
+ ret := Trie{
+ children: make(map[byte]*Trie),
+ wordTail: false,
+ }
+ for _, v := range list {
+ ret.Add(v)
+ }
+ return ret
+}
+
+// Add adds a word to this trie
+func (t *Trie) Add(v string) {
+ root := t
+ for _, b := range []byte(v) {
+ child, exists := root.children[b]
+ if !exists {
+ child = &Trie{
+ children: make(map[byte]*Trie),
+ wordTail: false,
+ }
+ root.children[b] = child
+ }
+ root = child
+ }
+ root.wordTail = true
+ root.word = v
+}
+
+// HasPrefix returns true if v has any of the prefixes stored in this trie.
+func (t *Trie) HasPrefix(v string) bool {
+ _, has := t.GetPrefix(v)
+ return has
+}
+
+// GetPrefix is like HasPrefix but returns the matching prefix, or an empty string if there is no match.
+func (t *Trie) GetPrefix(v string) (string, bool) {
+ root := t
+ if root.wordTail {
+ return root.word, true
+ }
+ for _, b := range []byte(v) {
+ child, exists := root.children[b]
+ if !exists {
+ return "", false
+ }
+ if child.wordTail {
+ return child.word, true
+ }
+ root = child
+ }
+ return "", false
+}
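A brief usage sketch of the trie: it answers whether a string starts with any stored prefix, and GetPrefix also reports which one matched. The prefixes are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util"
)

func main() {
	t := util.NewTrie([]string{"io.k8s.api.", "io.k8s.apimachinery."}) // example prefixes
	fmt.Println(t.HasPrefix("io.k8s.api.core.v1.Pod")) // true
	fmt.Println(t.GetPrefix("io.k8s.api.core.v1.Pod")) // io.k8s.api. true
	fmt.Println(t.HasPrefix("example.com/v1.Widget"))  // false
}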
diff --git a/operator/vendor/k8s.io/kube-openapi/pkg/util/util.go b/operator/vendor/k8s.io/kube-openapi/pkg/util/util.go
new file mode 100644
index 00000000..830ec3ca
--- /dev/null
+++ b/operator/vendor/k8s.io/kube-openapi/pkg/util/util.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "reflect"
+ "strings"
+)
+
+// [DEPRECATED] ToCanonicalName converts Golang package/type canonical name into REST friendly OpenAPI name.
+// This method is deprecated because it has a misleading name. Please use ToRESTFriendlyName
+// instead
+//
+// NOTE: actually the "canonical name" in this method should be named "REST friendly OpenAPI name",
+// which is different from "canonical name" defined in GetCanonicalTypeName. The "canonical name" defined
+// in GetCanonicalTypeName means Go type names with full package path.
+//
+// Examples of REST friendly OpenAPI name:
+//
+// Input: k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input: k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+//
+// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
+func ToCanonicalName(name string) string {
+ return ToRESTFriendlyName(name)
+}
+
+// ToRESTFriendlyName converts Golang package/type canonical name into REST friendly OpenAPI name.
+//
+// Examples of REST friendly OpenAPI name:
+//
+// Input: k8s.io/api/core/v1.Pod
+// Output: io.k8s.api.core.v1.Pod
+//
+// Input: k8s.io/api/core/v1
+// Output: io.k8s.api.core.v1
+//
+// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
+func ToRESTFriendlyName(name string) string {
+ nameParts := strings.Split(name, "/")
+ // Reverse first part. e.g., io.k8s... instead of k8s.io...
+ if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
+ parts := strings.Split(nameParts[0], ".")
+ for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+ parts[i], parts[j] = parts[j], parts[i]
+ }
+ nameParts[0] = strings.Join(parts, ".")
+ }
+ return strings.Join(nameParts, ".")
+}
+
+// OpenAPICanonicalTypeNamer is an interface for models that have no Go type to seed the model name.
+//
+// OpenAPI canonical names are Go type names with full package path, for uniquely identifying
+// a model / Go type. If a Go type is vendored from another package, only the path after "/vendor/"
+// should be used. For custom resource definition (CRD), the canonical name is expected to be
+//
+// group/version.kind
+//
+// Examples of canonical name:
+//
+// Go type: k8s.io/kubernetes/pkg/apis/core.Pod
+// CRD: csi.storage.k8s.io/v1alpha1.CSINodeInfo
+//
+// Example for vendored Go type:
+//
+// Original full path: k8s.io/kubernetes/vendor/k8s.io/api/core/v1.Pod
+// Canonical name: k8s.io/api/core/v1.Pod
+//
+// Original full path: vendor/k8s.io/api/core/v1.Pod
+// Canonical name: k8s.io/api/core/v1.Pod
+type OpenAPICanonicalTypeNamer interface {
+ OpenAPICanonicalTypeName() string
+}
+
+// OpenAPIModelNamer is an interface Go types may implement to provide an OpenAPI model name.
+//
+// This takes precedence over OpenAPICanonicalTypeNamer, and should be used when a Go type has a model
+// name that differs from its canonical type name as determined by Go package name reflection.
+type OpenAPIModelNamer interface {
+ OpenAPIModelName() string
+}
+
+// GetCanonicalTypeName will find the canonical type name of a sample object, removing
+// the "vendor" part of the path
+func GetCanonicalTypeName(model interface{}) string {
+ switch namer := model.(type) {
+ case OpenAPIModelNamer:
+ return namer.OpenAPIModelName()
+ case OpenAPICanonicalTypeNamer:
+ return namer.OpenAPICanonicalTypeName()
+ }
+ t := reflect.TypeOf(model)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.PkgPath() == "" {
+ return t.Name()
+ }
+ path := t.PkgPath()
+ if strings.Contains(path, "/vendor/") {
+ path = path[strings.Index(path, "/vendor/")+len("/vendor/"):]
+ } else if strings.HasPrefix(path, "vendor/") {
+ path = strings.TrimPrefix(path, "vendor/")
+ }
+ return path + "." + t.Name()
+}
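A short sketch of the two naming helpers using the example from the doc comments above: GetCanonicalTypeName yields the Go type name with its vendor-stripped package path, and ToRESTFriendlyName reverses the leading domain segment.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kube-openapi/pkg/util"
)

func main() {
	// Canonical name: Go type name with its full (vendor-stripped) package path.
	canonical := util.GetCanonicalTypeName(&v1.Pod{})
	fmt.Println(canonical) // k8s.io/api/core/v1.Pod

	// REST-friendly OpenAPI name: leading domain segment reversed.
	fmt.Println(util.ToRESTFriendlyName(canonical)) // io.k8s.api.core.v1.Pod
}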
diff --git a/operator/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/operator/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
index f62c0702..1de0cf44 100644
--- a/operator/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
+++ b/operator/vendor/k8s.io/kubernetes/pkg/apis/core/types.go
@@ -398,6 +398,7 @@ type PersistentVolumeSpec struct {
VolumeMode *PersistentVolumeMode
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
+ // This field is mutable if MutablePVNodeAffinity feature gate is enabled.
// +optional
NodeAffinity *VolumeNodeAffinity
// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
@@ -724,9 +725,6 @@ type PersistentVolumeClaimStatus struct {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +optional
AllocatedResources ResourceList
// AllocatedResourceStatuses stores status of resource being resized for the given PVC.
@@ -762,9 +760,6 @@ type PersistentVolumeClaimStatus struct {
// should ignore the update for the purpose it was designed. For example - a controller that
// only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
// resources associated with PVC.
- //
- // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
- // +featureGate=RecoverVolumeExpansionFailure
// +mapType=granular
// +optional
AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus
@@ -1904,6 +1899,21 @@ type PodCertificateProjection struct {
// Write the certificate chain at this path in the projected volume.
CertificateChainPath string
+
+ // userAnnotations allow pod authors to pass additional information to
+ // the signer implementation. Kubernetes does not restrict or validate this
+ // metadata in any way.
+ //
+ // These values are copied verbatim into the `spec.unverifiedUserAnnotations` field of
+ // the PodCertificateRequest objects that Kubelet creates.
+ //
+ // Entries are subject to the same validation as object metadata annotations,
+ // with the addition that all keys must be domain-prefixed. No restrictions
+ // are placed on values, except an overall size limitation on the entire field.
+ //
+ // Signers should document the keys and values they support. Signers should
+ // deny requests that contain keys they do not recognize.
+ UserAnnotations map[string]string
}
// ProjectedVolumeSource represents a projected volume source
@@ -2153,8 +2163,6 @@ type VolumeMount struct {
// None (or be unspecified, which defaults to None).
//
// If this field is not specified, it is treated as an equivalent of Disabled.
- //
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnly *RecursiveReadOnlyMode
// Required. If the path is not an absolute path (e.g. some/path) it
@@ -2647,6 +2655,7 @@ type Container struct {
// +optional
Resources ResourceRequirements
// Resources resize policy for the container.
+ // This field cannot be set on ephemeral containers.
// +featureGate=InPlacePodVerticalScaling
// +optional
ResizePolicy []ContainerResizePolicy
@@ -2670,7 +2679,6 @@ type Container struct {
// container. Instead, the next init container starts immediately after this
// init container is started, or after any startupProbe has successfully
// completed.
- // +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy
// Represents a list of rules to be checked to determine if the
@@ -2758,7 +2766,6 @@ type LifecycleHandler struct {
// +optional
TCPSocket *TCPSocketAction
// Sleep represents the duration that the container should sleep before being terminated.
- // +featureGate=PodLifecycleSleepAction
// +optional
Sleep *SleepAction
}
@@ -3000,7 +3007,6 @@ type ContainerStatus struct {
// Status of volume mounts.
// +listType=atomic
// +optional
- // +featureGate=RecursiveReadOnlyMounts
VolumeMounts []VolumeMountStatus
// User represents user identity information initially attached to the first process of the container
// +featureGate=SupplementalGroupsPolicy
@@ -3148,6 +3154,8 @@ const (
// If both PodResizePending and PodResizeInProgress are set, it means that a new resize was
// requested in the middle of a previous pod resize that is still in progress.
PodResizeInProgress PodConditionType = "PodResizeInProgress"
+ // AllContainersRestarting indicates that all containers of the pod are being restarted.
+ AllContainersRestarting PodConditionType = "AllContainersRestarting"
)
// PodCondition represents pod's condition
@@ -3191,7 +3199,6 @@ type VolumeMountStatus struct {
// RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
// An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
// depending on the mount result.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnly *RecursiveReadOnlyMode
}
@@ -3236,9 +3243,15 @@ type ContainerRestartRule struct {
// container exits.
type ContainerRestartRuleAction string
-// The only valid action is Restart.
+// These are valid restart rule actions.
const (
+ // The container will be restarted if the rule matches. Only valid on normal init containers and
+ // regular containers. Not valid on sidecar containers and ephemeral containers.
ContainerRestartRuleActionRestart ContainerRestartRuleAction = "Restart"
+ // All containers (except ephemeral containers) inside the pod will be terminated and restarted.
+ // Valid on normal init containers, sidecar containers, and regular containers. Not valid on
+ // ephemeral containers.
+ ContainerRestartRuleActionRestartAllContainers ContainerRestartRuleAction = "RestartAllContainers"
)
// ContainerRestartRuleOnExitCodes describes the condition
@@ -3616,9 +3629,10 @@ type Toleration struct {
// +optional
Key string
// Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
+ // Valid operators are Exists, Equal, Lt, and Gt. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
+ // Lt and Gt perform numeric comparisons (requires feature gate TaintTolerationComparisonOperators).
// +optional
Operator TolerationOperator
// Value is the taint value the toleration matches to.
@@ -3644,6 +3658,8 @@ type TolerationOperator string
const (
TolerationOpExists TolerationOperator = "Exists"
TolerationOpEqual TolerationOperator = "Equal"
+ TolerationOpLt TolerationOperator = "Lt"
+ TolerationOpGt TolerationOperator = "Gt"
)
// PodReadinessGate contains the reference to a pod condition
@@ -3847,8 +3863,8 @@ type PodSpec struct {
// will be made available to those containers which consume them
// by name.
//
- // This is an alpha field and requires enabling the
- // DynamicResourceAllocation feature gate.
+ // This is a stable field but requires that the
+ // DynamicResourceAllocation feature gate is enabled.
//
// This field is immutable.
//
@@ -3883,6 +3899,17 @@ type PodSpec struct {
// +featureGate=HostnameOverride
// +optional
HostnameOverride *string
+ // WorkloadRef provides a reference to the Workload object that this Pod belongs to.
+ // This field is used by the scheduler to identify the PodGroup and apply the
+ // correct group scheduling policies. The Workload object referenced
+ // by this field may not exist at the time the Pod is created.
+ // This field is immutable, but a Workload object with the same name
+ // may be recreated with different policies. Doing this during pod scheduling
+ // may result in the placement not conforming to the expected policies.
+ //
+ // +featureGate=GenericWorkload
+ // +optional
+ WorkloadRef *WorkloadReference
}
// PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
@@ -3981,6 +4008,36 @@ type PodSchedulingGate struct {
Name string
}
+// WorkloadReference identifies the Workload object and PodGroup membership
+// that a Pod belongs to. The scheduler uses this information to apply
+// workload-aware scheduling semantics.
+type WorkloadReference struct {
+ // Name defines the name of the Workload object this Pod belongs to.
+ // Workload must be in the same namespace as the Pod.
+ // If it doesn't match any existing Workload, the Pod will remain unschedulable
+ // until a Workload object is created and observed by the kube-scheduler.
+ // It must be a DNS subdomain.
+ //
+ // +required
+ Name string
+
+ // PodGroup is the name of the PodGroup within the Workload that this Pod
+ // belongs to. If it doesn't match any existing PodGroup within the Workload,
+ // the Pod will remain unschedulable until the Workload object is recreated
+ // and observed by the kube-scheduler. It must be a DNS label.
+ //
+ // +required
+ PodGroup string
+
+ // PodGroupReplicaKey specifies the replica key of the PodGroup to which this
+ // Pod belongs. It is used to distinguish pods belonging to different replicas
+ // of the same pod group. The pod group policy is applied separately to each replica.
+ // When set, it must be a DNS label.
+ //
+ // +optional
+ PodGroupReplicaKey string
+}
+
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
@@ -4372,7 +4429,6 @@ type EphemeralContainerCommon struct {
// Restart policy for the container to manage the restart behavior of each
// container within a pod. Must be specified if restartPolicyRules are used.
// You cannot set this field on ephemeral containers.
- // +featureGate=SidecarContainers
// +optional
RestartPolicy *ContainerRestartPolicy
// Represents a list of rules to be checked to determine if the
@@ -4461,7 +4517,7 @@ type EphemeralContainer struct {
// state of a system.
type PodStatus struct {
// If set, this represents the .metadata.generation that the pod status was set based upon.
- // This is an alpha field. Enable PodObservedGenerationTracking to be able to use this field.
+ // The PodObservedGenerationTracking feature gate must be enabled to use this field.
// +featureGate=PodObservedGenerationTracking
// +optional
ObservedGeneration int64
@@ -4562,6 +4618,19 @@ type PodStatus struct {
// +featureGate=DRAExtendedResource
// +optional
ExtendedResourceClaimStatus *PodExtendedResourceClaimStatus
+
+ // AllocatedResources is the total requests allocated for this pod by the node.
+ // If pod-level requests are not set, this will be the total requests aggregated
+ // across containers in the pod.
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ AllocatedResources ResourceList
+
+ // Resources represents the compute resource requests and limits that have been
+ // applied at the pod level
+ // +featureGate=InPlacePodLevelResourcesVerticalScaling
+ // +optional
+ Resources *ResourceRequirements
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -4851,27 +4920,25 @@ const (
// These are valid values for the TrafficDistribution field of a Service.
const (
- // Indicates a preference for routing traffic to endpoints that are in the same
- // zone as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same zone"
- // preference will not result in endpoints getting overloaded.
- ServiceTrafficDistributionPreferClose = "PreferClose"
-
- // Indicates a preference for routing traffic to endpoints that are in the same
- // zone as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same zone"
- // preference will not result in endpoints getting overloaded.
- // This is an alias for "PreferClose", but it is an Alpha feature and is only
- // recognized if the PreferSameTrafficDistribution feature gate is enabled.
+ // ServiceTrafficDistributionPreferSameZone indicates a preference for routing
+ // traffic to endpoints that are in the same zone as the client. Users should only
+ // set this value if they have ensured that clients and endpoints are distributed
+ // in such a way that the "same zone" preference will not result in endpoints
+ // getting overloaded.
ServiceTrafficDistributionPreferSameZone = "PreferSameZone"
- // Indicates a preference for routing traffic to endpoints that are on the same
- // node as the client. Users should not set this value unless they have ensured
- // that clients and endpoints are distributed in such a way that the "same node"
- // preference will not result in endpoints getting overloaded.
- // This is an Alpha feature and is only recognized if the
- // PreferSameTrafficDistribution feature gate is enabled.
+ // ServiceTrafficDistributionPreferSameNode indicates a preference for routing
+ // traffic to endpoints that are on the same node as the client. Users should only
+ // set this value if they have ensured that clients and endpoints are distributed
+ // in such a way that the "same node" preference will not result in endpoints
+ // getting overloaded.
ServiceTrafficDistributionPreferSameNode = "PreferSameNode"
+
+ // ServiceTrafficDistributionPreferClose is the original name of "PreferSameZone".
+ // Despite the generic-sounding name, it has exactly the same meaning as
+ // "PreferSameZone".
+ // Deprecated: use "PreferSameZone" instead.
+ ServiceTrafficDistributionPreferClose = "PreferClose"
)
// These are the valid conditions of a service.
@@ -5439,7 +5506,6 @@ type NodeDaemonEndpoints struct {
// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
type NodeRuntimeHandlerFeatures struct {
// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
- // +featureGate=RecursiveReadOnlyMounts
// +optional
RecursiveReadOnlyMounts *bool
// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
@@ -5588,7 +5654,6 @@ type NodeStatus struct {
// +optional
Config *NodeConfigStatus
// The available runtime handlers.
- // +featureGate=RecursiveReadOnlyMounts
// +featureGate=UserNamespacesSupport
// +optional
RuntimeHandlers []NodeRuntimeHandler
@@ -5596,6 +5661,10 @@ type NodeStatus struct {
// +featureGate=SupplementalGroupsPolicy
// +optional
Features *NodeFeatures
+ // DeclaredFeatures represents the declared features of a node.
+ // +featureGate=NodeDeclaredFeatures
+ // +optional
+ DeclaredFeatures []string
}
// UniqueVolumeName defines the name of attached volume
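A hedged, illustrative sketch of the new fields added to the internal core API above: WorkloadRef ties a pod to a Workload/PodGroup for group scheduling, and the Lt/Gt toleration operators perform numeric comparisons. These fields are feature-gated (GenericWorkload, TaintTolerationComparisonOperators) and live in the internal package, which most controllers never construct directly; the names and values here are made up.

package main

import (
	"fmt"

	core "k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	spec := core.PodSpec{
		WorkloadRef: &core.WorkloadReference{
			Name:     "training-job", // Workload in the same namespace (illustrative)
			PodGroup: "workers",      // PodGroup within that Workload
		},
		Tolerations: []core.Toleration{{
			Key:      "example.com/min-priority", // hypothetical taint key
			Operator: core.TolerationOpGt,        // numeric comparison, gated
			Value:    "100",
		}},
	}
	fmt.Println(spec.WorkloadRef.Name, spec.Tolerations[0].Operator)
}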
diff --git a/operator/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/operator/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
index de1b33b9..017ac70c 100644
--- a/operator/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
+++ b/operator/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
@@ -3147,6 +3147,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
*out = new(NodeFeatures)
(*in).DeepCopyInto(*out)
}
+ if in.DeclaredFeatures != nil {
+ in, out := &in.DeclaredFeatures, &out.DeclaredFeatures
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}
@@ -3905,6 +3910,13 @@ func (in *PodCertificateProjection) DeepCopyInto(out *PodCertificateProjection)
*out = new(int32)
**out = **in
}
+ if in.UserAnnotations != nil {
+ in, out := &in.UserAnnotations, &out.UserAnnotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
return
}
@@ -4559,6 +4571,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
*out = new(string)
**out = **in
}
+ if in.WorkloadRef != nil {
+ in, out := &in.WorkloadRef, &out.WorkloadRef
+ *out = new(WorkloadReference)
+ **out = **in
+ }
return
}
@@ -4629,6 +4646,18 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) {
*out = new(PodExtendedResourceClaimStatus)
(*in).DeepCopyInto(*out)
}
+ if in.AllocatedResources != nil {
+ in, out := &in.AllocatedResources, &out.AllocatedResources
+ *out = make(ResourceList, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val.DeepCopy()
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = new(ResourceRequirements)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -6834,3 +6863,19 @@ func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptio
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WorkloadReference) DeepCopyInto(out *WorkloadReference) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadReference.
+func (in *WorkloadReference) DeepCopy() *WorkloadReference {
+ if in == nil {
+ return nil
+ }
+ out := new(WorkloadReference)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/operator/vendor/k8s.io/utils/net/multi_listen.go b/operator/vendor/k8s.io/utils/net/multi_listen.go
index 7cb7795b..e5d50805 100644
--- a/operator/vendor/k8s.io/utils/net/multi_listen.go
+++ b/operator/vendor/k8s.io/utils/net/multi_listen.go
@@ -21,6 +21,7 @@ import (
"fmt"
"net"
"sync"
+ "sync/atomic"
)
// connErrPair pairs conn and error which is returned by accept on sub-listeners.
@@ -38,6 +39,7 @@ type multiListener struct {
connCh chan connErrPair
// stopCh communicates from parent to child listeners.
stopCh chan struct{}
+ closed atomic.Bool
}
// compile time check to ensure *multiListener implements net.Listener
@@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) {
// the go-routines to exit.
func (ml *multiListener) Close() error {
// Make sure this can be called repeatedly without explosions.
- select {
- case <-ml.stopCh:
+ if !ml.closed.CompareAndSwap(false, true) {
return fmt.Errorf("use of closed network connection")
- default:
}
// Tell all sub-listeners to stop.
diff --git a/operator/vendor/modules.txt b/operator/vendor/modules.txt
index d9eddeed..2b0192ab 100644
--- a/operator/vendor/modules.txt
+++ b/operator/vendor/modules.txt
@@ -2,6 +2,9 @@
## explicit; go 1.16
github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm
+# github.com/Masterminds/semver/v3 v3.4.0
+## explicit; go 1.21
+github.com/Masterminds/semver/v3
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
@@ -34,7 +37,7 @@ github.com/fxamacker/cbor/v2
# github.com/go-errors/errors v1.4.2
## explicit; go 1.14
github.com/go-errors/errors
-# github.com/go-logr/logr v1.4.2
+# github.com/go-logr/logr v1.4.3
## explicit; go 1.18
github.com/go-logr/logr
github.com/go-logr/logr/funcr
@@ -76,8 +79,8 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
-# github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad
-## explicit; go 1.22
+# github.com/google/pprof v0.0.0-20250403155104-27863c87afa6
+## explicit; go 1.23
github.com/google/pprof/profile
# github.com/google/uuid v1.6.0
## explicit
@@ -118,12 +121,13 @@ github.com/monochromegane/go-gitignore
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.22.2
-## explicit; go 1.22.0
+# github.com/onsi/ginkgo/v2 v2.27.2
+## explicit; go 1.23.0
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
github.com/onsi/ginkgo/v2/formatter
github.com/onsi/ginkgo/v2/ginkgo
+github.com/onsi/ginkgo/v2/ginkgo/automaxprocs
github.com/onsi/ginkgo/v2/ginkgo/build
github.com/onsi/ginkgo/v2/ginkgo/command
github.com/onsi/ginkgo/v2/ginkgo/generators
@@ -137,11 +141,12 @@ github.com/onsi/ginkgo/v2/internal
github.com/onsi/ginkgo/v2/internal/global
github.com/onsi/ginkgo/v2/internal/interrupt_handler
github.com/onsi/ginkgo/v2/internal/parallel_support
+github.com/onsi/ginkgo/v2/internal/reporters
github.com/onsi/ginkgo/v2/internal/testingtproxy
github.com/onsi/ginkgo/v2/reporters
github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.36.2
-## explicit; go 1.22.0
+# github.com/onsi/gomega v1.38.2
+## explicit; go 1.23.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/internal
@@ -156,14 +161,11 @@ github.com/onsi/gomega/types
# github.com/peterbourgon/diskv v2.0.1+incompatible
## explicit
github.com/peterbourgon/diskv
-# github.com/pkg/errors v0.9.1
-## explicit
-github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.22.0
-## explicit; go 1.22
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
@@ -171,15 +173,15 @@ github.com/prometheus/client_golang/prometheus/collectors
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
github.com/prometheus/client_golang/prometheus/promhttp/internal
-# github.com/prometheus/client_model v0.6.1
-## explicit; go 1.19
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.62.0
-## explicit; go 1.21
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.15.1
-## explicit; go 1.20
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -195,7 +197,7 @@ github.com/spf13/pflag
# github.com/stretchr/objx v0.5.2
## explicit; go 1.20
github.com/stretchr/objx
-# github.com/stretchr/testify v1.10.0
+# github.com/stretchr/testify v1.11.1
## explicit; go 1.17
github.com/stretchr/testify/assert
github.com/stretchr/testify/assert/yaml
@@ -220,17 +222,17 @@ go.uber.org/zap/internal/exit
go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
-# go.yaml.in/yaml/v2 v2.4.2
+# go.yaml.in/yaml/v2 v2.4.3
## explicit; go 1.15
go.yaml.in/yaml/v2
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
-# golang.org/x/mod v0.22.0
-## explicit; go 1.22.0
+# golang.org/x/mod v0.29.0
+## explicit; go 1.24.0
golang.org/x/mod/semver
-# golang.org/x/net v0.38.0
-## explicit; go 1.23.0
+# golang.org/x/net v0.47.0
+## explicit; go 1.24.0
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/html/charset
@@ -239,23 +241,23 @@ golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/httpcommon
-# golang.org/x/oauth2 v0.27.0
+# golang.org/x/oauth2 v0.30.0
## explicit; go 1.23.0
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.12.0
-## explicit; go 1.23.0
+# golang.org/x/sync v0.18.0
+## explicit; go 1.24.0
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.31.0
-## explicit; go 1.23.0
+# golang.org/x/sys v0.38.0
+## explicit; go 1.24.0
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/term v0.30.0
-## explicit; go 1.23.0
+# golang.org/x/term v0.37.0
+## explicit; go 1.24.0
golang.org/x/term
-# golang.org/x/text v0.23.0
-## explicit; go 1.23.0
+# golang.org/x/text v0.31.0
+## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex
@@ -279,15 +281,33 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.9.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.28.0
-## explicit; go 1.22.0
+# golang.org/x/tools v0.38.0
+## explicit; go 1.24.0
golang.org/x/tools/cover
+golang.org/x/tools/go/ast/edge
golang.org/x/tools/go/ast/inspector
+golang.org/x/tools/go/gcexportdata
+golang.org/x/tools/go/packages
+golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
+golang.org/x/tools/internal/aliases
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/gcimporter
+golang.org/x/tools/internal/gocommand
+golang.org/x/tools/internal/packagesinternal
+golang.org/x/tools/internal/pkgbits
+golang.org/x/tools/internal/stdlib
+golang.org/x/tools/internal/typeparams
+golang.org/x/tools/internal/typesinternal
+golang.org/x/tools/internal/versions
# gomodules.xyz/jsonpatch/v2 v2.4.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
-# google.golang.org/protobuf v1.36.5
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.8
+## explicit; go 1.23
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -319,7 +339,7 @@ google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/timestamppb
-# gopkg.in/evanphx/json-patch.v4 v4.12.0
+# gopkg.in/evanphx/json-patch.v4 v4.13.0
## explicit
gopkg.in/evanphx/json-patch.v4
# gopkg.in/inf.v0 v0.9.1
@@ -328,8 +348,8 @@ gopkg.in/inf.v0
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.34.1
-## explicit; go 1.24.0
+# k8s.io/api v0.35.0
+## explicit; go 1.25.0
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
k8s.io/api/admissionregistration/v1
@@ -389,7 +409,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
-k8s.io/api/storagemigration/v1alpha1
+k8s.io/api/storagemigration/v1beta1
# k8s.io/apiextensions-apiserver v0.34.1
## explicit; go 1.24.0
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
@@ -401,8 +421,8 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-# k8s.io/apimachinery v0.34.1
-## explicit; go 1.24.0
+# k8s.io/apimachinery v0.35.0
+## explicit; go 1.25.0
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
k8s.io/apimachinery/pkg/api/meta
@@ -416,6 +436,7 @@ k8s.io/apimachinery/pkg/api/validate/content
k8s.io/apimachinery/pkg/api/validation
k8s.io/apimachinery/pkg/apis/meta/internalversion
k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme
+k8s.io/apimachinery/pkg/apis/meta/internalversion/validation
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme
@@ -458,20 +479,21 @@ k8s.io/apimachinery/pkg/util/strategicpatch
k8s.io/apimachinery/pkg/util/uuid
k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field
+k8s.io/apimachinery/pkg/util/version
k8s.io/apimachinery/pkg/util/wait
k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/cli-runtime v0.34.1
-## explicit; go 1.24.0
+# k8s.io/cli-runtime v0.35.0
+## explicit; go 1.25.0
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/genericiooptions
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v0.34.1
-## explicit; go 1.24.0
+# k8s.io/client-go v0.35.0
+## explicit; go 1.25.0
k8s.io/client-go/applyconfigurations
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -525,7 +547,7 @@ k8s.io/client-go/applyconfigurations/scheduling/v1beta1
k8s.io/client-go/applyconfigurations/storage/v1
k8s.io/client-go/applyconfigurations/storage/v1alpha1
k8s.io/client-go/applyconfigurations/storage/v1beta1
-k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1
+k8s.io/client-go/applyconfigurations/storagemigration/v1beta1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached/disk
k8s.io/client-go/discovery/cached/memory
@@ -604,7 +626,7 @@ k8s.io/client-go/informers/storage/v1
k8s.io/client-go/informers/storage/v1alpha1
k8s.io/client-go/informers/storage/v1beta1
k8s.io/client-go/informers/storagemigration
-k8s.io/client-go/informers/storagemigration/v1alpha1
+k8s.io/client-go/informers/storagemigration/v1beta1
k8s.io/client-go/kubernetes
k8s.io/client-go/kubernetes/fake
k8s.io/client-go/kubernetes/scheme
@@ -716,8 +738,8 @@ k8s.io/client-go/kubernetes/typed/storage/v1alpha1
k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake
k8s.io/client-go/kubernetes/typed/storage/v1beta1
k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake
-k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1
-k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake
+k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1
+k8s.io/client-go/kubernetes/typed/storagemigration/v1beta1/fake
k8s.io/client-go/listers
k8s.io/client-go/listers/admissionregistration/v1
k8s.io/client-go/listers/admissionregistration/v1alpha1
@@ -768,7 +790,7 @@ k8s.io/client-go/listers/scheduling/v1beta1
k8s.io/client-go/listers/storage/v1
k8s.io/client-go/listers/storage/v1alpha1
k8s.io/client-go/listers/storage/v1beta1
-k8s.io/client-go/listers/storagemigration/v1alpha1
+k8s.io/client-go/listers/storagemigration/v1beta1
k8s.io/client-go/metadata
k8s.io/client-go/openapi
k8s.io/client-go/openapi/cached
@@ -814,6 +836,7 @@ k8s.io/client-go/util/homedir
k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
+k8s.io/client-go/util/watchlist
k8s.io/client-go/util/workqueue
# k8s.io/klog/v2 v2.130.1
## explicit; go 1.18
@@ -824,8 +847,8 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
-# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b
-## explicit; go 1.23
+# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912
+## explicit; go 1.23.0
k8s.io/kube-openapi/pkg/cached
k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/handler3
@@ -833,14 +856,15 @@ k8s.io/kube-openapi/pkg/internal
k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json
k8s.io/kube-openapi/pkg/schemaconv
k8s.io/kube-openapi/pkg/spec3
+k8s.io/kube-openapi/pkg/util
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kubernetes v1.34.2
-## explicit; go 1.24.0
+# k8s.io/kubernetes v1.35.0
+## explicit; go 1.25.0
k8s.io/kubernetes/pkg/apis/core
k8s.io/kubernetes/pkg/apis/core/helper
k8s.io/kubernetes/pkg/util/taints
-# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
+# k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
## explicit; go 1.18
k8s.io/utils/buffer
k8s.io/utils/clock
@@ -850,7 +874,7 @@ k8s.io/utils/lru
k8s.io/utils/net
k8s.io/utils/ptr
k8s.io/utils/trace
-# sigs.k8s.io/controller-runtime v0.21.0
+# sigs.k8s.io/controller-runtime v0.22.4
## explicit; go 1.24.0
sigs.k8s.io/controller-runtime
sigs.k8s.io/controller-runtime/pkg/builder
@@ -904,8 +928,9 @@ sigs.k8s.io/controller-runtime/pkg/webhook
sigs.k8s.io/controller-runtime/pkg/webhook/admission
sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics
sigs.k8s.io/controller-runtime/pkg/webhook/conversion
+sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics
sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
-# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8
+# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730
## explicit; go 1.23
sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/.golangci.yml b/operator/vendor/sigs.k8s.io/controller-runtime/.golangci.yml
index 7390d202..1741432a 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/.golangci.yml
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/.golangci.yml
@@ -17,6 +17,7 @@ linters:
- errchkjson
- errorlint
- exhaustive
+ - forbidigo
- ginkgolinter
- goconst
- gocritic
@@ -39,6 +40,12 @@ linters:
- unused
- whitespace
settings:
+ forbidigo:
+ forbid:
+ - pattern: context.Background
+ msg: Use ginkgo's SpecContext or go testing's t.Context instead
+ - pattern: context.TODO
+ msg: Use ginkgo's SpecContext or go testing's t.Context instead
govet:
disable:
- fieldalignment
@@ -94,6 +101,9 @@ linters:
- zz_generated.*\.go$
- .*conversion.*\.go$
rules:
+ - linters:
+ - forbidigo
+ path-except: _test\.go
- linters:
- gosec
text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof'
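The forbidigo rule added above bans context.Background and context.TODO, and the exclusion rule limits the findings to _test.go files, so controller-runtime's own tests are steered toward contexts tied to the test lifecycle. A minimal sketch of the pattern it nudges toward, assuming Go 1.24+ and ginkgo/v2; the test names are illustrative only:

package example_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// go test: t.Context() is cancelled automatically when the test finishes,
// so there is no need for context.Background().
func TestReconcileOnce(t *testing.T) {
	ctx := t.Context()
	_ = ctx // pass ctx into the code under test
}

// Ginkgo: interruptible specs receive a SpecContext tied to the spec's lifetime.
var _ = It("reconciles the object", func(ctx SpecContext) {
	Expect(ctx.Err()).NotTo(HaveOccurred())
})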
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES b/operator/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
index 5f5b2b66..47bf6eed 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/OWNERS_ALIASES
@@ -4,8 +4,10 @@ aliases:
# active folks who can be contacted to perform admin-related
# tasks on the repo, or otherwise approve any PRS.
controller-runtime-admins:
- - vincepri
+ - alvaroaleman
- joelanford
+ - sbueringer
+ - vincepri
# non-admin folks who have write-access and can approve any PRs in the repo
controller-runtime-maintainers:
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/README.md b/operator/vendor/sigs.k8s.io/controller-runtime/README.md
index 20f7fd81..54bacad4 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/README.md
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/README.md
@@ -25,9 +25,9 @@ The full documentation can be found at [VERSIONING.md](VERSIONING.md), but TL;DR
Users:
-- We follow [Semantic Versioning (semver)](https://semver.org)
-- Use releases with your dependency management to ensure that you get compatible code
-- The main branch contains all the latest code, some of which may break compatibility (so "normal" `go get` is not recommended)
+- We stick to a zero major version
+- We publish a minor version for each Kubernetes minor release and allow breaking changes between minor versions
+- We publish patch versions as needed and we don't allow breaking changes in them
Contributors:
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md b/operator/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
index 2c0f2f9b..7ad6b142 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/VERSIONING.md
@@ -7,6 +7,16 @@ For the purposes of the aforementioned guidelines, controller-runtime
counts as a "library project", but otherwise follows the guidelines
exactly.
+We stick to a major version of zero and create a minor version for
+each Kubernetes minor version and we allow breaking changes in our
+minor versions. We create patch releases as needed and don't allow
+breaking changes in them.
+
+Publishing a non-zero major version is pointless for us, as the k8s.io/*
+libraries we heavily depend on make breaking changes but use the same
+versioning scheme as described above. Consequently, a project can only
+ever depend on one controller-runtime version.
+
[guidelines]: https://sigs.k8s.io/kubebuilder-release-tools/VERSIONING.md
## Compatibility and Release Support
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
index 8ec6d58f..6263f030 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/builder/webhook.go
@@ -98,6 +98,7 @@ func (blder *WebhookBuilder) RecoverPanic(recoverPanic bool) *WebhookBuilder {
}
// WithCustomPath overrides the webhook's default path by the customPath
+//
// Deprecated: WithCustomPath should not be used anymore.
// Please use WithValidatorCustomPath or WithDefaulterCustomPath instead.
func (blder *WebhookBuilder) WithCustomPath(customPath string) *WebhookBuilder {
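Since WithCustomPath is deprecated, webhook registrations that need non-default paths should move to the validator/defaulter-specific variants named in the deprecation note. A sketch under that assumption; the object, handler values, and paths are placeholders, not code from this repo:

package webhooks

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// hypothetical helper: register validating and defaulting webhooks on custom paths.
func setupWebhook(mgr ctrl.Manager, obj client.Object,
	validator admission.CustomValidator, defaulter admission.CustomDefaulter) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(obj).
		WithValidator(validator).
		WithValidatorCustomPath("/validate-custom"). // replaces WithCustomPath
		WithDefaulter(defaulter).
		WithDefaulterCustomPath("/default-custom").
		Complete()
}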
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go
index 648d0d75..a94ec6cc 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go
@@ -172,6 +172,15 @@ type Options struct {
// is "done" with an object, and would otherwise not requeue it, i.e., we
// recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
// instead of `reconcile.Result{}`.
+ //
+ // SyncPeriod will locally trigger an artificial Update event with the same
+ // object in both ObjectOld and ObjectNew for everything that is in the
+ // cache.
+ //
+ // Predicates or Handlers that expect ObjectOld and ObjectNew to be different
+ // (such as GenerationChangedPredicate) will filter out this event, preventing
+ // it from triggering a reconciliation.
+ // SyncPeriod does not sync between the local cache and the server.
SyncPeriod *time.Duration
// ReaderFailOnMissingInformer configures the cache to return a ErrResourceNotCached error when a user
@@ -299,6 +308,42 @@ type ByObject struct {
//
// Defaults to true.
EnableWatchBookmarks *bool
+
+ // SyncPeriod determines the minimum frequency at which watched resources are
+ // reconciled. A lower period will correct entropy more quickly, but reduce
+ // responsiveness to change if there are many watched resources. Change this
+ // value only if you know what you are doing. Defaults to 10 hours if unset.
+ // There will be a 10 percent jitter between the SyncPeriod of all controllers
+ // so that all controllers will not send list requests simultaneously.
+ //
+ // This applies to all controllers.
+ //
+ // A period sync happens for two reasons:
+ // 1. To insure against a bug in the controller that causes an object to not
+ // be requeued, when it otherwise should be requeued.
+ // 2. To insure against an unknown bug in controller-runtime, or its dependencies,
+ // that causes an object to not be requeued, when it otherwise should be
+ // requeued, or to be removed from the queue, when it otherwise should not
+ // be removed.
+ //
+ // If you want
+ // 1. to insure against missed watch events, or
+ // 2. to poll services that cannot be watched,
+ // then we recommend that, instead of changing the default period, the
+ // controller requeue, with a constant duration `t`, whenever the controller
+ // is "done" with an object, and would otherwise not requeue it, i.e., we
+ // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
+ // instead of `reconcile.Result{}`.
+ //
+ // SyncPeriod will locally trigger an artificial Update event with the same
+ // object in both ObjectOld and ObjectNew for everything that is in the
+ // cache.
+ //
+ // Predicates or Handlers that expect ObjectOld and ObjectNew to be different
+ // (such as GenerationChangedPredicate) will filter out this event, preventing
+ // it from triggering a reconciliation.
+ // SyncPeriod does not sync between the local cache and the server.
+ SyncPeriod *time.Duration
}
// Config describes all potential options for a given watch.
@@ -334,6 +379,42 @@ type Config struct {
//
// Defaults to true.
EnableWatchBookmarks *bool
+
+ // SyncPeriod determines the minimum frequency at which watched resources are
+ // reconciled. A lower period will correct entropy more quickly, but reduce
+ // responsiveness to change if there are many watched resources. Change this
+ // value only if you know what you are doing. Defaults to 10 hours if unset.
+ // There will be a 10 percent jitter between the SyncPeriod of all controllers
+ // so that all controllers will not send list requests simultaneously.
+ //
+ // This applies to all controllers.
+ //
+ // A period sync happens for two reasons:
+ // 1. To insure against a bug in the controller that causes an object to not
+ // be requeued, when it otherwise should be requeued.
+ // 2. To insure against an unknown bug in controller-runtime, or its dependencies,
+ // that causes an object to not be requeued, when it otherwise should be
+ // requeued, or to be removed from the queue, when it otherwise should not
+ // be removed.
+ //
+ // If you want
+ // 1. to insure against missed watch events, or
+ // 2. to poll services that cannot be watched,
+ // then we recommend that, instead of changing the default period, the
+ // controller requeue, with a constant duration `t`, whenever the controller
+ // is "done" with an object, and would otherwise not requeue it, i.e., we
+ // recommend the `Reconcile` function return `reconcile.Result{RequeueAfter: t}`,
+ // instead of `reconcile.Result{}`.
+ //
+ // SyncPeriod will locally trigger an artificial Update event with the same
+ // object in both ObjectOld and ObjectNew for everything that is in the
+ // cache.
+ //
+ // Predicates or Handlers that expect ObjectOld and ObjectNew to be different
+ // (such as GenerationChangedPredicate) will filter out this event, preventing
+ // it from triggering a reconciliation.
+ // SyncPeriod does not sync between the local cache and the server.
+ SyncPeriod *time.Duration
}
// NewCacheFunc - Function for creating a new cache from the options and a rest config.
@@ -404,6 +485,7 @@ func optionDefaultsToConfig(opts *Options) Config {
Transform: opts.DefaultTransform,
UnsafeDisableDeepCopy: opts.DefaultUnsafeDisableDeepCopy,
EnableWatchBookmarks: opts.DefaultEnableWatchBookmarks,
+ SyncPeriod: opts.SyncPeriod,
}
}
@@ -414,6 +496,7 @@ func byObjectToConfig(byObject ByObject) Config {
Transform: byObject.Transform,
UnsafeDisableDeepCopy: byObject.UnsafeDisableDeepCopy,
EnableWatchBookmarks: byObject.EnableWatchBookmarks,
+ SyncPeriod: byObject.SyncPeriod,
}
}
@@ -427,7 +510,7 @@ func newCache(restConfig *rest.Config, opts Options) newCacheFunc {
HTTPClient: opts.HTTPClient,
Scheme: opts.Scheme,
Mapper: opts.Mapper,
- ResyncPeriod: *opts.SyncPeriod,
+ ResyncPeriod: ptr.Deref(config.SyncPeriod, defaultSyncPeriod),
Namespace: namespace,
Selector: internal.Selector{
Label: config.LabelSelector,
@@ -525,6 +608,7 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
byObject.Transform = defaultedConfig.Transform
byObject.UnsafeDisableDeepCopy = defaultedConfig.UnsafeDisableDeepCopy
byObject.EnableWatchBookmarks = defaultedConfig.EnableWatchBookmarks
+ byObject.SyncPeriod = defaultedConfig.SyncPeriod
}
opts.ByObject[obj] = byObject
@@ -546,10 +630,6 @@ func defaultOpts(config *rest.Config, opts Options) (Options, error) {
opts.DefaultNamespaces[namespace] = cfg
}
- // Default the resync period to 10 hours if unset
- if opts.SyncPeriod == nil {
- opts.SyncPeriod = &defaultSyncPeriod
- }
return opts, nil
}
@@ -569,6 +649,9 @@ func defaultConfig(toDefault, defaultFrom Config) Config {
if toDefault.EnableWatchBookmarks == nil {
toDefault.EnableWatchBookmarks = defaultFrom.EnableWatchBookmarks
}
+ if toDefault.SyncPeriod == nil {
+ toDefault.SyncPeriod = defaultFrom.SyncPeriod
+ }
return toDefault
}
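With SyncPeriod now living on Config and ByObject, the resync interval can be tuned per watched type instead of only globally on the cache. A sketch of what that could look like in a manager's options, assuming controller-runtime v0.22+; the Node type and one-hour interval are illustrative choices, not taken from this repo:

package setup

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hypothetical: resync cached Nodes every hour; everything else keeps the 10h default.
func newManager() (ctrl.Manager, error) {
	nodeResync := time.Hour
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Cache: cache.Options{
			ByObject: map[client.Object]cache.ByObject{
				&corev1.Node{}: {SyncPeriod: &nodeResync},
			},
		},
	})
}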
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go
index 33ce8a83..eb6b5448 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go
@@ -54,7 +54,10 @@ type CacheReader struct {
}
// Get checks the indexer for the object and writes a copy of it if found.
-func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, _ ...client.GetOption) error {
+func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
+ getOpts := client.GetOptions{}
+ getOpts.ApplyOptions(opts)
+
if c.scopeName == apimeta.RESTScopeNameRoot {
key.Namespace = ""
}
@@ -81,7 +84,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob
return fmt.Errorf("cache contained %T, which is not an Object", obj)
}
- if c.disableDeepCopy {
+ if c.disableDeepCopy || (getOpts.UnsafeDisableDeepCopy != nil && *getOpts.UnsafeDisableDeepCopy) {
// skip deep copy which might be unsafe
// you must DeepCopy any object before mutating it outside
} else {
@@ -97,7 +100,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob
return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
}
reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
- if !c.disableDeepCopy {
+ if !c.disableDeepCopy && (getOpts.UnsafeDisableDeepCopy == nil || !*getOpts.UnsafeDisableDeepCopy) {
out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
}
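The cache reader's Get now honors a per-call deep-copy opt-out, mirroring what List already supported. A sketch of a read-only hot path using it, relying on the UnsafeDisableDeepCopy field that client.GetOptions exposes in this controller-runtime version; the returned object shares memory with the informer cache and must never be mutated:

package readpath

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hypothetical: skip the defensive deep copy when reading a Node from the cache.
func getNodeReadOnly(ctx context.Context, c client.Client, name string) (*corev1.Node, error) {
	node := &corev1.Node{}
	err := c.Get(ctx, client.ObjectKey{Name: name}, node,
		&client.GetOptions{UnsafeDisableDeepCopy: ptr.To(true)})
	return node, err
}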
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go
index 4bf832b2..f216be0d 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers.go
@@ -518,7 +518,7 @@ func (ip *Informers) makeListWatcher(gvk schema.GroupVersionKind, obj runtime.Ob
// Structured.
//
default:
- client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs, ip.httpClient)
+ client, err := apiutil.RESTClientForGVK(gvk, false, false, ip.config, ip.codecs, ip.httpClient)
if err != nil {
return nil, err
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go
index c3232409..2362d020 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/certwatcher/certwatcher.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/fsnotify/fsnotify"
+ "github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@@ -47,6 +48,7 @@ type CertWatcher struct {
currentCert *tls.Certificate
watcher *fsnotify.Watcher
interval time.Duration
+ log logr.Logger
certPath string
keyPath string
@@ -65,6 +67,7 @@ func New(certPath, keyPath string) (*CertWatcher, error) {
certPath: certPath,
keyPath: keyPath,
interval: defaultWatchInterval,
+ log: log.WithValues("cert", certPath, "key", keyPath),
}
// Initial read of certificate and key.
@@ -130,14 +133,14 @@ func (cw *CertWatcher) Start(ctx context.Context) error {
ticker := time.NewTicker(cw.interval)
defer ticker.Stop()
- log.Info("Starting certificate poll+watcher", "interval", cw.interval)
+ cw.log.Info("Starting certificate poll+watcher", "interval", cw.interval)
for {
select {
case <-ctx.Done():
return cw.watcher.Close()
case <-ticker.C:
if err := cw.ReadCertificate(); err != nil {
- log.Error(err, "failed read certificate")
+ cw.log.Error(err, "failed read certificate")
}
}
}
@@ -160,7 +163,7 @@ func (cw *CertWatcher) Watch() {
return
}
- log.Error(err, "certificate watch error")
+ cw.log.Error(err, "certificate watch error")
}
}
}
@@ -174,7 +177,7 @@ func (cw *CertWatcher) updateCachedCertificate(cert *tls.Certificate, keyPEMBloc
if cw.currentCert != nil &&
bytes.Equal(cw.currentCert.Certificate[0], cert.Certificate[0]) &&
bytes.Equal(cw.cachedKeyPEMBlock, keyPEMBlock) {
- log.V(7).Info("certificate already cached")
+ cw.log.V(7).Info("certificate already cached")
return false
}
cw.currentCert = cert
@@ -208,7 +211,7 @@ func (cw *CertWatcher) ReadCertificate() error {
return nil
}
- log.Info("Updated current TLS certificate")
+ cw.log.Info("Updated current TLS certificate")
// If a callback is registered, invoke it with the new certificate.
cw.RLock()
@@ -229,14 +232,20 @@ func (cw *CertWatcher) handleEvent(event fsnotify.Event) {
case event.Op.Has(fsnotify.Chmod), event.Op.Has(fsnotify.Remove):
// If the file was removed or renamed, re-add the watch to the previous name
if err := cw.watcher.Add(event.Name); err != nil {
- log.Error(err, "error re-watching file")
+ cw.log.Error(err, "error re-watching file")
}
default:
return
}
- log.V(1).Info("certificate event", "event", event)
+ cw.log.V(1).Info("certificate event", "event", event)
if err := cw.ReadCertificate(); err != nil {
- log.Error(err, "error re-reading certificate")
+ cw.log.Error(err, "error re-reading certificate")
}
}
+
+// NeedLeaderElection indicates that the CertWatcher
+// does not need leader election.
+func (cw *CertWatcher) NeedLeaderElection() bool {
+ return false
+}
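With its own logger and the new NeedLeaderElection method, the certificate watcher can be handed straight to the manager and will run on every replica rather than only on the leader. A sketch with placeholder certificate paths:

package certs

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
)

// hypothetical: watch the webhook serving certificate and let the manager drive it.
func addCertWatcher(mgr ctrl.Manager) (*certwatcher.CertWatcher, error) {
	cw, err := certwatcher.New("/path/to/tls.crt", "/path/to/tls.key")
	if err != nil {
		return nil, err
	}
	// CertWatcher satisfies Runnable via Start(ctx), and NeedLeaderElection
	// reporting false keeps it running outside leader election.
	if err := mgr.Add(cw); err != nil {
		return nil, err
	}
	return cw, nil
}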
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
index 1d4ce264..b132cb2d 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go
@@ -161,15 +161,27 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi
// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated
// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from
// baseConfig, if set, otherwise a default serializer will be set.
-func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, httpClient *http.Client) (rest.Interface, error) {
+func RESTClientForGVK(
+ gvk schema.GroupVersionKind,
+ forceDisableProtoBuf bool,
+ isUnstructured bool,
+ baseConfig *rest.Config,
+ codecs serializer.CodecFactory,
+ httpClient *http.Client,
+) (rest.Interface, error) {
if httpClient == nil {
return nil, fmt.Errorf("httpClient must not be nil, consider using rest.HTTPClientFor(c) to create a client")
}
- return rest.RESTClientForConfigAndClient(createRestConfig(gvk, isUnstructured, baseConfig, codecs), httpClient)
+ return rest.RESTClientForConfigAndClient(createRestConfig(gvk, forceDisableProtoBuf, isUnstructured, baseConfig, codecs), httpClient)
}
// createRestConfig copies the base config and updates needed fields for a new rest config.
-func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config {
+func createRestConfig(gvk schema.GroupVersionKind,
+ forceDisableProtoBuf bool,
+ isUnstructured bool,
+ baseConfig *rest.Config,
+ codecs serializer.CodecFactory,
+) *rest.Config {
gv := gvk.GroupVersion()
cfg := rest.CopyConfig(baseConfig)
@@ -183,7 +195,7 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf
cfg.UserAgent = rest.DefaultKubernetesUserAgent()
}
// TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true.
- if cfg.ContentType == "" && !isUnstructured {
+ if cfg.ContentType == "" && !forceDisableProtoBuf {
protobufSchemeLock.RLock()
if protobufScheme.Recognizes(gvk) {
cfg.ContentType = runtime.ContentTypeProtobuf
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/applyconfigurations.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/applyconfigurations.go
new file mode 100644
index 00000000..97192050
--- /dev/null
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/applyconfigurations.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/utils/ptr"
+)
+
+type unstructuredApplyConfiguration struct {
+ *unstructured.Unstructured
+}
+
+func (u *unstructuredApplyConfiguration) IsApplyConfiguration() {}
+
+// ApplyConfigurationFromUnstructured creates a runtime.ApplyConfiguration from an *unstructured.Unstructured object.
+//
+// Do not use Unstructured objects here that were generated from API objects, as it's impossible to tell
+// if a zero value was explicitly set.
+func ApplyConfigurationFromUnstructured(u *unstructured.Unstructured) runtime.ApplyConfiguration {
+ return &unstructuredApplyConfiguration{Unstructured: u}
+}
+
+type applyconfigurationRuntimeObject struct {
+ runtime.ApplyConfiguration
+}
+
+func (a *applyconfigurationRuntimeObject) GetObjectKind() schema.ObjectKind {
+ return a
+}
+
+func (a *applyconfigurationRuntimeObject) GroupVersionKind() schema.GroupVersionKind {
+ return schema.GroupVersionKind{}
+}
+
+func (a *applyconfigurationRuntimeObject) SetGroupVersionKind(gvk schema.GroupVersionKind) {}
+
+func (a *applyconfigurationRuntimeObject) DeepCopyObject() runtime.Object {
+ panic("applyconfigurationRuntimeObject does not support DeepCopyObject")
+}
+
+func runtimeObjectFromApplyConfiguration(ac runtime.ApplyConfiguration) runtime.Object {
+ return &applyconfigurationRuntimeObject{ApplyConfiguration: ac}
+}
+
+func gvkFromApplyConfiguration(ac applyConfiguration) (schema.GroupVersionKind, error) {
+ var gvk schema.GroupVersionKind
+ gv, err := schema.ParseGroupVersion(ptr.Deref(ac.GetAPIVersion(), ""))
+ if err != nil {
+ return gvk, fmt.Errorf("failed to parse %q as GroupVersion: %w", ptr.Deref(ac.GetAPIVersion(), ""), err)
+ }
+ gvk.Group = gv.Group
+ gvk.Version = gv.Version
+ gvk.Kind = ptr.Deref(ac.GetKind(), "")
+
+ return gvk, nil
+}
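The new helper wraps a hand-built unstructured object so it can be fed to the client's server-side-apply entry point. A sketch of how that could be used; the ConfigMap content is illustrative, and field-owner/force options are omitted for brevity:

package applydemo

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hypothetical: server-side apply of a hand-built unstructured ConfigMap.
func applyConfigMap(ctx context.Context, c client.Client) error {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name":      "demo",
			"namespace": "default",
		},
		"data": map[string]interface{}{"key": "value"},
	}}
	// Do not derive u from a typed API object; zero values would be indistinguishable.
	// Real callers would usually also pass a field-owner ApplyOption.
	return c.Apply(ctx, client.ApplyConfigurationFromUnstructured(u))
}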
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
index 50b0ebf3..e9f73145 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
@@ -151,8 +151,7 @@ func newClient(config *rest.Config, options Options) (*client, error) {
mapper: options.Mapper,
codecs: serializer.NewCodecFactory(options.Scheme),
- structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
- unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
+ resourceByType: make(map[cacheKey]*resourceMeta),
}
rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient)
@@ -329,6 +328,16 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat
}
}
+func (c *client) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ switch obj := obj.(type) {
+ case *unstructuredApplyConfiguration:
+ defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+ return c.unstructuredClient.Apply(ctx, obj, opts...)
+ default:
+ return c.typedClient.Apply(ctx, obj, opts...)
+ }
+}
+
// Get implements client.Client.
func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
if isUncached, err := c.shouldBypassCache(obj); err != nil {
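The non-unstructured branch of Apply routes typed apply configurations through the typed client. Assuming the generated apply configurations in this client-go version satisfy runtime.ApplyConfiguration, which the vendored fake-client integration suggests, a typed equivalent could look like this sketch:

package applydemo

import (
	"context"

	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hypothetical: apply a ConfigMap using client-go's generated apply
// configuration instead of a hand-built unstructured object.
func applyTypedConfigMap(ctx context.Context, c client.Client) error {
	cm := corev1ac.ConfigMap("demo", "default").
		WithData(map[string]string{"key": "value"})
	return c.Apply(ctx, cm)
}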
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go
index 2d078795..d75d685c 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_rest_resources.go
@@ -17,16 +17,17 @@ limitations under the License.
package client
import (
+ "fmt"
"net/http"
"strings"
"sync"
"k8s.io/apimachinery/pkg/api/meta"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/client-go/rest"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
@@ -47,22 +48,30 @@ type clientRestResources struct {
// codecs are used to create a REST client for a gvk
codecs serializer.CodecFactory
- // structuredResourceByType stores structured type metadata
- structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
- // unstructuredResourceByType stores unstructured type metadata
- unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
- mu sync.RWMutex
+ // resourceByType stores type metadata
+ resourceByType map[cacheKey]*resourceMeta
+
+ mu sync.RWMutex
+}
+
+type cacheKey struct {
+ gvk schema.GroupVersionKind
+ forceDisableProtoBuf bool
}
// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
// If the object is a list, the resource represents the item's type instead.
-func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) {
+func (c *clientRestResources) newResource(gvk schema.GroupVersionKind,
+ isList bool,
+ forceDisableProtoBuf bool,
+ isUnstructured bool,
+) (*resourceMeta, error) {
if strings.HasSuffix(gvk.Kind, "List") && isList {
// if this was a list, treat it as a request for the item's resource
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
}
- client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs, c.httpClient)
+ client, err := apiutil.RESTClientForGVK(gvk, forceDisableProtoBuf, isUnstructured, c.config, c.codecs, c.httpClient)
if err != nil {
return nil, err
}
@@ -73,52 +82,96 @@ func (c *clientRestResources) newResource(gvk schema.GroupVersionKind, isList, i
return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil
}
+type applyConfiguration interface {
+ GetName() *string
+ GetNamespace() *string
+ GetKind() *string
+ GetAPIVersion() *string
+}
+
// getResource returns the resource meta information for the given type of object.
// If the object is a list, the resource represents the item's type instead.
-func (c *clientRestResources) getResource(obj runtime.Object) (*resourceMeta, error) {
- gvk, err := apiutil.GVKForObject(obj, c.scheme)
- if err != nil {
- return nil, err
+func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) {
+ var gvk schema.GroupVersionKind
+ var err error
+ var isApplyConfiguration bool
+ switch o := obj.(type) {
+ case runtime.Object:
+ gvk, err = apiutil.GVKForObject(o, c.scheme)
+ if err != nil {
+ return nil, err
+ }
+ case runtime.ApplyConfiguration:
+ ac, ok := o.(applyConfiguration)
+ if !ok {
+ return nil, fmt.Errorf("%T is a runtime.ApplyConfiguration but not an applyConfiguration", o)
+ }
+ gvk, err = gvkFromApplyConfiguration(ac)
+ if err != nil {
+ return nil, err
+ }
+ isApplyConfiguration = true
+ default:
+ return nil, fmt.Errorf("bug: %T is neither a runtime.Object nor a runtime.ApplyConfiguration", o)
}
_, isUnstructured := obj.(runtime.Unstructured)
+ forceDisableProtoBuf := isUnstructured || isApplyConfiguration
// It's better to do creation work twice than to not let multiple
// people make requests at once
c.mu.RLock()
- resourceByType := c.structuredResourceByType
- if isUnstructured {
- resourceByType = c.unstructuredResourceByType
- }
- r, known := resourceByType[gvk]
+
+ cacheKey := cacheKey{gvk: gvk, forceDisableProtoBuf: forceDisableProtoBuf}
+
+ r, known := c.resourceByType[cacheKey]
+
c.mu.RUnlock()
if known {
return r, nil
}
+ var isList bool
+ if runtimeObject, ok := obj.(runtime.Object); ok && meta.IsListType(runtimeObject) {
+ isList = true
+ }
+
// Initialize a new Client
c.mu.Lock()
defer c.mu.Unlock()
- r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured)
+ r, err = c.newResource(gvk, isList, forceDisableProtoBuf, isUnstructured)
if err != nil {
return nil, err
}
- resourceByType[gvk] = r
+ c.resourceByType[cacheKey] = r
return r, err
}
// getObjMeta returns objMeta containing both type and object metadata and state.
-func (c *clientRestResources) getObjMeta(obj runtime.Object) (*objMeta, error) {
+func (c *clientRestResources) getObjMeta(obj any) (*objMeta, error) {
r, err := c.getResource(obj)
if err != nil {
return nil, err
}
- m, err := meta.Accessor(obj)
- if err != nil {
- return nil, err
+ objMeta := &objMeta{resourceMeta: r}
+
+ switch o := obj.(type) {
+ case runtime.Object:
+ m, err := meta.Accessor(obj)
+ if err != nil {
+ return nil, err
+ }
+ objMeta.namespace = m.GetNamespace()
+ objMeta.name = m.GetName()
+ case applyConfiguration:
+ objMeta.namespace = ptr.Deref(o.GetNamespace(), "")
+ objMeta.name = ptr.Deref(o.GetName(), "")
+ default:
+ return nil, fmt.Errorf("object %T is neither a runtime.Object nor a runtime.ApplyConfiguration", obj)
}
- return &objMeta{resourceMeta: r, Object: m}, err
+
+ return objMeta, nil
}
// resourceMeta stores state for a Kubernetes type.
@@ -146,6 +199,6 @@ type objMeta struct {
// resourceMeta contains type information for the object
*resourceMeta
- // Object contains meta data for the object instance
- metav1.Object
+ namespace string
+ name string
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
index bbcdd383..a185860d 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
@@ -82,6 +82,10 @@ func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts
return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
}
+func (c *dryRunClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ return c.client.Apply(ctx, obj, append(opts, DryRunAll)...)
+}
+
// Get implements client.Client.
func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
return c.client.Get(ctx, key, obj, opts...)
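Because the dry-run wrapper now forwards Apply with DryRunAll appended, an existing client can be wrapped to validate apply configurations server-side without persisting anything. A small sketch; the helper name is hypothetical:

package applydemo

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// hypothetical: every call through the wrapped client, including Apply,
// becomes a server-side dry run.
func validateApply(ctx context.Context, c client.Client, ac runtime.ApplyConfiguration) error {
	dryRun := client.NewDryRunClient(c)
	return dryRun.Apply(ctx, ac)
}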
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
index 16e2cba5..f88a44ed 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go
@@ -41,6 +41,7 @@ import (
https://github.com/kubernetes/kubernetes/pull/120326 (v5.6.0+incompatible
missing a critical fix)
*/
+
jsonpatch "gopkg.in/evanphx/json-patch.v4"
appsv1 "k8s.io/api/apps/v1"
authenticationv1 "k8s.io/api/authentication/v1"
@@ -52,17 +53,21 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/apimachinery/pkg/util/managedfields"
utilrand "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/watch"
+ clientgoapplyconfigurations "k8s.io/client-go/applyconfigurations"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/testing"
"k8s.io/utils/ptr"
@@ -76,8 +81,9 @@ import (
type versionedTracker struct {
testing.ObjectTracker
- scheme *runtime.Scheme
- withStatusSubresource sets.Set[schema.GroupVersionKind]
+ scheme *runtime.Scheme
+ withStatusSubresource sets.Set[schema.GroupVersionKind]
+ usesFieldManagedObjectTracker bool
}
type fakeClient struct {
@@ -98,6 +104,8 @@ type fakeClient struct {
indexes map[schema.GroupVersionKind]map[string]client.IndexerFunc
// indexesLock must be held when accessing indexes.
indexesLock sync.RWMutex
+
+ returnManagedFields bool
}
var _ client.WithWatch = &fakeClient{}
@@ -131,6 +139,9 @@ type ClientBuilder struct {
withStatusSubresource []client.Object
objectTracker testing.ObjectTracker
interceptorFuncs *interceptor.Funcs
+ typeConverters []managedfields.TypeConverter
+ returnManagedFields bool
+ isBuilt bool
// indexes maps each GroupVersionKind (GVK) to the indexes registered for that GVK.
// The inner map maps from index name to IndexerFunc.
@@ -172,6 +183,8 @@ func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *C
}
// WithObjectTracker can be optionally used to initialize this fake client with testing.ObjectTracker.
+// Setting this is incompatible with setting WithTypeConverters, as they are a setting on the
+// tracker.
func (f *ClientBuilder) WithObjectTracker(ot testing.ObjectTracker) *ClientBuilder {
f.objectTracker = ot
return f
@@ -228,8 +241,36 @@ func (f *ClientBuilder) WithInterceptorFuncs(interceptorFuncs interceptor.Funcs)
return f
}
+// WithTypeConverters sets the type converters for the fake client. The list is ordered and the first
+// non-erroring converter is used. A type converter must be provided for all types the client is used
+// for, otherwise it will error.
+//
+// This setting is incompatible with WithObjectTracker, as the type converters are a setting on the tracker.
+//
+// If unset, this defaults to:
+// * clientgoapplyconfigurations.NewTypeConverter(scheme.Scheme),
+// * managedfields.NewDeducedTypeConverter(),
+//
+// Be aware that the behavior of the `NewDeducedTypeConverter` might not match the behavior of the
+// Kubernetes APIServer, it is recommended to provide a type converter for your types. TypeConverters
+// are generated along with ApplyConfigurations.
+func (f *ClientBuilder) WithTypeConverters(typeConverters ...managedfields.TypeConverter) *ClientBuilder {
+ f.typeConverters = append(f.typeConverters, typeConverters...)
+ return f
+}
+
+// WithReturnManagedFields configures the fake client to return managedFields
+// on objects.
+func (f *ClientBuilder) WithReturnManagedFields() *ClientBuilder {
+ f.returnManagedFields = true
+ return f
+}
+
// Build builds and returns a new fake client.
func (f *ClientBuilder) Build() client.WithWatch {
+ if f.isBuilt {
+ panic("Build() must not be called multiple times when creating a ClientBuilder")
+ }
if f.scheme == nil {
f.scheme = scheme.Scheme
}
@@ -237,8 +278,6 @@ func (f *ClientBuilder) Build() client.WithWatch {
f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{})
}
- var tracker versionedTracker
-
withStatusSubResource := sets.New(inTreeResourcesWithStatus()...)
for _, o := range f.withStatusSubresource {
gvk, err := apiutil.GVKForObject(o, f.scheme)
@@ -248,10 +287,36 @@ func (f *ClientBuilder) Build() client.WithWatch {
withStatusSubResource.Insert(gvk)
}
+ if f.objectTracker != nil && len(f.typeConverters) > 0 {
+ panic(errors.New("WithObjectTracker and WithTypeConverters are incompatible"))
+ }
+
+ var usesFieldManagedObjectTracker bool
if f.objectTracker == nil {
- tracker = versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme, withStatusSubresource: withStatusSubResource}
- } else {
- tracker = versionedTracker{ObjectTracker: f.objectTracker, scheme: f.scheme, withStatusSubresource: withStatusSubResource}
+ if len(f.typeConverters) == 0 {
+ // Use corresponding scheme to ensure the converter error
+ // for types it can't handle.
+ clientGoScheme := runtime.NewScheme()
+ if err := scheme.AddToScheme(clientGoScheme); err != nil {
+ panic(fmt.Sprintf("failed to construct client-go scheme: %v", err))
+ }
+ f.typeConverters = []managedfields.TypeConverter{
+ clientgoapplyconfigurations.NewTypeConverter(clientGoScheme),
+ managedfields.NewDeducedTypeConverter(),
+ }
+ }
+ f.objectTracker = testing.NewFieldManagedObjectTracker(
+ f.scheme,
+ serializer.NewCodecFactory(f.scheme).UniversalDecoder(),
+ multiTypeConverter{upstream: f.typeConverters},
+ )
+ usesFieldManagedObjectTracker = true
+ }
+ tracker := versionedTracker{
+ ObjectTracker: f.objectTracker,
+ scheme: f.scheme,
+ withStatusSubresource: withStatusSubResource,
+ usesFieldManagedObjectTracker: usesFieldManagedObjectTracker,
}
for _, obj := range f.initObject {
@@ -276,12 +341,14 @@ func (f *ClientBuilder) Build() client.WithWatch {
restMapper: f.restMapper,
indexes: f.indexes,
withStatusSubresource: withStatusSubResource,
+ returnManagedFields: f.returnManagedFields,
}
if f.interceptorFuncs != nil {
result = interceptor.NewClient(result, *f.interceptorFuncs)
}
+ f.isBuilt = true
return result
}
@@ -318,6 +385,16 @@ func (t versionedTracker) Add(obj runtime.Object) error {
if err != nil {
return err
}
+
+ // If the fieldManager cannot decode fields, it will just silently clear them. This is pretty
+ // much guaranteed not to be what someone who initializes a fake client with objects that
+ // have them set wants, so validate them here.
+ // Ref https://github.com/kubernetes/kubernetes/blob/a956ef4862993b825bcd524a19260192ff1da72d/staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go#L105
+ if t.usesFieldManagedObjectTracker {
+ if err := managedfields.ValidateManagedFields(accessor.GetManagedFields()); err != nil {
+ return fmt.Errorf("invalid managedFields on %T: %w", obj, err)
+ }
+ }
if err := t.ObjectTracker.Add(obj); err != nil {
return err
}
@@ -332,8 +409,9 @@ func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Ob
return fmt.Errorf("failed to get accessor for object: %w", err)
}
if accessor.GetName() == "" {
+ gvk, _ := apiutil.GVKForObject(obj, t.scheme)
return apierrors.NewInvalid(
- obj.GetObjectKind().GroupVersionKind().GroupKind(),
+ gvk.GroupKind(),
accessor.GetName(),
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
}
@@ -372,6 +450,9 @@ func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (ru
if err != nil {
return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", gvk, err)
}
+ if _, isTypedUnstructured := typed.(runtime.Unstructured); isTypedUnstructured {
+ return o, nil
+ }
unstructuredSerialized, err := json.Marshal(u)
if err != nil {
@@ -394,7 +475,11 @@ func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Ob
}
func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Object, ns string, isStatus, deleting bool, opts metav1.UpdateOptions) error {
- obj, err := t.updateObject(gvr, obj, ns, isStatus, deleting, opts.DryRun)
+ gvk, err := apiutil.GVKForObject(obj, t.scheme)
+ if err != nil {
+ return err
+ }
+ obj, err = t.updateObject(gvr, obj, ns, isStatus, deleting, opts.DryRun)
if err != nil {
return err
}
@@ -402,6 +487,10 @@ func (t versionedTracker) update(gvr schema.GroupVersionResource, obj runtime.Ob
return nil
}
+ if u, unstructured := obj.(*unstructured.Unstructured); unstructured {
+ u.SetGroupVersionKind(gvk)
+ }
+
return t.ObjectTracker.Update(gvr, obj, ns, opts)
}
@@ -433,8 +522,9 @@ func (t versionedTracker) updateObject(gvr schema.GroupVersionResource, obj runt
}
if accessor.GetName() == "" {
+ gvk, _ := apiutil.GVKForObject(obj, t.scheme)
return nil, apierrors.NewInvalid(
- obj.GetObjectKind().GroupVersionKind().GroupKind(),
+ gvk.GroupKind(),
accessor.GetName(),
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
}
@@ -521,42 +611,60 @@ func (t versionedTracker) updateObject(gvr schema.GroupVersionResource, obj runt
}
func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
}
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
o, err := c.tracker.Get(gvr, key.Namespace, key.Name)
if err != nil {
return err
}
- _, isUnstructured := obj.(runtime.Unstructured)
- _, isPartialObject := obj.(*metav1.PartialObjectMetadata)
-
- if isUnstructured || isPartialObject {
- gvk, err := apiutil.GVKForObject(obj, c.scheme)
- if err != nil {
- return err
- }
- ta, err := meta.TypeAccessor(o)
- if err != nil {
- return err
- }
- ta.SetKind(gvk.Kind)
- ta.SetAPIVersion(gvk.GroupVersion().String())
+ ta, err := meta.TypeAccessor(o)
+ if err != nil {
+ return err
}
+ // If the final object is unstructured, the json
+ // representation must contain GVK or the apimachinery
+ // json serializer will error out.
+ ta.SetAPIVersion(gvk.GroupVersion().String())
+ ta.SetKind(gvk.Kind)
+
j, err := json.Marshal(o)
if err != nil {
return err
}
zero(obj)
- return json.Unmarshal(j, obj)
+ if err := json.Unmarshal(j, obj); err != nil {
+ return err
+ }
+
+ if !c.returnManagedFields {
+ obj.SetManagedFields(nil)
+ }
+
+ return ensureTypeMeta(obj, gvk)
}
func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(list); err != nil {
+ return nil, err
+ }
+
+ c.schemeLock.RLock()
+ defer c.schemeLock.RUnlock()
+
gvk, err := apiutil.GVKForObject(list, c.scheme)
if err != nil {
return nil, err
@@ -572,6 +680,10 @@ func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...
}
func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
gvk, err := apiutil.GVKForObject(obj, c.scheme)
@@ -579,11 +691,12 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl
return err
}
- originalKind := gvk.Kind
-
+ originalGVK := gvk
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+ listGVK := gvk
+ listGVK.Kind += "List"
- if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(gvk) {
+ if _, isUnstructuredList := obj.(runtime.Unstructured); isUnstructuredList && !c.scheme.Recognizes(listGVK) {
// We need to register the ListKind with UnstructuredList:
// https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51
c.schemeLock.RUnlock()
@@ -602,39 +715,34 @@ func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...cl
return err
}
- if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured {
- ta, err := meta.TypeAccessor(o)
- if err != nil {
- return err
- }
- ta.SetKind(originalKind)
- ta.SetAPIVersion(gvk.GroupVersion().String())
- }
-
j, err := json.Marshal(o)
if err != nil {
return err
}
zero(obj)
+ if err := ensureTypeMeta(obj, originalGVK); err != nil {
+ return err
+ }
objCopy := obj.DeepCopyObject().(client.ObjectList)
if err := json.Unmarshal(j, objCopy); err != nil {
return err
}
- if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured {
- ta, err := meta.TypeAccessor(obj)
- if err != nil {
- return err
- }
- ta.SetKind(originalKind)
- ta.SetAPIVersion(gvk.GroupVersion().String())
- }
-
objs, err := meta.ExtractList(objCopy)
if err != nil {
return err
}
+ for _, o := range objs {
+ if err := ensureTypeMeta(o, gvk); err != nil {
+ return err
+ }
+
+ if !c.returnManagedFields {
+ o.(metav1.Object).SetManagedFields(nil)
+ }
+ }
+
if listOpts.LabelSelector == nil && listOpts.FieldSelector == nil {
return meta.SetList(obj, objs)
}
@@ -741,8 +849,13 @@ func (c *fakeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) {
}
func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
+
createOptions := &client.CreateOptions{}
createOptions.ApplyOptions(opts)
@@ -773,14 +886,35 @@ func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...clie
accessor.SetDeletionTimestamp(nil)
}
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+
c.trackerWriteLock.Lock()
defer c.trackerWriteLock.Unlock()
- return c.tracker.Create(gvr, obj, accessor.GetNamespace())
+
+ if err := c.tracker.Create(gvr, obj, accessor.GetNamespace(), *createOptions.AsCreateOptions()); err != nil {
+ // The managed fields tracker sets gvk even on errors
+ _ = ensureTypeMeta(obj, gvk)
+ return err
+ }
+
+ if !c.returnManagedFields {
+ obj.SetManagedFields(nil)
+ }
+
+ return ensureTypeMeta(obj, gvk)
}
func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
+
gvr, err := getGVRFromObject(obj, c.scheme)
if err != nil {
return err
@@ -826,8 +960,13 @@ func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...clie
}
func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
+
gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
@@ -877,8 +1016,13 @@ func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...clie
}
func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.UpdateOption) error {
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
c.schemeLock.RLock()
defer c.schemeLock.RUnlock()
+
updateOptions := &client.UpdateOptions{}
updateOptions.ApplyOptions(opts)
@@ -892,6 +1036,10 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd
if err != nil {
return err
}
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
accessor, err := meta.Accessor(obj)
if err != nil {
return err
@@ -899,19 +1047,100 @@ func (c *fakeClient) update(obj client.Object, isStatus bool, opts ...client.Upd
c.trackerWriteLock.Lock()
defer c.trackerWriteLock.Unlock()
- return c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions())
+
+ // Retain managed fields
+ // We can ignore all errors here since update will fail if we encounter an error.
+ obj.SetManagedFields(nil)
+ current, _ := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
+ if currentMetaObj, ok := current.(metav1.Object); ok {
+ obj.SetManagedFields(currentMetaObj.GetManagedFields())
+ }
+
+ if err := c.tracker.update(gvr, obj, accessor.GetNamespace(), isStatus, false, *updateOptions.AsUpdateOptions()); err != nil {
+ return err
+ }
+
+ if !c.returnManagedFields {
+ obj.SetManagedFields(nil)
+ }
+
+ return ensureTypeMeta(obj, gvk)
}
func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
return c.patch(obj, patch, opts...)
}
+func (c *fakeClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error {
+ applyOpts := &client.ApplyOptions{}
+ applyOpts.ApplyOptions(opts)
+
+ data, err := json.Marshal(obj)
+ if err != nil {
+ return fmt.Errorf("failed to marshal apply configuration: %w", err)
+ }
+
+ u := &unstructured.Unstructured{}
+ if err := json.Unmarshal(data, u); err != nil {
+ return fmt.Errorf("failed to unmarshal apply configuration: %w", err)
+ }
+
+ applyPatch := &fakeApplyPatch{}
+
+ patchOpts := &client.PatchOptions{}
+ patchOpts.Raw = applyOpts.AsPatchOptions()
+
+ if err := c.patch(u, applyPatch, patchOpts); err != nil {
+ return err
+ }
+
+ acJSON, err := json.Marshal(u)
+ if err != nil {
+ return fmt.Errorf("failed to marshal patched object: %w", err)
+ }
+
+ // We have to zero the object in case it contained a status and there is a
+ // status subresource. If it is the private `unstructuredApplyConfiguration`
+ // we cannot zero all of it, as that would leave the embedded Unstructured
+ // nil, which then causes a nil pointer dereference in the json.Unmarshal below.
+ switch reflect.TypeOf(obj).String() {
+ case "*client.unstructuredApplyConfiguration":
+ zero(reflect.ValueOf(obj).Elem().FieldByName("Unstructured").Interface())
+ default:
+ zero(obj)
+ }
+ if err := json.Unmarshal(acJSON, obj); err != nil {
+ return fmt.Errorf("failed to unmarshal patched object: %w", err)
+ }
+
+ return nil
+}
+
+type fakeApplyPatch struct{}
+
+func (p *fakeApplyPatch) Type() types.PatchType {
+ return types.ApplyPatchType
+}
+
+func (p *fakeApplyPatch) Data(obj client.Object) ([]byte, error) {
+ return json.Marshal(obj)
+}
+
func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
- c.schemeLock.RLock()
- defer c.schemeLock.RUnlock()
+ if err := c.addToSchemeIfUnknownAndUnstructuredOrPartial(obj); err != nil {
+ return err
+ }
+
patchOptions := &client.PatchOptions{}
patchOptions.ApplyOptions(opts)
+ if errs := validation.ValidatePatchOptions(patchOptions.AsPatchOptions(), patch.Type()); len(errs) > 0 {
+ return apierrors.NewInvalid(schema.GroupKind{Group: "meta.k8s.io", Kind: "PatchOptions"}, "", errs)
+ }
+
+ c.schemeLock.RLock()
+ defer c.schemeLock.RUnlock()
+
for _, dryRunOpt := range patchOptions.DryRun {
if dryRunOpt == metav1.DryRunAll {
return nil
@@ -922,51 +1151,77 @@ func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client
if err != nil {
return err
}
- accessor, err := meta.Accessor(obj)
- if err != nil {
- return err
- }
- data, err := patch.Data(obj)
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
if err != nil {
return err
}
-
- gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ accessor, err := meta.Accessor(obj)
if err != nil {
return err
}
+ var isApplyCreate bool
c.trackerWriteLock.Lock()
defer c.trackerWriteLock.Unlock()
oldObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
if err != nil {
- return err
+ if !apierrors.IsNotFound(err) || patch.Type() != types.ApplyPatchType {
+ return err
+ }
+ oldObj = &unstructured.Unstructured{}
+ isApplyCreate = true
}
oldAccessor, err := meta.Accessor(oldObj)
if err != nil {
return err
}
- // Apply patch without updating object.
- // To remain in accordance with the behavior of k8s api behavior,
- // a patch must not allow for changes to the deletionTimestamp of an object.
- // The reaction() function applies the patch to the object and calls Update(),
- // whereas dryPatch() replicates this behavior but skips the call to Update().
- // This ensures that the patch may be rejected if a deletionTimestamp is modified, prior
- // to updating the object.
- action := testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)
- o, err := dryPatch(action, c.tracker)
- if err != nil {
- return err
+ // SSA deletionTimestamp updates are silently ignored
+ if patch.Type() == types.ApplyPatchType && !isApplyCreate {
+ obj.SetDeletionTimestamp(oldAccessor.GetDeletionTimestamp())
}
- newObj, err := meta.Accessor(o)
+
+ data, err := patch.Data(obj)
if err != nil {
return err
}
- // Validate that deletionTimestamp has not been changed
- if !deletionTimestampEqual(newObj, oldAccessor) {
- return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable")
+ action := testing.NewPatchActionWithOptions(
+ gvr,
+ accessor.GetNamespace(),
+ accessor.GetName(),
+ patch.Type(),
+ data,
+ *patchOptions.AsPatchOptions(),
+ )
+
+ // Apply is implemented in the tracker and calling it has side effects
+ // such as bumping the resourceVersion and updating managedFields
+ // timestamps, hence we cannot dry-run it. Luckily, the only validation
+ // we use the dry run for does not apply to SSA: creating objects with a
+ // non-nil deletionTimestamp through SSA is possible, and updating the
+ // deletionTimestamp is valid but has no effect.
+ if patch.Type() != types.ApplyPatchType {
+ // Apply the patch without updating the object.
+ // To remain in accordance with the behavior of the k8s API,
+ // a patch must not allow changes to the deletionTimestamp of an object.
+ // The reaction() function applies the patch to the object and calls Update(),
+ // whereas dryPatch() replicates this behavior but skips the call to Update().
+ // This ensures that the patch may be rejected if a deletionTimestamp is
+ // modified, prior to updating the object.
+ o, err := dryPatch(action, c.tracker)
+ if err != nil {
+ return err
+ }
+ newObj, err := meta.Accessor(o)
+ if err != nil {
+ return err
+ }
+
+ // Validate that deletionTimestamp has not been changed
+ if !deletionTimestampEqual(newObj, oldAccessor) {
+ return fmt.Errorf("rejected patch, metadata.deletionTimestamp immutable")
+ }
}
reaction := testing.ObjectReaction(c.tracker)
@@ -978,21 +1233,28 @@ func (c *fakeClient) patch(obj client.Object, patch client.Patch, opts ...client
panic("tracker could not handle patch method")
}
- if _, isUnstructured := obj.(runtime.Unstructured); isUnstructured {
- ta, err := meta.TypeAccessor(o)
- if err != nil {
- return err
- }
- ta.SetKind(gvk.Kind)
- ta.SetAPIVersion(gvk.GroupVersion().String())
+ ta, err := meta.TypeAccessor(o)
+ if err != nil {
+ return err
}
+ ta.SetAPIVersion(gvk.GroupVersion().String())
+ ta.SetKind(gvk.Kind)
+
j, err := json.Marshal(o)
if err != nil {
return err
}
zero(obj)
- return json.Unmarshal(j, obj)
+ if err := json.Unmarshal(j, obj); err != nil {
+ return err
+ }
+
+ if !c.returnManagedFields {
+ obj.SetManagedFields(nil)
+ }
+
+ return ensureTypeMeta(obj, gvk)
}
// Applying a patch results in a deletionTimestamp that is truncated to the nearest second.
@@ -1020,6 +1282,9 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru
obj, err := tracker.Get(gvr, ns, action.GetName())
if err != nil {
+ if apierrors.IsNotFound(err) && action.GetPatchType() == types.ApplyPatchType {
+ return &unstructured.Unstructured{}, nil
+ }
return nil, err
}
@@ -1064,10 +1329,10 @@ func dryPatch(action testing.PatchActionImpl, tracker testing.ObjectTracker) (ru
if err = json.Unmarshal(mergedByte, obj); err != nil {
return nil, err
}
- case types.ApplyPatchType:
- return nil, errors.New("apply patches are not supported in the fake client. Follow https://github.com/kubernetes/kubernetes/issues/115598 for the current status")
case types.ApplyCBORPatchType:
return nil, errors.New("apply CBOR patches are not supported in the fake client")
+ case types.ApplyPatchType:
+ return nil, errors.New("bug in controller-runtime: should not end up in dryPatch for SSA")
default:
return nil, fmt.Errorf("%s PatchType is not supported", action.GetPatchType())
}
@@ -1600,3 +1865,47 @@ func AddIndex(c client.Client, obj runtime.Object, field string, extractValue cl
return nil
}
+
+func (c *fakeClient) addToSchemeIfUnknownAndUnstructuredOrPartial(obj runtime.Object) error {
+ c.schemeLock.Lock()
+ defer c.schemeLock.Unlock()
+
+ _, isUnstructured := obj.(*unstructured.Unstructured)
+ _, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+ _, isPartial := obj.(*metav1.PartialObjectMetadata)
+ _, isPartialList := obj.(*metav1.PartialObjectMetadataList)
+ if !isUnstructured && !isUnstructuredList && !isPartial && !isPartialList {
+ return nil
+ }
+
+ gvk, err := apiutil.GVKForObject(obj, c.scheme)
+ if err != nil {
+ return err
+ }
+
+ if !c.scheme.Recognizes(gvk) {
+ c.scheme.AddKnownTypeWithName(gvk, obj)
+ }
+
+ return nil
+}
+
+func ensureTypeMeta(obj runtime.Object, gvk schema.GroupVersionKind) error {
+ ta, err := meta.TypeAccessor(obj)
+ if err != nil {
+ return err
+ }
+ _, isUnstructured := obj.(runtime.Unstructured)
+ _, isPartialObject := obj.(*metav1.PartialObjectMetadata)
+ _, isPartialObjectList := obj.(*metav1.PartialObjectMetadataList)
+ if !isUnstructured && !isPartialObject && !isPartialObjectList {
+ ta.SetKind("")
+ ta.SetAPIVersion("")
+ return nil
+ }
+
+ ta.SetKind(gvk.Kind)
+ ta.SetAPIVersion(gvk.GroupVersion().String())
+
+ return nil
+}
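Illustrative note (not part of the vendored patch): a minimal sketch of exercising the fake client's new server-side apply path from a test. It assumes the generated apply configurations in the vendored client-go satisfy runtime.ApplyConfiguration; the names are invented. An apply patch requires a field owner, which the ValidatePatchOptions call above now enforces.

package fake_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestFakeClientApplySketch(t *testing.T) {
	c := fake.NewClientBuilder().Build()

	// Server-side apply a ConfigMap; the field owner is mandatory for apply patches.
	cm := corev1ac.ConfigMap("demo", "default").
		WithData(map[string]string{"key": "value"})
	if err := c.Apply(context.Background(), cm, client.FieldOwner("test-owner")); err != nil {
		t.Fatalf("apply failed: %v", err)
	}

	// The object is now in the tracker; TypeMeta on typed reads stays cleared,
	// mirroring what ensureTypeMeta does above.
	var got corev1.ConfigMap
	if err := c.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "demo"}, &got); err != nil {
		t.Fatalf("get failed: %v", err)
	}
}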
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/typeconverter.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/typeconverter.go
new file mode 100644
index 00000000..3cb3a0dc
--- /dev/null
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/typeconverter.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/managedfields"
+ "sigs.k8s.io/structured-merge-diff/v6/typed"
+)
+
+type multiTypeConverter struct {
+ upstream []managedfields.TypeConverter
+}
+
+func (m multiTypeConverter) ObjectToTyped(r runtime.Object, o ...typed.ValidationOptions) (*typed.TypedValue, error) {
+ var errs []error
+ for _, u := range m.upstream {
+ res, err := u.ObjectToTyped(r, o...)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ return res, nil
+ }
+
+ return nil, fmt.Errorf("failed to convert Object to TypedValue: %w", kerrors.NewAggregate(errs))
+}
+
+func (m multiTypeConverter) TypedToObject(v *typed.TypedValue) (runtime.Object, error) {
+ var errs []error
+ for _, u := range m.upstream {
+ res, err := u.TypedToObject(v)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ return res, nil
+ }
+
+ return nil, fmt.Errorf("failed to convert TypedValue to Object: %w", kerrors.NewAggregate(errs))
+}
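Illustrative note (not part of the vendored patch): the converter simply tries each upstream converter in order and returns the first success. A rough in-package sketch, assuming a deduced converter is used as the final fallback.

package fake

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/managedfields"
)

func exampleMultiTypeConverter() error {
	tc := multiTypeConverter{
		upstream: []managedfields.TypeConverter{
			// A scheme-derived converter would typically come first; the deduced
			// converter handles types the scheme does not know about.
			managedfields.NewDeducedTypeConverter(),
		},
	}

	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo", "namespace": "default"},
	}}

	// Round-trip through the structured-merge-diff typed representation.
	typed, err := tc.ObjectToTyped(u)
	if err != nil {
		return err
	}
	_, err = tc.TypedToObject(typed)
	return err
}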
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go
index 07183cd1..93274f95 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go
@@ -54,6 +54,10 @@ func (f *clientWithFieldManager) Patch(ctx context.Context, obj Object, patch Pa
return f.c.Patch(ctx, obj, patch, append([]PatchOption{FieldOwner(f.owner)}, opts...)...)
}
+func (f *clientWithFieldManager) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ return f.c.Apply(ctx, obj, append([]ApplyOption{FieldOwner(f.owner)}, opts...)...)
+}
+
func (f *clientWithFieldManager) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
return f.c.Delete(ctx, obj, opts...)
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go
index 659b3d44..ce8d0576 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go
@@ -53,6 +53,10 @@ func (c *clientWithFieldValidation) Patch(ctx context.Context, obj Object, patch
return c.client.Patch(ctx, obj, patch, append([]PatchOption{c.validation}, opts...)...)
}
+func (c *clientWithFieldValidation) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ return c.client.Apply(ctx, obj, opts...)
+}
+
func (c *clientWithFieldValidation) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
return c.client.Delete(ctx, obj, opts...)
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go
index 3d3f3cb0..7ff73bd8 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interceptor/intercept.go
@@ -19,6 +19,7 @@ type Funcs struct {
DeleteAllOf func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error
Update func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.UpdateOption) error
Patch func(ctx context.Context, client client.WithWatch, obj client.Object, patch client.Patch, opts ...client.PatchOption) error
+ Apply func(ctx context.Context, client client.WithWatch, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error
Watch func(ctx context.Context, client client.WithWatch, obj client.ObjectList, opts ...client.ListOption) (watch.Interface, error)
SubResource func(client client.WithWatch, subResource string) client.SubResourceClient
SubResourceGet func(ctx context.Context, client client.Client, subResourceName string, obj client.Object, subResource client.Object, opts ...client.SubResourceGetOption) error
@@ -92,6 +93,14 @@ func (c interceptor) Patch(ctx context.Context, obj client.Object, patch client.
return c.client.Patch(ctx, obj, patch, opts...)
}
+func (c interceptor) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error {
+ if c.funcs.Apply != nil {
+ return c.funcs.Apply(ctx, c.client, obj, opts...)
+ }
+
+ return c.client.Apply(ctx, obj, opts...)
+}
+
func (c interceptor) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
if c.funcs.DeleteAllOf != nil {
return c.funcs.DeleteAllOf(ctx, c.client, obj, opts...)
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
index 3b282fc2..61559ecb 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
@@ -61,6 +61,9 @@ type Reader interface {
// Writer knows how to create, delete, and update Kubernetes objects.
type Writer interface {
+ // Apply applies the given apply configuration to the Kubernetes cluster.
+ Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error
+
// Create saves the object obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
Create(ctx context.Context, obj Object, opts ...CreateOption) error
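Illustrative note (not part of the vendored patch): with Apply on the Writer interface, callers hand the client an apply configuration instead of a full object. A rough sketch, assuming the vendored client-go apply configurations (appsv1ac and friends) implement runtime.ApplyConfiguration; the names and image are invented.

package example

import (
	"context"

	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// applyDeployment declares the desired state and lets the server merge it,
// instead of issuing a Get/mutate/Update round trip.
func applyDeployment(ctx context.Context, c client.Client) error {
	deploy := appsv1ac.Deployment("web", "default").
		WithSpec(appsv1ac.DeploymentSpec().
			WithReplicas(2).
			WithSelector(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{"app": "web"})).
			WithTemplate(corev1ac.PodTemplateSpec().
				WithLabels(map[string]string{"app": "web"}).
				WithSpec(corev1ac.PodSpec().
					WithContainers(corev1ac.Container().
						WithName("web").
						WithImage("nginx:1.27")))))

	// FieldOwner is required for server-side apply; ForceOwnership re-acquires
	// fields currently owned by other field managers (both gain ApplyToApply
	// implementations later in this diff).
	return c.Apply(ctx, deploy, client.FieldOwner("example-operator"), client.ForceOwnership)
}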
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
index 222dc795..d4223eda 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
@@ -19,10 +19,13 @@ package client
import (
"context"
"fmt"
+ "reflect"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
// NewNamespacedClient wraps an existing client enforcing the namespace value.
@@ -147,6 +150,52 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o
return n.client.Patch(ctx, obj, patch, opts...)
}
+func (n *namespacedClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ var gvk schema.GroupVersionKind
+ switch o := obj.(type) {
+ case applyConfiguration:
+ var err error
+ gvk, err = gvkFromApplyConfiguration(o)
+ if err != nil {
+ return err
+ }
+ case *unstructuredApplyConfiguration:
+ gvk = o.GroupVersionKind()
+ default:
+ return fmt.Errorf("object %T is not a valid apply configuration", obj)
+ }
+ isNamespaceScoped, err := apiutil.IsGVKNamespaced(gvk, n.RESTMapper())
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+ if isNamespaceScoped {
+ switch o := obj.(type) {
+ case applyConfiguration:
+ if o.GetNamespace() != nil && *o.GetNamespace() != "" && *o.GetNamespace() != n.namespace {
+ return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client",
+ *o.GetNamespace(), ptr.Deref(o.GetName(), ""), n.namespace)
+ }
+ v := reflect.ValueOf(o)
+ withNamespace := v.MethodByName("WithNamespace")
+ if !withNamespace.IsValid() {
+ return fmt.Errorf("ApplyConfiguration %T does not have a WithNamespace method", o)
+ }
+ if tp := withNamespace.Type(); tp.NumIn() != 1 || tp.In(0).Kind() != reflect.String {
+ return fmt.Errorf("WithNamespace method of ApplyConfiguration %T must take a single string argument", o)
+ }
+ withNamespace.Call([]reflect.Value{reflect.ValueOf(n.namespace)})
+ case *unstructuredApplyConfiguration:
+ if o.GetNamespace() != "" && o.GetNamespace() != n.namespace {
+ return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client",
+ o.GetNamespace(), o.GetName(), n.namespace)
+ }
+ o.SetNamespace(n.namespace)
+ }
+ }
+
+ return n.client.Apply(ctx, obj, opts...)
+}
+
// Get implements client.Client.
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
isNamespaceScoped, err := n.IsObjectNamespaced(obj)
@@ -164,7 +213,12 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, o
// List implements client.Client.
func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
- if n.namespace != "" {
+ isNamespaceScoped, err := n.IsObjectNamespaced(obj)
+ if err != nil {
+ return fmt.Errorf("error finding the scope of the object: %w", err)
+ }
+
+ if isNamespaceScoped && n.namespace != "" {
opts = append(opts, InNamespace(n.namespace))
}
return n.client.List(ctx, obj, opts...)
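Illustrative note (not part of the vendored patch): a sketch of how the namespaced wrapper now behaves for Apply and for cluster-scoped List calls. Object names are invented, and the sketch assumes generated apply configurations expose the GetNamespace/WithNamespace accessors the wrapper relies on.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func namespacedOps(ctx context.Context, base client.Client) error {
	// Every namespaced operation is pinned to "team-a".
	nsClient := client.NewNamespacedClient(base, "team-a")

	// No namespace is set on the apply configuration: the wrapper fills in
	// "team-a" via WithNamespace; a conflicting namespace would be rejected.
	cm := corev1ac.ConfigMap("settings", "").
		WithData(map[string]string{"mode": "prod"})
	if err := nsClient.Apply(ctx, cm, client.FieldOwner("example-operator")); err != nil {
		return err
	}

	// Cluster-scoped lists are no longer force-scoped to the namespace.
	var clusterRoles rbacv1.ClusterRoleList
	if err := nsClient.List(ctx, &clusterRoles); err != nil {
		return err
	}

	// Namespaced lists remain restricted to "team-a".
	var pods corev1.PodList
	return nsClient.List(ctx, &pods)
}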
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
index db50ed8f..33c46073 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
+ "k8s.io/utils/ptr"
)
// {{{ "Functional" Option Interfaces
@@ -61,6 +62,12 @@ type PatchOption interface {
ApplyToPatch(*PatchOptions)
}
+// ApplyOption is some configuration that modifies options for an apply request.
+type ApplyOption interface {
+ // ApplyToApply applies this configuration to the given apply options.
+ ApplyToApply(*ApplyOptions)
+}
+
// DeleteAllOfOption is some configuration that modifies options for a delete request.
type DeleteAllOfOption interface {
// ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
@@ -115,7 +122,12 @@ func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
-// ApplyToPatch applies this configuration to the given delete options.
+// ApplyToApply applies this configuration to the given apply options.
+func (dryRunAll) ApplyToApply(opts *ApplyOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToDelete applies this configuration to the given delete options.
func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
opts.DryRun = []string{metav1.DryRunAll}
}
@@ -154,6 +166,11 @@ func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) {
opts.FieldManager = string(f)
}
+// ApplyToApply applies this configuration to the given apply options.
+func (f FieldOwner) ApplyToApply(opts *ApplyOptions) {
+ opts.FieldManager = string(f)
+}
+
// ApplyToSubResourcePatch applies this configuration to the given patch options.
func (f FieldOwner) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) {
opts.FieldManager = string(f)
@@ -431,6 +448,12 @@ type GetOptions struct {
// Raw represents raw GetOptions, as passed to the API server. Note
// that these may not be respected by all implementations of interface.
Raw *metav1.GetOptions
+
+ // UnsafeDisableDeepCopy indicates not to deep copy objects during a Get call.
+ // Be very careful with this, when enabled you must DeepCopy any object before mutating it,
+ // otherwise you will mutate the object in the cache.
+ // +optional
+ UnsafeDisableDeepCopy *bool
}
var _ GetOption = &GetOptions{}
@@ -440,6 +463,9 @@ func (o *GetOptions) ApplyToGet(lo *GetOptions) {
if o.Raw != nil {
lo.Raw = o.Raw
}
+ if o.UnsafeDisableDeepCopy != nil {
+ lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy
+ }
}
// AsGetOptions returns these options as a flattened metav1.GetOptions.
@@ -618,6 +644,9 @@ type MatchingLabelsSelector struct {
// ApplyToList applies this configuration to the given list options.
func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) {
+ if m.Selector == nil {
+ m.Selector = labels.Nothing()
+ }
opts.LabelSelector = m
}
@@ -651,6 +680,9 @@ type MatchingFieldsSelector struct {
// ApplyToList applies this configuration to the given list options.
func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) {
+ if m.Selector == nil {
+ m.Selector = fields.Nothing()
+ }
opts.FieldSelector = m
}
@@ -692,15 +724,14 @@ func (l Limit) ApplyToList(opts *ListOptions) {
// otherwise you will mutate the object in the cache.
type UnsafeDisableDeepCopyOption bool
+// ApplyToGet applies this configuration to the given Get options.
+func (d UnsafeDisableDeepCopyOption) ApplyToGet(opts *GetOptions) {
+ opts.UnsafeDisableDeepCopy = ptr.To(bool(d))
+}
+
// ApplyToList applies this configuration to the given List options.
func (d UnsafeDisableDeepCopyOption) ApplyToList(opts *ListOptions) {
- definitelyTrue := true
- definitelyFalse := false
- if d {
- opts.UnsafeDisableDeepCopy = &definitelyTrue
- } else {
- opts.UnsafeDisableDeepCopy = &definitelyFalse
- }
+ opts.UnsafeDisableDeepCopy = ptr.To(bool(d))
}
// UnsafeDisableDeepCopy indicates not to deep copy objects during list objects.
@@ -863,10 +894,18 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {
o.Raw = &metav1.PatchOptions{}
}
- o.Raw.DryRun = o.DryRun
- o.Raw.Force = o.Force
- o.Raw.FieldManager = o.FieldManager
- o.Raw.FieldValidation = o.FieldValidation
+ if o.DryRun != nil {
+ o.Raw.DryRun = o.DryRun
+ }
+ if o.Force != nil {
+ o.Raw.Force = o.Force
+ }
+ if o.FieldManager != "" {
+ o.Raw.FieldManager = o.FieldManager
+ }
+ if o.FieldValidation != "" {
+ o.Raw.FieldValidation = o.FieldValidation
+ }
return o.Raw
}
@@ -899,13 +938,15 @@ var ForceOwnership = forceOwnership{}
type forceOwnership struct{}
func (forceOwnership) ApplyToPatch(opts *PatchOptions) {
- definitelyTrue := true
- opts.Force = &definitelyTrue
+ opts.Force = ptr.To(true)
}
func (forceOwnership) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) {
- definitelyTrue := true
- opts.Force = &definitelyTrue
+ opts.Force = ptr.To(true)
+}
+
+func (forceOwnership) ApplyToApply(opts *ApplyOptions) {
+ opts.Force = ptr.To(true)
}
// }}}
@@ -939,3 +980,57 @@ func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) {
}
// }}}
+
+// ApplyOptions are the options for an apply request.
+type ApplyOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // Force is going to "force" Apply requests. It means the user will
+ // re-acquire conflicting fields owned by other people.
+ Force *bool
+
+ // fieldManager is a name associated with the actor or entity
+ // that is making these changes. The value must be less than or
+ // 128 characters long, and only contain printable characters,
+ // as defined by https://golang.org/pkg/unicode/#IsPrint. This
+ // field is required.
+ //
+ // +required
+ FieldManager string
+}
+
+// ApplyOptions applies the given opts onto the ApplyOptions
+func (o *ApplyOptions) ApplyOptions(opts []ApplyOption) *ApplyOptions {
+ for _, opt := range opts {
+ opt.ApplyToApply(o)
+ }
+ return o
+}
+
+// ApplyToApply applies the given opts onto the ApplyOptions
+func (o *ApplyOptions) ApplyToApply(opts *ApplyOptions) {
+ if o.DryRun != nil {
+ opts.DryRun = o.DryRun
+ }
+ if o.Force != nil {
+ opts.Force = o.Force
+ }
+
+ if o.FieldManager != "" {
+ opts.FieldManager = o.FieldManager
+ }
+}
+
+// AsPatchOptions constructs patch options from the given ApplyOptions
+func (o *ApplyOptions) AsPatchOptions() *metav1.PatchOptions {
+ return &metav1.PatchOptions{
+ DryRun: o.DryRun,
+ Force: o.Force,
+ FieldManager: o.FieldManager,
+ }
+}
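Illustrative note (not part of the vendored patch): how the new ApplyOptions compose with the existing option helpers, all of which gain ApplyToApply implementations in this diff.

package example

import (
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Resolve a handful of ApplyOptions into the metav1.PatchOptions that are
// sent on the wire; this mirrors what the typed and unstructured clients do.
func exampleApplyOptions() {
	opts := (&client.ApplyOptions{}).ApplyOptions([]client.ApplyOption{
		client.FieldOwner("example-operator"),
		client.ForceOwnership,
		client.DryRunAll,
	})

	patchOpts := opts.AsPatchOptions()
	// Prints: example-operator true [All]
	fmt.Println(patchOpts.FieldManager, *patchOpts.Force, patchOpts.DryRun)
}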
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
index 11d60838..b99d7663 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go
@@ -27,6 +27,11 @@ import (
var (
// Apply uses server-side apply to patch the given object.
+ //
+ // This should now only be used to patch sub resources, e.g. with client.Client.Status().Patch().
+ // Use client.Client.Apply() instead of client.Client.Patch(..., client.Apply, ...).
+ // This will be deprecated once the Apply method has been added for sub resources.
+ // See the following issue for more details: https://github.com/kubernetes-sigs/controller-runtime/issues/3183
Apply Patch = applyPatch{}
// Merge uses the raw object as a merge patch, without modifications.
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
index 92afd9a9..3bd762a6 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go
@@ -18,8 +18,10 @@ package client
import (
"context"
+ "fmt"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/util/apply"
)
var _ Reader = &typedClient{}
@@ -41,7 +43,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti
createOpts.ApplyOptions(opts)
return o.Post().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
Body(obj).
VersionedParams(createOpts.AsCreateOptions(), c.paramCodec).
@@ -60,9 +62,9 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti
updateOpts.ApplyOptions(opts)
return o.Put().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
Body(obj).
VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec).
Do(ctx).
@@ -80,9 +82,9 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti
deleteOpts.ApplyOptions(opts)
return o.Delete().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
Body(deleteOpts.AsDeleteOptions()).
Do(ctx).
Error()
@@ -123,15 +125,40 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts .
patchOpts.ApplyOptions(opts)
return o.Patch(patch.Type()).
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
+func (c *typedClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ o, err := c.resources.getObjMeta(obj)
+ if err != nil {
+ return err
+ }
+ req, err := apply.NewRequest(o, obj)
+ if err != nil {
+ return fmt.Errorf("failed to create apply request: %w", err)
+ }
+ applyOpts := &ApplyOptions{}
+ applyOpts.ApplyOptions(opts)
+
+ return req.
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.name).
+ VersionedParams(applyOpts.AsPatchOptions(), c.paramCodec).
+ Do(ctx).
+ // This is hacky; it is required because `Into` takes a `runtime.Object`,
+ // which the ApplyConfigurations do not implement. The generated clients
+ // don't have this problem because they deserialize into the API type, not the
+ // apply configuration: https://github.com/kubernetes/kubernetes/blob/22f5e01a37c0bc6a5f494dec14dd4e3688ee1d55/staging/src/k8s.io/client-go/gentype/type.go#L296-L317
+ Into(runtimeObjectFromApplyConfiguration(obj))
+}
+
// Get implements client.Client.
func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
r, err := c.resources.getResource(obj)
@@ -179,9 +206,9 @@ func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Ob
getOpts.ApplyOptions(opts)
return o.Get().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
VersionedParams(getOpts.AsGetOptions(), c.paramCodec).
Do(ctx).
@@ -202,9 +229,9 @@ func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subReso
createOpts.ApplyOptions(opts)
return o.Post().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(subResourceObj).
VersionedParams(createOpts.AsCreateOptions(), c.paramCodec).
@@ -237,9 +264,9 @@ func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subReso
}
return o.Put().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(body).
VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec).
@@ -268,9 +295,9 @@ func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResou
}
return o.Patch(patch.Type()).
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(data).
VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec).
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
index 0d969517..e636c3be 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go
@@ -22,6 +22,7 @@ import (
"strings"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/util/apply"
)
var _ Reader = &unstructuredClient{}
@@ -50,7 +51,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr
createOpts.ApplyOptions(opts)
result := o.Post().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
Body(obj).
VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
@@ -79,9 +80,9 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up
updateOpts.ApplyOptions(opts)
result := o.Put().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
Body(obj).
VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec).
Do(ctx).
@@ -106,9 +107,9 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De
deleteOpts.ApplyOptions(opts)
return o.Delete().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
Body(deleteOpts.AsDeleteOptions()).
Do(ctx).
Error()
@@ -157,15 +158,41 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch
patchOpts.ApplyOptions(opts)
return o.Patch(patch.Type()).
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec).
Body(data).
Do(ctx).
Into(obj)
}
+func (uc *unstructuredClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...ApplyOption) error {
+ unstructuredApplyConfig, ok := obj.(*unstructuredApplyConfiguration)
+ if !ok {
+ return fmt.Errorf("bug: unstructured client got an applyconfiguration that was not %T but %T", &unstructuredApplyConfiguration{}, obj)
+ }
+ o, err := uc.resources.getObjMeta(unstructuredApplyConfig.Unstructured)
+ if err != nil {
+ return err
+ }
+
+ req, err := apply.NewRequest(o, obj)
+ if err != nil {
+ return fmt.Errorf("failed to create apply request: %w", err)
+ }
+ applyOpts := &ApplyOptions{}
+ applyOpts.ApplyOptions(opts)
+
+ return req.
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
+ Resource(o.resource()).
+ Name(o.name).
+ VersionedParams(applyOpts.AsPatchOptions(), uc.paramCodec).
+ Do(ctx).
+ Into(unstructuredApplyConfig.Unstructured)
+}
+
// Get implements client.Client.
func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
u, ok := obj.(runtime.Unstructured)
@@ -244,9 +271,9 @@ func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResour
getOpts.ApplyOptions(opts)
return o.Get().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
Do(ctx).
@@ -275,9 +302,9 @@ func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subRes
createOpts.ApplyOptions(opts)
return o.Post().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(subResourceObj).
VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
@@ -310,9 +337,9 @@ func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object,
}
return o.Put().
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(body).
VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec).
@@ -347,9 +374,9 @@ func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object,
}
result := o.Patch(patch.Type()).
- NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+ NamespaceIfScoped(o.namespace, o.isNamespaced()).
Resource(o.resource()).
- Name(o.GetName()).
+ Name(o.name).
SubResource(subResource).
Body(data).
VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec).
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go
index a5655593..3dafaef9 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/config/controller.go
@@ -60,12 +60,33 @@ type Controller struct {
// Defaults to true, which means the controller will use leader election.
NeedLeaderElection *bool
+ // EnableWarmup specifies whether the controller should start its sources when the manager is not
+ // the leader. This is useful for cases where sources take a long time to start, as it allows
+ // the controller to warm up its caches even before it is elected as the leader. This
+ // improves leadership failover time, as the caches will be prepopulated before the controller
+ // transitions to be leader.
+ //
+ // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its
+ // sources without waiting to become leader.
+ // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without
+ // leader election do not wait on leader election to start their sources.
+ // Defaults to false.
+ //
+ // Note: This feature is currently in beta and subject to change.
+ // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/3220.
+ EnableWarmup *bool
+
// UsePriorityQueue configures the controllers queue to use the controller-runtime provided
// priority queue.
//
- // Note: This flag is disabled by default until a future version. It's currently in beta.
+ // Note: This flag is disabled by default until a future version. This feature is currently in beta.
+ // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/2374.
UsePriorityQueue *bool
// Logger is the logger controllers should use.
Logger logr.Logger
+
+ // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call.
+ // By default, there is no timeout.
+ ReconciliationTimeout time.Duration
}
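Illustrative note (not part of the vendored patch): a sketch of wiring the new knobs through manager-wide controller defaults; the values shown are arbitrary.

package example

import (
	"time"

	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/config"
)

func newManager() (ctrl.Manager, error) {
	// Manager-wide defaults; individual controllers can still override them
	// through their own controller options (see DefaultFromConfig below).
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Controller: config.Controller{
			// Start watch sources before winning leader election to shorten failover.
			EnableWarmup: ptr.To(true),
			// Bound each Reconcile call; zero keeps the previous "no timeout" behaviour.
			ReconciliationTimeout: 30 * time.Second,
		},
	})
}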
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go
index 9de959b4..afa15aeb 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go
@@ -91,8 +91,29 @@ type TypedOptions[request comparable] struct {
// UsePriorityQueue configures the controllers queue to use the controller-runtime provided
// priority queue.
//
- // Note: This flag is disabled by default until a future version. It's currently in beta.
+ // Note: This flag is disabled by default until a future version. This feature is currently in beta.
+ // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/2374.
UsePriorityQueue *bool
+
+ // EnableWarmup specifies whether the controller should start its sources when the manager is not
+ // the leader. This is useful for cases where sources take a long time to start, as it allows
+ // the controller to warm up its caches even before it is elected as the leader. This
+ // improves leadership failover time, as the caches will be prepopulated before the controller
+ // transitions to be leader.
+ //
+ // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its
+ // sources without waiting to become leader.
+ // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without
+ // leader election do not wait on leader election to start their sources.
+ // Defaults to false.
+ //
+ // Note: This feature is currently in beta and subject to change.
+ // For more details, see: https://github.com/kubernetes-sigs/controller-runtime/issues/3220.
+ EnableWarmup *bool
+
+ // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call.
+ // By default, there is no timeout.
+ ReconciliationTimeout time.Duration
}
// DefaultFromConfig defaults the config from a config.Controller
@@ -124,6 +145,14 @@ func (options *TypedOptions[request]) DefaultFromConfig(config config.Controller
if options.NeedLeaderElection == nil {
options.NeedLeaderElection = config.NeedLeaderElection
}
+
+ if options.EnableWarmup == nil {
+ options.EnableWarmup = config.EnableWarmup
+ }
+
+ if options.ReconciliationTimeout == 0 {
+ options.ReconciliationTimeout = config.ReconciliationTimeout
+ }
}
// Controller implements an API. A Controller manages a work queue fed reconcile.Requests
@@ -243,7 +272,7 @@ func NewTypedUnmanaged[request comparable](name string, options TypedOptions[req
}
// Create controller with dependencies set
- return &controller.Controller[request]{
+ return controller.New[request](controller.Options[request]{
Do: options.Reconciler,
RateLimiter: options.RateLimiter,
NewQueue: options.NewQueue,
@@ -253,7 +282,9 @@ func NewTypedUnmanaged[request comparable](name string, options TypedOptions[req
LogConstructor: options.LogConstructor,
RecoverPanic: options.RecoverPanic,
LeaderElected: options.NeedLeaderElection,
- }, nil
+ EnableWarmup: options.EnableWarmup,
+ ReconciliationTimeout: options.ReconciliationTimeout,
+ }), nil
}
// ReconcileIDFromContext gets the reconcileID from the current context.
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
index c3f77a6f..71363f0d 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go
@@ -1,6 +1,7 @@
package priorityqueue
import (
+ "math"
"sync"
"sync/atomic"
"time"
@@ -19,7 +20,10 @@ import (
type AddOpts struct {
After time.Duration
RateLimited bool
- Priority int
+ // Priority is the priority of the item. Higher values
+ // indicate higher priority.
+ // Defaults to zero if unset.
+ Priority *int
}
// PriorityQueue is a priority queue for a controller. It
@@ -120,8 +124,8 @@ type priorityqueue[T comparable] struct {
get chan item[T]
// waiters is the number of routines blocked in Get, we use it to determine
- // if we can push items.
- waiters atomic.Int64
+ // if we can push items. Every manipulation has to be protected with the lock.
+ waiters int64
// Configurable for testing
now func() time.Time
@@ -129,6 +133,10 @@ type priorityqueue[T comparable] struct {
}
func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) {
+ if w.shutdown.Load() {
+ return
+ }
+
w.lock.Lock()
defer w.lock.Unlock()
@@ -150,7 +158,7 @@ func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) {
item := &item[T]{
Key: key,
AddedCounter: w.addedCounter,
- Priority: o.Priority,
+ Priority: ptr.Deref(o.Priority, 0),
ReadyAt: readyAt,
}
w.items[key] = item
@@ -165,12 +173,12 @@ func (w *priorityqueue[T]) AddWithOpts(o AddOpts, items ...T) {
// The b-tree de-duplicates based on ordering and any change here
// will affect the order - Just delete and re-add.
item, _ := w.queue.Delete(w.items[key])
- if o.Priority > item.Priority {
+ if newPriority := ptr.Deref(o.Priority, 0); newPriority > item.Priority {
// Update depth metric only if the item in the queue was already added to the depth metric.
if item.ReadyAt == nil || w.becameReady.Has(key) {
- w.metrics.updateDepthWithPriorityMetric(item.Priority, o.Priority)
+ w.metrics.updateDepthWithPriorityMetric(item.Priority, newPriority)
}
- item.Priority = o.Priority
+ item.Priority = newPriority
}
if item.ReadyAt != nil && (readyAt == nil || readyAt.Before(*item.ReadyAt)) {
@@ -199,6 +207,7 @@ func (w *priorityqueue[T]) spin() {
blockForever := make(chan time.Time)
var nextReady <-chan time.Time
nextReady = blockForever
+ var nextItemReadyAt time.Time
for {
select {
@@ -206,10 +215,10 @@ func (w *priorityqueue[T]) spin() {
return
case <-w.itemOrWaiterAdded:
case <-nextReady:
+ nextReady = blockForever
+ nextItemReadyAt = time.Time{}
}
- nextReady = blockForever
-
func() {
w.lock.Lock()
defer w.lock.Unlock()
@@ -220,39 +229,67 @@ func (w *priorityqueue[T]) spin() {
// manipulating the tree from within Ascend might lead to panics, so
// track what we want to delete and do it after we are done ascending.
var toDelete []*item[T]
- w.queue.Ascend(func(item *item[T]) bool {
- if item.ReadyAt != nil {
- if readyAt := item.ReadyAt.Sub(w.now()); readyAt > 0 {
- nextReady = w.tick(readyAt)
- return false
+
+ var key T
+
+ // Items in the queue tree are sorted first by priority and second by readiness, so
+ // items with a lower priority might be ready further down in the queue.
+ // We iterate through the priorities from high to low until we find a ready item.
+ pivot := item[T]{
+ Key: key,
+ AddedCounter: 0,
+ Priority: math.MaxInt,
+ ReadyAt: nil,
+ }
+
+ for {
+ pivotChange := false
+
+ w.queue.AscendGreaterOrEqual(&pivot, func(item *item[T]) bool {
+ // Item is locked, we can not hand it out
+ if w.locked.Has(item.Key) {
+ return true
}
- if !w.becameReady.Has(item.Key) {
- w.metrics.add(item.Key, item.Priority)
- w.becameReady.Insert(item.Key)
+
+ if item.ReadyAt != nil {
+ if readyAt := item.ReadyAt.Sub(w.now()); readyAt > 0 {
+ if nextItemReadyAt.After(*item.ReadyAt) || nextItemReadyAt.IsZero() {
+ nextReady = w.tick(readyAt)
+ nextItemReadyAt = *item.ReadyAt
+ }
+
+ // Adjusting the pivot item moves the next AscendGreaterOrEqual pass to the next lower priority.
+ pivot.Priority = item.Priority - 1
+ pivotChange = true
+ return false
+ }
+ if !w.becameReady.Has(item.Key) {
+ w.metrics.add(item.Key, item.Priority)
+ w.becameReady.Insert(item.Key)
+ }
}
- }
- if w.waiters.Load() == 0 {
- // Have to keep iterating here to ensure we update metrics
- // for further items that became ready and set nextReady.
- return true
- }
+ if w.waiters == 0 {
+ // Have to keep iterating here to ensure we update metrics
+ // for further items that became ready and set nextReady.
+ return true
+ }
- // Item is locked, we can not hand it out
- if w.locked.Has(item.Key) {
- return true
- }
+ w.metrics.get(item.Key, item.Priority)
+ w.locked.Insert(item.Key)
+ w.waiters--
+ delete(w.items, item.Key)
+ toDelete = append(toDelete, item)
+ w.becameReady.Delete(item.Key)
+ w.get <- *item
- w.metrics.get(item.Key, item.Priority)
- w.locked.Insert(item.Key)
- w.waiters.Add(-1)
- delete(w.items, item.Key)
- toDelete = append(toDelete, item)
- w.becameReady.Delete(item.Key)
- w.get <- *item
+ return true
+ })
- return true
- })
+ if !pivotChange {
+ break
+ }
+ }
for _, item := range toDelete {
w.queue.Delete(item)
@@ -274,12 +311,29 @@ func (w *priorityqueue[T]) AddRateLimited(item T) {
}
func (w *priorityqueue[T]) GetWithPriority() (_ T, priority int, shutdown bool) {
- w.waiters.Add(1)
+ if w.shutdown.Load() {
+ var zero T
+ return zero, 0, true
+ }
+
+ w.lock.Lock()
+ w.waiters++
+ w.lock.Unlock()
w.notifyItemOrWaiterAdded()
- item := <-w.get
- return item.Key, item.Priority, w.shutdown.Load()
+ select {
+ case <-w.done:
+ // Return if the queue was shutdown while we were already waiting for an item here.
+ // For example controller workers are continuously calling GetWithPriority and
+ // GetWithPriority is blocking the workers if there are no items in the queue.
+ // If the controller and accordingly the queue is then shut down, without this code
+ // branch the controller workers remain blocked here and are unable to shut down.
+ var zero T
+ return zero, 0, true
+ case item := <-w.get:
+ return item.Key, item.Priority, w.shutdown.Load()
+ }
}
func (w *priorityqueue[T]) Get() (item T, shutdown bool) {
@@ -365,6 +419,9 @@ func (w *priorityqueue[T]) logState() {
}
func less[T comparable](a, b *item[T]) bool {
+ if a.Priority != b.Priority {
+ return a.Priority > b.Priority
+ }
if a.ReadyAt == nil && b.ReadyAt != nil {
return true
}
@@ -374,9 +431,6 @@ func less[T comparable](a, b *item[T]) bool {
if a.ReadyAt != nil && b.ReadyAt != nil && !a.ReadyAt.Equal(*b.ReadyAt) {
return a.ReadyAt.Before(*b.ReadyAt)
}
- if a.Priority != b.Priority {
- return a.Priority > b.Priority
- }
return a.AddedCounter < b.AddedCounter
}
@@ -404,4 +458,5 @@ type bTree[T any] interface {
ReplaceOrInsert(item T) (_ T, _ bool)
Delete(item T) (T, bool)
Ascend(iterator btree.ItemIteratorG[T])
+ AscendGreaterOrEqual(pivot T, iterator btree.ItemIteratorG[T])
}
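Illustrative note (not part of the vendored patch): with Priority now a *int, callers opt into a priority explicitly and an unset priority means zero. A sketch using reconcile.Request as the item type; the names are invented.

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func enqueueExamples(q priorityqueue.PriorityQueue[reconcile.Request]) {
	a := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "a"}}
	b := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "b"}}
	c := reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "c"}}

	// Unset priority defaults to zero.
	q.AddWithOpts(priorityqueue.AddOpts{}, a)

	// Higher numbers are handed out first.
	q.AddWithOpts(priorityqueue.AddOpts{Priority: ptr.To(10)}, b)

	// A delayed high-priority item no longer starves ready lower-priority items:
	// the queue now scans priorities from high to low for the first ready item.
	q.AddWithOpts(priorityqueue.AddOpts{After: 5 * time.Second, Priority: ptr.To(100)}, c)
}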
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/binaries.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/binaries.go
index 4c9b1dae..5110d326 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/binaries.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/binaries.go
@@ -32,10 +32,9 @@ import (
"path"
"path/filepath"
"runtime"
- "sort"
"strings"
- "github.com/blang/semver/v4"
+ "k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/yaml"
)
@@ -111,6 +110,25 @@ type archive struct {
SelfLink string `json:"selfLink"`
}
+// parseKubernetesVersion returns:
+// 1. the SemVer form of s when it refers to a specific Kubernetes release, or
+// 2. the major and minor portions of s when it refers to a release series, or
+// 3. an error
+func parseKubernetesVersion(s string) (exact string, major, minor uint, err error) {
+ if v, err := version.ParseSemantic(s); err == nil {
+ return v.String(), 0, 0, nil
+ }
+
+ // Accept exactly two parseable components (major.minor) and nothing else.
+ if v, err := version.ParseGeneric(s); err == nil && len(v.Components()) == 2 {
+ if v.String() == strings.TrimPrefix(s, "v") {
+ return "", v.Major(), v.Minor(), nil
+ }
+ }
+
+ return "", 0, 0, fmt.Errorf("could not parse %q as version", s)
+}
+
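Illustrative note (not part of the vendored patch): the three documented outcomes, written as a test-style sketch against the helper above.

package envtest

import "testing"

func TestParseKubernetesVersionExamples(t *testing.T) {
	// 1. An exact release parses to its canonical SemVer form.
	if exact, _, _, _ := parseKubernetesVersion("v1.31.2"); exact != "1.31.2" {
		t.Fatalf("want 1.31.2, got %q", exact)
	}
	// 2. A release series yields only major/minor, to be resolved against the index.
	if _, major, minor, _ := parseKubernetesVersion("1.31"); major != 1 || minor != 31 {
		t.Fatalf("want 1.31, got %d.%d", major, minor)
	}
	// 3. Anything else is rejected.
	if _, _, _, err := parseKubernetesVersion("latest"); err == nil {
		t.Fatal("expected an error for a non-version string")
	}
}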
func downloadBinaryAssets(ctx context.Context, binaryAssetsDirectory, binaryAssetsVersion, binaryAssetsIndexURL string) (string, string, string, error) {
if binaryAssetsIndexURL == "" {
binaryAssetsIndexURL = DefaultBinaryAssetsIndexURL
@@ -125,14 +143,23 @@ func downloadBinaryAssets(ctx context.Context, binaryAssetsDirectory, binaryAsse
}
var binaryAssetsIndex *index
- if binaryAssetsVersion == "" {
- var err error
+ switch exact, major, minor, err := parseKubernetesVersion(binaryAssetsVersion); {
+ case binaryAssetsVersion != "" && err != nil:
+ return "", "", "", err
+
+ case binaryAssetsVersion != "" && exact != "":
+ // Look for these specific binaries locally before downloading them from the release index.
+ // Use the canonical form of the version from here on.
+ binaryAssetsVersion = "v" + exact
+
+ case binaryAssetsVersion == "" || major != 0 || minor != 0:
+ // Select a stable version from the release index before continuing.
binaryAssetsIndex, err = getIndex(ctx, binaryAssetsIndexURL)
if err != nil {
return "", "", "", err
}
- binaryAssetsVersion, err = latestStableVersionFromIndex(binaryAssetsIndex)
+ binaryAssetsVersion, err = latestStableVersionFromIndex(binaryAssetsIndex, major, minor)
if err != nil {
return "", "", "", err
}
@@ -252,34 +279,50 @@ func downloadBinaryAssetsArchive(ctx context.Context, index *index, version stri
return readBody(resp, out, archiveName, archive.Hash)
}
-func latestStableVersionFromIndex(index *index) (string, error) {
+// latestStableVersionFromIndex returns the version with highest [precedence] in index that is not a prerelease.
+// When either major or minor are not zero, the returned version will have those major and minor versions.
+// Note that the version cannot be limited to 0.0.x this way.
+//
+// It is an error when there is no appropriate version in index.
+//
+// [precedence]: https://semver.org/spec/v2.0.0.html#spec-item-11
+func latestStableVersionFromIndex(index *index, major, minor uint) (string, error) {
if len(index.Releases) == 0 {
return "", fmt.Errorf("failed to find latest stable version from index: index is empty")
}
- parsedVersions := []semver.Version{}
+ var found *version.Version
for releaseVersion := range index.Releases {
- v, err := semver.ParseTolerant(releaseVersion)
+ v, err := version.ParseSemantic(releaseVersion)
if err != nil {
return "", fmt.Errorf("failed to parse version %q: %w", releaseVersion, err)
}
// Filter out pre-releases.
- if len(v.Pre) > 0 {
+ if len(v.PreRelease()) > 0 {
continue
}
- parsedVersions = append(parsedVersions, v)
+ // Filter on release series, if any.
+ if (major != 0 || minor != 0) && (v.Major() != major || v.Minor() != minor) {
+ continue
+ }
+
+ if found == nil || v.GreaterThan(found) {
+ found = v
+ }
}
- if len(parsedVersions) == 0 {
- return "", fmt.Errorf("failed to find latest stable version from index: index does not have stable versions")
+ if found == nil {
+ search := "any"
+ if major != 0 || minor != 0 {
+ search = fmt.Sprint(major, ".", minor)
+ }
+
+ return "", fmt.Errorf("failed to find latest stable version from index: index does not have %s stable versions", search)
}
- sort.Slice(parsedVersions, func(i, j int) bool {
- return parsedVersions[i].GT(parsedVersions[j])
- })
- return "v" + parsedVersions[0].String(), nil
+ return "v" + found.String(), nil
}
func getIndex(ctx context.Context, indexURL string) (*index, error) {
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go
index 9bb81ed2..c9f19da9 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go
@@ -109,7 +109,11 @@ var (
// Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and
// install extension APIs.
type Environment struct {
- // ControlPlane is the ControlPlane including the apiserver and etcd
+ // ControlPlane is the ControlPlane including the apiserver and etcd.
+ // Binary paths (APIServer.Path, Etcd.Path, KubectlPath) can be pre-configured in ControlPlane.
+ // If DownloadBinaryAssets is true, the downloaded paths will always be used.
+ // If DownloadBinaryAssets is false and paths are not pre-configured (default is empty), they will be
+ // automatically resolved using BinaryAssetsDirectory.
ControlPlane controlplane.ControlPlane
// Scheme is used to determine if conversion webhooks should be enabled
@@ -211,6 +215,40 @@ func (te *Environment) Stop() error {
return te.ControlPlane.Stop()
}
+// configureBinaryPaths configures the binary paths for the API server, etcd, and kubectl.
+// If DownloadBinaryAssets is true, it downloads and uses those paths.
+// If DownloadBinaryAssets is false, it only sets paths that are not already configured (empty).
+func (te *Environment) configureBinaryPaths() error {
+ apiServer := te.ControlPlane.GetAPIServer()
+
+ if te.ControlPlane.Etcd == nil {
+ te.ControlPlane.Etcd = &controlplane.Etcd{}
+ }
+
+ if te.DownloadBinaryAssets {
+ apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(),
+ te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL)
+ if err != nil {
+ return err
+ }
+
+ apiServer.Path = apiServerPath
+ te.ControlPlane.Etcd.Path = etcdPath
+ te.ControlPlane.KubectlPath = kubectlPath
+ } else {
+ if apiServer.Path == "" {
+ apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory)
+ }
+ if te.ControlPlane.Etcd.Path == "" {
+ te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory)
+ }
+ if te.ControlPlane.KubectlPath == "" {
+ te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory)
+ }
+ }
+ return nil
+}
+
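Illustrative note (not part of the vendored patch): a sketch of the configurations the refactored path handling now supports; the paths are made up.

package example

import (
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// With configureBinaryPaths, pre-configured binary locations are respected
// instead of being unconditionally overwritten by BinPathFinder.
func startEnvtest() (*rest.Config, *envtest.Environment, error) {
	env := &envtest.Environment{}

	// Either point at a setup-envtest style directory...
	env.BinaryAssetsDirectory = "/usr/local/kubebuilder/bin"

	// ...or pin individual binaries; empty fields still fall back to the directory above.
	env.ControlPlane.GetAPIServer().Path = "/opt/k8s/kube-apiserver"

	cfg, err := env.Start()
	return cfg, env, err
}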
// Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on.
func (te *Environment) Start() (*rest.Config, error) {
if te.useExistingCluster() {
@@ -229,10 +267,6 @@ func (te *Environment) Start() (*rest.Config, error) {
} else {
apiServer := te.ControlPlane.GetAPIServer()
- if te.ControlPlane.Etcd == nil {
- te.ControlPlane.Etcd = &controlplane.Etcd{}
- }
-
if os.Getenv(envAttachOutput) == "true" {
te.AttachControlPlaneOutput = true
}
@@ -243,6 +277,9 @@ func (te *Environment) Start() (*rest.Config, error) {
if apiServer.Err == nil {
apiServer.Err = os.Stderr
}
+ if te.ControlPlane.Etcd == nil {
+ te.ControlPlane.Etcd = &controlplane.Etcd{}
+ }
if te.ControlPlane.Etcd.Out == nil {
te.ControlPlane.Etcd.Out = os.Stdout
}
@@ -251,20 +288,8 @@ func (te *Environment) Start() (*rest.Config, error) {
}
}
- if te.DownloadBinaryAssets {
- apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(),
- te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL)
- if err != nil {
- return nil, err
- }
-
- apiServer.Path = apiServerPath
- te.ControlPlane.Etcd.Path = etcdPath
- te.ControlPlane.KubectlPath = kubectlPath
- } else {
- apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory)
- te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory)
- te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory)
+ if err := te.configureBinaryPaths(); err != nil {
+ return nil, fmt.Errorf("failed to configure binary paths: %w", err)
}
if err := te.defaultTimeouts(); err != nil {
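
The refactor above means pre-configured binary paths are only overwritten when DownloadBinaryAssets is true; otherwise any path left empty falls back to BinaryAssetsDirectory via BinPathFinder. A minimal sketch of a test setup relying on that precedence, assuming the envtest re-exported ControlPlane/Etcd aliases; the etcd path is purely illustrative:

package main

import (
	"log"
	"os"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func main() {
	testEnv := &envtest.Environment{
		// Fallback directory for any binary whose path is left empty.
		BinaryAssetsDirectory: os.Getenv("KUBEBUILDER_ASSETS"),
		ControlPlane: envtest.ControlPlane{
			// Pre-configured path (illustrative); kept because DownloadBinaryAssets is false.
			Etcd: &envtest.Etcd{Path: "/opt/etcd/etcd"},
		},
		// If true, downloaded binaries would override the pre-set path above.
		DownloadBinaryAssets: false,
	}

	cfg, err := testEnv.Start()
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = testEnv.Stop() }()
	_ = cfg // normally handed to client.New or manager.New
}
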
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go
index fe78f21a..62d67281 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go
@@ -20,6 +20,7 @@ import (
"context"
"k8s.io/client-go/util/workqueue"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -141,7 +142,7 @@ func (e *enqueueRequestsFromMapFunc[object, request]) mapAndEnqueue(
if !ok {
if lowPriority {
q.(priorityqueue.PriorityQueue[request]).AddWithOpts(priorityqueue.AddOpts{
- Priority: LowPriority,
+ Priority: ptr.To(LowPriority),
}, req)
} else {
q.Add(req)
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go
index 29e755cb..88510d29 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go
@@ -19,8 +19,10 @@ package handler
import (
"context"
"reflect"
+ "time"
"k8s.io/client-go/util/workqueue"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -126,20 +128,14 @@ func (h TypedFuncs[object, request]) Create(ctx context.Context, e event.TypedCr
h.CreateFunc(ctx, e, q)
return
}
- wq := workqueueWithCustomAddFunc[request]{
- TypedRateLimitingInterface: q,
+
+ wq := workqueueWithDefaultPriority[request]{
// We already know that we have a priority queue, that event.Object implements
// client.Object and that its not nil
- addFunc: func(item request, q workqueue.TypedRateLimitingInterface[request]) {
- var priority int
- if e.IsInInitialList {
- priority = LowPriority
- }
- q.(priorityqueue.PriorityQueue[request]).AddWithOpts(
- priorityqueue.AddOpts{Priority: priority},
- item,
- )
- },
+ PriorityQueue: q.(priorityqueue.PriorityQueue[request]),
+ }
+ if e.IsInInitialList {
+ wq.priority = ptr.To(LowPriority)
}
h.CreateFunc(ctx, e, wq)
}
@@ -160,20 +156,13 @@ func (h TypedFuncs[object, request]) Update(ctx context.Context, e event.TypedUp
return
}
- wq := workqueueWithCustomAddFunc[request]{
- TypedRateLimitingInterface: q,
+ wq := workqueueWithDefaultPriority[request]{
// We already know that we have a priority queue, that event.ObjectOld and ObjectNew implement
// client.Object and that they are not nil
- addFunc: func(item request, q workqueue.TypedRateLimitingInterface[request]) {
- var priority int
- if any(e.ObjectOld).(client.Object).GetResourceVersion() == any(e.ObjectNew).(client.Object).GetResourceVersion() {
- priority = LowPriority
- }
- q.(priorityqueue.PriorityQueue[request]).AddWithOpts(
- priorityqueue.AddOpts{Priority: priority},
- item,
- )
- },
+ PriorityQueue: q.(priorityqueue.PriorityQueue[request]),
+ }
+ if any(e.ObjectOld).(client.Object).GetResourceVersion() == any(e.ObjectNew).(client.Object).GetResourceVersion() {
+ wq.priority = ptr.To(LowPriority)
}
h.UpdateFunc(ctx, e, wq)
}
@@ -201,13 +190,28 @@ func WithLowPriorityWhenUnchanged[object client.Object, request comparable](u Ty
}
}
-type workqueueWithCustomAddFunc[request comparable] struct {
- workqueue.TypedRateLimitingInterface[request]
- addFunc func(item request, q workqueue.TypedRateLimitingInterface[request])
+type workqueueWithDefaultPriority[request comparable] struct {
+ priorityqueue.PriorityQueue[request]
+ priority *int
+}
+
+func (w workqueueWithDefaultPriority[request]) Add(item request) {
+ w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority}, item)
}
-func (w workqueueWithCustomAddFunc[request]) Add(item request) {
- w.addFunc(item, w.TypedRateLimitingInterface)
+func (w workqueueWithDefaultPriority[request]) AddAfter(item request, after time.Duration) {
+ w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority, After: after}, item)
+}
+
+func (w workqueueWithDefaultPriority[request]) AddRateLimited(item request) {
+ w.PriorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: w.priority, RateLimited: true}, item)
+}
+
+func (w workqueueWithDefaultPriority[request]) AddWithOpts(o priorityqueue.AddOpts, items ...request) {
+ if o.Priority == nil {
+ o.Priority = w.priority
+ }
+ w.PriorityQueue.AddWithOpts(o, items...)
}
// addToQueueCreate adds the reconcile.Request to the priorityqueue in the handler
@@ -219,9 +223,9 @@ func addToQueueCreate[T client.Object, request comparable](q workqueue.TypedRate
return
}
- var priority int
+ var priority *int
if evt.IsInInitialList {
- priority = LowPriority
+ priority = ptr.To(LowPriority)
}
priorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: priority}, item)
}
@@ -235,9 +239,9 @@ func addToQueueUpdate[T client.Object, request comparable](q workqueue.TypedRate
return
}
- var priority int
+ var priority *int
if evt.ObjectOld.GetResourceVersion() == evt.ObjectNew.GetResourceVersion() {
- priority = LowPriority
+ priority = ptr.To(LowPriority)
}
priorityQueue.AddWithOpts(priorityqueue.AddOpts{Priority: priority}, item)
}
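
Since AddOpts.Priority is now a *int, a nil priority means "use the queue's default" and call sites opt into the low-priority lane explicitly. A small sketch of the enqueue pattern the handlers above implement; q, req and the lowPriority flag are stand-ins, not part of the diff:

package example

import (
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// enqueue mirrors the handler behavior above: leave Priority nil to accept the
// queue default, or pass ptr.To(handler.LowPriority) for initial-list events and
// updates whose resourceVersion did not change.
func enqueue(q priorityqueue.PriorityQueue[reconcile.Request], req reconcile.Request, lowPriority bool) {
	var prio *int
	if lowPriority {
		prio = ptr.To(handler.LowPriority)
	}
	q.AddWithOpts(priorityqueue.AddOpts{Priority: prio}, req)
}
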
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
index 9fa7ec71..ea796818 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go
@@ -30,6 +30,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/util/workqueue"
+ "k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics"
@@ -38,6 +39,55 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)
+// Options are the arguments for creating a new Controller.
+type Options[request comparable] struct {
+ // Reconciler is a function that can be called at any time with the Name / Namespace of an object and
+ // ensures that the state of the system matches the state specified in the object.
+ // Defaults to the DefaultReconcileFunc.
+ Do reconcile.TypedReconciler[request]
+
+ // RateLimiter is used to limit how frequently requests may be queued into the work queue.
+ RateLimiter workqueue.TypedRateLimiter[request]
+
+ // NewQueue constructs the queue for this controller once the controller is ready to start.
+ // This is a func because the standard Kubernetes work queues start themselves immediately, which
+ // leads to goroutine leaks if something calls controller.New repeatedly.
+ NewQueue func(controllerName string, rateLimiter workqueue.TypedRateLimiter[request]) workqueue.TypedRateLimitingInterface[request]
+
+ // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1.
+ MaxConcurrentReconciles int
+
+ // CacheSyncTimeout refers to the time limit set on waiting for cache to sync
+ // Defaults to 2 minutes if not set.
+ CacheSyncTimeout time.Duration
+
+ // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
+ Name string
+
+ // LogConstructor is used to construct a logger to then log messages to users during reconciliation,
+ // or for example when a watch is started.
+ // Note: LogConstructor has to be able to handle nil requests as we are also using it
+ // outside the context of a reconciliation.
+ LogConstructor func(request *request) logr.Logger
+
+ // RecoverPanic indicates whether the panic caused by reconcile should be recovered.
+ // Defaults to true.
+ RecoverPanic *bool
+
+ // LeaderElected indicates whether the controller is leader elected or always running.
+ LeaderElected *bool
+
+ // EnableWarmup specifies whether the controller should start its sources
+ // when the manager is not the leader.
+ // Defaults to false, which means that the controller will wait for leader election to start
+ // before starting sources.
+ EnableWarmup *bool
+
+ // ReconciliationTimeout is used as the timeout passed to the context of each Reconcile call.
+ // By default, there is no timeout.
+ ReconciliationTimeout time.Duration
+}
+
// Controller implements controller.Controller.
type Controller[request comparable] struct {
// Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required.
@@ -83,6 +133,14 @@ type Controller[request comparable] struct {
// startWatches maintains a list of sources, handlers, and predicates to start when the controller is started.
startWatches []source.TypedSource[request]
+ // startedEventSourcesAndQueue is used to track if the event sources have been started.
+ // It ensures that we append sources to c.startWatches only until we call Start() / Warmup()
+ // It is true if startEventSourcesAndQueueLocked has been called at least once.
+ startedEventSourcesAndQueue bool
+
+ // didStartEventSourcesOnce is used to ensure that the event sources are only started once.
+ didStartEventSourcesOnce sync.Once
+
// LogConstructor is used to construct a logger to then log messages to users during reconciliation,
// or for example when a watch is started.
// Note: LogConstructor has to be able to handle nil requests as we are also using it
@@ -95,6 +153,38 @@ type Controller[request comparable] struct {
// LeaderElected indicates whether the controller is leader elected or always running.
LeaderElected *bool
+
+ // EnableWarmup specifies whether the controller should start its sources when the manager is not
+ // the leader. This is useful for cases where sources take a long time to start, as it allows
+ // for the controller to warm up its caches even before it is elected as the leader. This
+ // improves leadership failover time, as the caches will be prepopulated before the controller
+ // transitions to be leader.
+ //
+ // Setting EnableWarmup to true and NeedLeaderElection to true means the controller will start its
+ // sources without waiting to become leader.
+ // Setting EnableWarmup to true and NeedLeaderElection to false is a no-op as controllers without
+ // leader election do not wait on leader election to start their sources.
+ // Defaults to false.
+ EnableWarmup *bool
+
+ ReconciliationTimeout time.Duration
+}
+
+// New returns a new Controller configured with the given options.
+func New[request comparable](options Options[request]) *Controller[request] {
+ return &Controller[request]{
+ Do: options.Do,
+ RateLimiter: options.RateLimiter,
+ NewQueue: options.NewQueue,
+ MaxConcurrentReconciles: options.MaxConcurrentReconciles,
+ CacheSyncTimeout: options.CacheSyncTimeout,
+ Name: options.Name,
+ LogConstructor: options.LogConstructor,
+ RecoverPanic: options.RecoverPanic,
+ LeaderElected: options.LeaderElected,
+ EnableWarmup: options.EnableWarmup,
+ ReconciliationTimeout: options.ReconciliationTimeout,
+ }
}
// Reconcile implements reconcile.Reconciler.
@@ -116,6 +206,13 @@ func (c *Controller[request]) Reconcile(ctx context.Context, req request) (_ rec
panic(r)
}
}()
+
+ if c.ReconciliationTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, c.ReconciliationTimeout)
+ defer cancel()
+ }
+
return c.Do.Reconcile(ctx, req)
}
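
The new ReconciliationTimeout simply bounds the context handed to each Reconcile call; the guard it adds is the standard pattern below, shown as a standalone sketch with an illustrative reconciler, request and timeout:

package example

import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileWithTimeout applies the same guard the controller now does: a
// non-zero timeout bounds the Reconcile context and is always cancelled.
func reconcileWithTimeout(ctx context.Context, r reconcile.Reconciler, req reconcile.Request, timeout time.Duration) (reconcile.Result, error) {
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	return r.Reconcile(ctx, req)
}
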
@@ -124,10 +221,9 @@ func (c *Controller[request]) Watch(src source.TypedSource[request]) error {
c.mu.Lock()
defer c.mu.Unlock()
- // Controller hasn't started yet, store the watches locally and return.
- //
- // These watches are going to be held on the controller struct until the manager or user calls Start(...).
- if !c.Started {
+ // Sources weren't started yet, store the watches locally and return.
+ // These sources are going to be held until either Warmup() or Start(...) is called.
+ if !c.startedEventSourcesAndQueue {
c.startWatches = append(c.startWatches, src)
return nil
}
@@ -144,6 +240,21 @@ func (c *Controller[request]) NeedLeaderElection() bool {
return *c.LeaderElected
}
+// Warmup implements the manager.WarmupRunnable interface.
+func (c *Controller[request]) Warmup(ctx context.Context) error {
+ if c.EnableWarmup == nil || !*c.EnableWarmup {
+ return nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Set the ctx so later calls to watch use this internal context
+ c.ctx = ctx
+
+ return c.startEventSourcesAndQueueLocked(ctx)
+}
+
// Start implements controller.Controller.
func (c *Controller[request]) Start(ctx context.Context) error {
// use an IIFE to get proper lock handling
@@ -158,17 +269,6 @@ func (c *Controller[request]) Start(ctx context.Context) error {
// Set the internal context.
c.ctx = ctx
- queue := c.NewQueue(c.Name, c.RateLimiter)
- if priorityQueue, isPriorityQueue := queue.(priorityqueue.PriorityQueue[request]); isPriorityQueue {
- c.Queue = priorityQueue
- } else {
- c.Queue = &priorityQueueWrapper[request]{TypedRateLimitingInterface: queue}
- }
- go func() {
- <-ctx.Done()
- c.Queue.ShutDown()
- }()
-
wg := &sync.WaitGroup{}
err := func() error {
defer c.mu.Unlock()
@@ -179,18 +279,12 @@ func (c *Controller[request]) Start(ctx context.Context) error {
// NB(directxman12): launch the sources *before* trying to wait for the
// caches to sync so that they have a chance to register their intended
// caches.
- if err := c.startEventSources(ctx); err != nil {
+ if err := c.startEventSourcesAndQueueLocked(ctx); err != nil {
return err
}
c.LogConstructor(nil).Info("Starting Controller")
- // All the watches have been started, we can reset the local slice.
- //
- // We should never hold watches more than necessary, each watch source can hold a backing cache,
- // which won't be garbage collected if we hold a reference to it.
- c.startWatches = nil
-
// Launch workers to process resources
c.LogConstructor(nil).Info("Starting workers", "worker count", c.MaxConcurrentReconciles)
wg.Add(c.MaxConcurrentReconciles)
@@ -218,63 +312,90 @@ func (c *Controller[request]) Start(ctx context.Context) error {
return nil
}
-// startEventSources launches all the sources registered with this controller and waits
+// startEventSourcesAndQueueLocked launches all the sources registered with this controller and waits
// for them to sync. It returns an error if any of the sources fail to start or sync.
-func (c *Controller[request]) startEventSources(ctx context.Context) error {
- errGroup := &errgroup.Group{}
- for _, watch := range c.startWatches {
- log := c.LogConstructor(nil)
- _, ok := watch.(interface {
- String() string
- })
-
- if !ok {
- log = log.WithValues("source", fmt.Sprintf("%T", watch))
+func (c *Controller[request]) startEventSourcesAndQueueLocked(ctx context.Context) error {
+ var retErr error
+
+ c.didStartEventSourcesOnce.Do(func() {
+ queue := c.NewQueue(c.Name, c.RateLimiter)
+ if priorityQueue, isPriorityQueue := queue.(priorityqueue.PriorityQueue[request]); isPriorityQueue {
+ c.Queue = priorityQueue
} else {
- log = log.WithValues("source", fmt.Sprintf("%s", watch))
+ c.Queue = &priorityQueueWrapper[request]{TypedRateLimitingInterface: queue}
}
- didStartSyncingSource := &atomic.Bool{}
- errGroup.Go(func() error {
- // Use a timeout for starting and syncing the source to avoid silently
- // blocking startup indefinitely if it doesn't come up.
- sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
- defer cancel()
-
- sourceStartErrChan := make(chan error, 1) // Buffer chan to not leak goroutine if we time out
- go func() {
- defer close(sourceStartErrChan)
- log.Info("Starting EventSource")
- if err := watch.Start(ctx, c.Queue); err != nil {
- sourceStartErrChan <- err
- return
- }
- syncingSource, ok := watch.(source.TypedSyncingSource[request])
- if !ok {
- return
- }
- didStartSyncingSource.Store(true)
- if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
- err := fmt.Errorf("failed to wait for %s caches to sync %v: %w", c.Name, syncingSource, err)
- log.Error(err, "Could not wait for Cache to sync")
- sourceStartErrChan <- err
+ go func() {
+ <-ctx.Done()
+ c.Queue.ShutDown()
+ }()
+
+ errGroup := &errgroup.Group{}
+ for _, watch := range c.startWatches {
+ log := c.LogConstructor(nil)
+ _, ok := watch.(interface {
+ String() string
+ })
+ if !ok {
+ log = log.WithValues("source", fmt.Sprintf("%T", watch))
+ } else {
+ log = log.WithValues("source", fmt.Sprintf("%s", watch))
+ }
+ didStartSyncingSource := &atomic.Bool{}
+ errGroup.Go(func() error {
+ // Use a timeout for starting and syncing the source to avoid silently
+ // blocking startup indefinitely if it doesn't come up.
+ sourceStartCtx, cancel := context.WithTimeout(ctx, c.CacheSyncTimeout)
+ defer cancel()
+
+ sourceStartErrChan := make(chan error, 1) // Buffer chan to not leak goroutine if we time out
+ go func() {
+ defer close(sourceStartErrChan)
+ log.Info("Starting EventSource")
+
+ if err := watch.Start(ctx, c.Queue); err != nil {
+ sourceStartErrChan <- err
+ return
+ }
+ syncingSource, ok := watch.(source.TypedSyncingSource[request])
+ if !ok {
+ return
+ }
+ didStartSyncingSource.Store(true)
+ if err := syncingSource.WaitForSync(sourceStartCtx); err != nil {
+ err := fmt.Errorf("failed to wait for %s caches to sync %v: %w", c.Name, syncingSource, err)
+ log.Error(err, "Could not wait for Cache to sync")
+ sourceStartErrChan <- err
+ }
+ }()
+
+ select {
+ case err := <-sourceStartErrChan:
+ return err
+ case <-sourceStartCtx.Done():
+ if didStartSyncingSource.Load() { // We are racing with WaitForSync, wait for it to let it tell us what happened
+ return <-sourceStartErrChan
+ }
+ if ctx.Err() != nil { // Don't return an error if the root context got cancelled
+ return nil
+ }
+ return fmt.Errorf("timed out waiting for source %s to Start. Please ensure that its Start() method is non-blocking", watch)
}
- }()
+ })
+ }
+ retErr = errGroup.Wait()
- select {
- case err := <-sourceStartErrChan:
- return err
- case <-sourceStartCtx.Done():
- if didStartSyncingSource.Load() { // We are racing with WaitForSync, wait for it to let it tell us what happened
- return <-sourceStartErrChan
- }
- if ctx.Err() != nil { // Don't return an error if the root context got cancelled
- return nil
- }
- return fmt.Errorf("timed out waiting for source %s to Start. Please ensure that its Start() method is non-blocking", watch)
- }
- })
- }
- return errGroup.Wait()
+ // All the watches have been started, we can reset the local slice.
+ //
+ // We should never hold watches more than necessary, each watch source can hold a backing cache,
+ // which won't be garbage collected if we hold a reference to it.
+ c.startWatches = nil
+
+ // Mark event sources as started after resetting the startWatches slice so that watches from
+ // a new Watch() call are immediately started.
+ c.startedEventSourcesAndQueue = true
+ })
+
+ return retErr
}
// processNextWorkItem will read a single work item off the workqueue and
@@ -343,7 +464,7 @@ func (c *Controller[request]) reconcileHandler(ctx context.Context, req request,
if errors.Is(err, reconcile.TerminalError(nil)) {
ctrlmetrics.TerminalReconcileErrors.WithLabelValues(c.Name).Inc()
} else {
- c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: priority}, req)
+ c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: ptr.To(priority)}, req)
}
ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc()
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc()
@@ -358,11 +479,11 @@ func (c *Controller[request]) reconcileHandler(ctx context.Context, req request,
// We need to drive to stable reconcile loops before queuing due
// to result.RequestAfter
c.Queue.Forget(req)
- c.Queue.AddWithOpts(priorityqueue.AddOpts{After: result.RequeueAfter, Priority: priority}, req)
+ c.Queue.AddWithOpts(priorityqueue.AddOpts{After: result.RequeueAfter, Priority: ptr.To(priority)}, req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc()
case result.Requeue: //nolint: staticcheck // We have to handle it until it is removed
log.V(5).Info("Reconcile done, requeueing")
- c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: priority}, req)
+ c.Queue.AddWithOpts(priorityqueue.AddOpts{RateLimited: true, Priority: ptr.To(priority)}, req)
ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc()
default:
log.V(5).Info("Reconcile successful")
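
The queue and event sources now start inside a sync.Once, so either Warmup or Start can trigger startup exactly once while Watch() buffers sources only until that happens. The shape of that guard, reduced to a standalone sketch with hypothetical names:

package example

import (
	"context"
	"sync"
)

// startGuard mirrors startEventSourcesAndQueueLocked: whichever caller gets here
// first runs the startup function; later callers are no-ops and see a nil error,
// matching the refactor above.
type startGuard struct {
	once sync.Once
}

func (s *startGuard) start(ctx context.Context, run func(context.Context) error) error {
	var retErr error
	s.once.Do(func() { retErr = run(ctx) })
	return retErr
}
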
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go
index bbd2eff6..aadb69e8 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/apiserver.go
@@ -374,7 +374,12 @@ func (s *APIServer) populateAPIServerCerts() error {
return err
}
- servingCerts, err := ca.NewServingCert()
+ servingAddresses := []string{"localhost"}
+ if s.SecureServing.ListenAddr.Address != "" {
+ servingAddresses = append(servingAddresses, s.SecureServing.ListenAddr.Address)
+ }
+
+ servingCerts, err := ca.NewServingCert(servingAddresses...)
if err != nil {
return err
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go
index c30d2132..98ffe3ac 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/testing/controlplane/etcd.go
@@ -159,6 +159,10 @@ func (e *Etcd) setProcessState() error {
// Stop stops this process gracefully, waits for its termination, and cleans up
// the DataDir if necessary.
func (e *Etcd) Stop() error {
+ if e.processState == nil {
+ return nil
+ }
+
if e.processState.DirNeedsCleaning {
e.DataDir = "" // reset the directory if it was randomly allocated, so that we can safely restart
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go
index 5cc25391..6c013e79 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go
@@ -56,6 +56,10 @@ type Options struct {
// Without that, a single slow response from the API server can result
// in losing leadership.
RenewDeadline time.Duration
+
+ // LeaderLabels are an optional set of labels that will be set on the lease object
+ // when this replica becomes leader
+ LeaderLabels map[string]string
}
// NewResourceLock creates a new resource lock for use in a leader election loop.
@@ -63,7 +67,6 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
if !options.LeaderElection {
return nil, nil
}
-
// Default resource lock to "leases". The previous default (from v0.7.0 to v0.11.x) was configmapsleases, which was
// used to migrate from configmaps to leases. Since the default was "configmapsleases" for over a year, spanning
// five minor releases, any actively maintained operators are very likely to have a released version that uses
@@ -93,22 +96,21 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
}
id = id + "_" + string(uuid.NewUUID())
- // Construct clients for leader election
- rest.AddUserAgent(config, "leader-election")
+ // Construct config for leader election
+ config = rest.AddUserAgent(config, "leader-election")
+ // Timeout set for a client used to contact to Kubernetes should be lower than
+ // RenewDeadline to keep a single hung request from forcing a leader loss.
+ // Setting it to max(time.Second, RenewDeadline/2) as a reasonable heuristic.
if options.RenewDeadline != 0 {
- return resourcelock.NewFromKubeconfig(options.LeaderElectionResourceLock,
- options.LeaderElectionNamespace,
- options.LeaderElectionID,
- resourcelock.ResourceLockConfig{
- Identity: id,
- EventRecorder: recorderProvider.GetEventRecorderFor(id),
- },
- config,
- options.RenewDeadline,
- )
+ timeout := options.RenewDeadline / 2
+ if timeout < time.Second {
+ timeout = time.Second
+ }
+ config.Timeout = timeout
}
+ // Construct clients for leader election
corev1Client, err := corev1client.NewForConfig(config)
if err != nil {
return nil, err
@@ -118,7 +120,8 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
if err != nil {
return nil, err
}
- return resourcelock.New(options.LeaderElectionResourceLock,
+
+ return resourcelock.NewWithLabels(options.LeaderElectionResourceLock,
options.LeaderElectionNamespace,
options.LeaderElectionID,
corev1Client,
@@ -127,6 +130,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op
Identity: id,
EventRecorder: recorderProvider.GetEventRecorderFor(id),
},
+ options.LeaderLabels,
)
}
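
The client timeout the leader-election path now derives is the heuristic spelled out in the comment, max(1s, RenewDeadline/2); as a plain function (name is illustrative):

package example

import "time"

// electionClientTimeout keeps a single hung API request from consuming the whole
// renewal window: half the renew deadline, but never below one second.
func electionClientTimeout(renewDeadline time.Duration) time.Duration {
	timeout := renewDeadline / 2
	if timeout < time.Second {
		timeout = time.Second
	}
	return timeout
}
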
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go
index e5204a75..a9f91cbd 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go
@@ -439,6 +439,11 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) {
return fmt.Errorf("failed to start other runnables: %w", err)
}
+ // Start WarmupRunnables and wait for warmup to complete.
+ if err := cm.runnables.Warmup.Start(cm.internalCtx); err != nil {
+ return fmt.Errorf("failed to start warmup runnables: %w", err)
+ }
+
// Start the leader election and all required runnables.
{
ctx, cancel := context.WithCancel(context.Background())
@@ -534,6 +539,18 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
}()
go func() {
+ go func() {
+ // Stop the warmup runnables in a separate goroutine to avoid blocking.
+ // It is important to stop the warmup runnables in parallel with the other runnables
+ // since we cannot assume ordering of whether or not one of the warmup runnables or one
+ // of the other runnables is holding a lock.
+ // Cancelling the wrong runnable (one that is not holding the lock) will cause the
+ // shutdown sequence to block indefinitely as it will wait for the runnable that is
+ // holding the lock to finish.
+ cm.logger.Info("Stopping and waiting for warmup runnables")
+ cm.runnables.Warmup.StopAndWait(cm.shutdownCtx)
+ }()
+
// First stop the non-leader election runnables.
cm.logger.Info("Stopping and waiting for non leader election runnables")
cm.runnables.Others.StopAndWait(cm.shutdownCtx)
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
index c3ae317b..e0e94245 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go
@@ -201,10 +201,15 @@ type Options struct {
// LeaseDuration time first.
LeaderElectionReleaseOnCancel bool
+ // LeaderElectionLabels allows a controller to supplement all leader election api calls with a set of custom labels based on
+ // the replica attempting to acquire leader status.
+ LeaderElectionLabels map[string]string
+
// LeaderElectionResourceLockInterface allows to provide a custom resourcelock.Interface that was created outside
// of the controller-runtime. If this value is set the options LeaderElectionID, LeaderElectionNamespace,
- // LeaderElectionResourceLock, LeaseDuration, RenewDeadline and RetryPeriod will be ignored. This can be useful if you
- // want to use a locking mechanism that is currently not supported, like a MultiLock across two Kubernetes clusters.
+ // LeaderElectionResourceLock, LeaseDuration, RenewDeadline, RetryPeriod and LeaderElectionLeases will be ignored.
+ // This can be useful if you want to use a locking mechanism that is currently not supported, like a MultiLock across
+ // two Kubernetes clusters.
LeaderElectionResourceLockInterface resourcelock.Interface
// LeaseDuration is the duration that non-leader candidates will
@@ -314,6 +319,15 @@ type LeaderElectionRunnable interface {
NeedLeaderElection() bool
}
+// warmupRunnable knows if a Runnable requires warmup. A warmup runnable is a runnable
+// that should be run when the manager is started but before it becomes leader.
+// Note: Implementing this interface is only useful when LeaderElection can be enabled, as the
+// behavior when leaderelection is not enabled is to run LeaderElectionRunnables immediately.
+type warmupRunnable interface {
+ // Warmup will be called when the manager is started but before it becomes leader.
+ Warmup(context.Context) error
+}
+
// New returns a new Manager for creating Controllers.
// Note that if ContentType in the given config is not set, "application/vnd.kubernetes.protobuf"
// will be used for all built-in resources of Kubernetes, and "application/json" is for other types
@@ -390,6 +404,7 @@ func New(config *rest.Config, options Options) (Manager, error) {
LeaderElectionID: options.LeaderElectionID,
LeaderElectionNamespace: options.LeaderElectionNamespace,
RenewDeadline: *options.RenewDeadline,
+ LeaderLabels: options.LeaderElectionLabels,
})
if err != nil {
return nil, err
@@ -417,7 +432,7 @@ func New(config *rest.Config, options Options) (Manager, error) {
}
errChan := make(chan error, 1)
- runnables := newRunnables(options.BaseContext, errChan)
+ runnables := newRunnables(options.BaseContext, errChan).withLogger(options.Logger)
return &controllerManager{
stopProcedureEngaged: ptr.To(int64(0)),
cluster: cluster,
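
LeaderElectionLabels is plumbed from manager.Options through NewResourceLock's LeaderLabels into resourcelock.NewWithLabels, so the labels end up on the Lease held by the current leader. A minimal manager construction using it; the election ID and label values are illustrative:

package main

import (
	"log"

	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "operator.example.com",
		// Stamped onto the leader election Lease by this replica when it wins.
		LeaderElectionLabels: map[string]string{"app.kubernetes.io/name": "example-operator"},
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Fatal(err)
	}
}
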
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go
index db5cda7c..53e29fc5 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/runnable_group.go
@@ -5,6 +5,7 @@ import (
"errors"
"sync"
+ "github.com/go-logr/logr"
"sigs.k8s.io/controller-runtime/pkg/webhook"
)
@@ -32,6 +33,7 @@ type runnables struct {
Webhooks *runnableGroup
Caches *runnableGroup
LeaderElection *runnableGroup
+ Warmup *runnableGroup
Others *runnableGroup
}
@@ -42,10 +44,21 @@ func newRunnables(baseContext BaseContextFunc, errChan chan error) *runnables {
Webhooks: newRunnableGroup(baseContext, errChan),
Caches: newRunnableGroup(baseContext, errChan),
LeaderElection: newRunnableGroup(baseContext, errChan),
+ Warmup: newRunnableGroup(baseContext, errChan),
Others: newRunnableGroup(baseContext, errChan),
}
}
+// withLogger returns the runnables with the logger set for all runnable groups.
+func (r *runnables) withLogger(logger logr.Logger) *runnables {
+ r.HTTPServers.withLogger(logger)
+ r.Webhooks.withLogger(logger)
+ r.Caches.withLogger(logger)
+ r.LeaderElection.withLogger(logger)
+ r.Others.withLogger(logger)
+ return r
+}
+
// Add adds a runnable to closest group of runnable that they belong to.
//
// Add should be able to be called before and after Start, but not after StopAndWait.
@@ -65,8 +78,20 @@ func (r *runnables) Add(fn Runnable) error {
})
case webhook.Server:
return r.Webhooks.Add(fn, nil)
- case LeaderElectionRunnable:
- if !runnable.NeedLeaderElection() {
+ case warmupRunnable, LeaderElectionRunnable:
+ if warmupRunnable, ok := fn.(warmupRunnable); ok {
+ if err := r.Warmup.Add(RunnableFunc(warmupRunnable.Warmup), nil); err != nil {
+ return err
+ }
+ }
+
+ leaderElectionRunnable, ok := fn.(LeaderElectionRunnable)
+ if !ok {
+ // If the runnable is not a LeaderElectionRunnable, add it to the leader election group for backwards compatibility
+ return r.LeaderElection.Add(fn, nil)
+ }
+
+ if !leaderElectionRunnable.NeedLeaderElection() {
return r.Others.Add(fn, nil)
}
return r.LeaderElection.Add(fn, nil)
@@ -105,6 +130,9 @@ type runnableGroup struct {
// wg is an internal sync.WaitGroup that allows us to properly stop
// and wait for all the runnables to finish before returning.
wg *sync.WaitGroup
+
+ // logger is used for logging when errors are dropped during shutdown
+ logger logr.Logger
}
func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnableGroup {
@@ -113,12 +141,18 @@ func newRunnableGroup(baseContext BaseContextFunc, errChan chan error) *runnable
errChan: errChan,
ch: make(chan *readyRunnable),
wg: new(sync.WaitGroup),
+ logger: logr.Discard(), // Default to no-op logger
}
r.ctx, r.cancel = context.WithCancel(baseContext())
return r
}
+// withLogger sets the logger for this runnable group.
+func (r *runnableGroup) withLogger(logger logr.Logger) {
+ r.logger = logger
+}
+
// Started returns true if the group has started.
func (r *runnableGroup) Started() bool {
r.start.Lock()
@@ -224,7 +258,27 @@ func (r *runnableGroup) reconcile() {
// Start the runnable.
if err := rn.Start(r.ctx); err != nil {
- r.errChan <- err
+ // Check if we're during the shutdown process.
+ r.stop.RLock()
+ isStopped := r.stopped
+ r.stop.RUnlock()
+
+ if isStopped {
+ // During shutdown, try to send error first (error drain goroutine might still be running)
+ // but drop if it would block to prevent goroutine leaks
+ select {
+ case r.errChan <- err:
+ // Error sent successfully (error drain goroutine is still running)
+ default:
+ // Error drain goroutine has exited, drop error to prevent goroutine leak
+ if !errors.Is(err, context.Canceled) { // don't log context.Canceled errors as they are expected during shutdown
+ r.logger.Info("error dropped during shutdown to prevent goroutine leak", "error", err)
+ }
+ }
+ } else {
+ // During normal operation, always try to send errors (may block briefly)
+ r.errChan <- err
+ }
}
}(runnable)
}
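
Because the dispatch above matches the unexported warmupRunnable interface structurally, any runnable added to the manager that has a Warmup(context.Context) error method lands in the Warmup group (and, if it also reports NeedLeaderElection, in the leader-election group as before). A hedged sketch of such a runnable; the type and its body are illustrative:

package example

import "context"

// cacheWarmer illustrates the new split: Warmup runs when the manager starts,
// before leader election; Start still runs only after this replica becomes leader.
type cacheWarmer struct{}

func (c *cacheWarmer) Warmup(ctx context.Context) error {
	// Pre-populate anything expensive (caches, connections) here.
	return nil
}

func (c *cacheWarmer) Start(ctx context.Context) error {
	<-ctx.Done() // leader-only work would go here
	return nil
}

func (c *cacheWarmer) NeedLeaderElection() bool { return true }

// Registered with mgr.Add(&cacheWarmer{}); the manager routes it to both groups.
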
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go
index 76f6165b..1983165d 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/server.go
@@ -70,7 +70,7 @@ func (s *Server) Start(ctx context.Context) error {
shutdownCtx := context.Background()
if s.ShutdownTimeout != nil {
var shutdownCancel context.CancelFunc
- shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), *s.ShutdownTimeout)
+ shutdownCtx, shutdownCancel = context.WithTimeout(shutdownCtx, *s.ShutdownTimeout)
defer shutdownCancel()
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go
index ce33975f..9f24cb17 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go
@@ -47,13 +47,15 @@ type TypedPredicate[object any] interface {
Generic(event.TypedGenericEvent[object]) bool
}
-var _ Predicate = Funcs{}
-var _ Predicate = ResourceVersionChangedPredicate{}
-var _ Predicate = GenerationChangedPredicate{}
-var _ Predicate = AnnotationChangedPredicate{}
-var _ Predicate = or[client.Object]{}
-var _ Predicate = and[client.Object]{}
-var _ Predicate = not[client.Object]{}
+var (
+ _ Predicate = Funcs{}
+ _ Predicate = ResourceVersionChangedPredicate{}
+ _ Predicate = GenerationChangedPredicate{}
+ _ Predicate = AnnotationChangedPredicate{}
+ _ Predicate = or[client.Object]{}
+ _ Predicate = and[client.Object]{}
+ _ Predicate = not[client.Object]{}
+)
// Funcs is a function that implements Predicate.
type Funcs = TypedFuncs[client.Object]
@@ -259,11 +261,10 @@ func (TypedAnnotationChangedPredicate[object]) Update(e event.TypedUpdateEvent[o
// This predicate will skip update events that have no change in the object's label.
// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
//
-// Controller.Watch(
-//
-// &source.Kind{Type: v1.MyCustomKind},
-// &handler.EnqueueRequestForObject{},
-// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
+// Controller.Watch(
+// &source.Kind{Type: v1.MyCustomKind},
+// &handler.EnqueueRequestForObject{},
+// predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
//
// This will be helpful when object's labels is carrying some extra specification information beyond object's spec,
// and the controller will be triggered if any valid spec change (not only in spec, but also in labels) happens.
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
index 249a364b..a26fa348 100644
--- a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/conversion.go
@@ -22,7 +22,9 @@ See pkg/conversion for interface definitions required to ensure an API Type is c
package conversion
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"net/http"
@@ -31,8 +33,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/controller-runtime/pkg/conversion"
logf "sigs.k8s.io/controller-runtime/pkg/log"
+ conversionmetrics "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics"
)
var (
@@ -53,6 +57,8 @@ type webhook struct {
var _ http.Handler = &webhook{}
func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
convertReview := &apix.ConversionReview{}
err := json.NewDecoder(r.Body).Decode(convertReview)
if err != nil {
@@ -69,7 +75,7 @@ func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// TODO(droot): may be move the conversion logic to a separate module to
// decouple it from the http layer ?
- resp, err := wh.handleConvertRequest(convertReview.Request)
+ resp, err := wh.handleConvertRequest(ctx, convertReview.Request)
if err != nil {
log.Error(err, "failed to convert", "request", convertReview.Request.UID)
convertReview.Response = errored(err)
@@ -87,7 +93,18 @@ func (wh *webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// handles a version conversion request.
-func (wh *webhook) handleConvertRequest(req *apix.ConversionRequest) (*apix.ConversionResponse, error) {
+func (wh *webhook) handleConvertRequest(ctx context.Context, req *apix.ConversionRequest) (_ *apix.ConversionResponse, retErr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ conversionmetrics.WebhookPanics.WithLabelValues().Inc()
+
+ for _, fn := range utilruntime.PanicHandlers {
+ fn(ctx, r)
+ }
+ retErr = errors.New("internal error occurred during conversion")
+ return
+ }
+ }()
if req == nil {
return nil, fmt.Errorf("conversion request is nil")
}
diff --git a/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics/metrics.go b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics/metrics.go
new file mode 100644
index 00000000..c825f17f
--- /dev/null
+++ b/operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics/metrics.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "sigs.k8s.io/controller-runtime/pkg/metrics"
+)
+
+var (
+ // WebhookPanics is a prometheus counter metrics which holds the total
+ // number of panics from conversion webhooks.
+ WebhookPanics = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: "controller_runtime_conversion_webhook_panics_total",
+ Help: "Total number of conversion webhook panics",
+ }, []string{})
+)
+
+func init() {
+ metrics.Registry.MustRegister(
+ WebhookPanics,
+ )
+ // Init metric.
+ WebhookPanics.WithLabelValues().Add(0)
+}
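
The counter is registered on the default controller-runtime metrics registry and pre-initialized to zero, so controller_runtime_conversion_webhook_panics_total is scrapeable before any panic occurs. A hedged sketch of reading it in a test via prometheus' testutil helpers (the helper name is illustrative):

package example

import (
	"github.com/prometheus/client_golang/prometheus/testutil"

	conversionmetrics "sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics"
)

// panicsObserved returns the current value of the conversion-webhook panic
// counter; the labelless child returned by WithLabelValues is a Collector,
// which is what testutil.ToFloat64 expects.
func panicsObserved() float64 {
	return testutil.ToFloat64(conversionmetrics.WebhookPanics.WithLabelValues())
}
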
diff --git a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
index d538ac11..3fe528bb 100644
--- a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
+++ b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -52,8 +52,8 @@ import (
// - bool, for JSON booleans
// - float64, for JSON numbers
// - string, for JSON strings
-// - []interface{}, for JSON arrays
-// - map[string]interface{}, for JSON objects
+// - []any, for JSON arrays
+// - map[string]any, for JSON objects
// - nil for JSON null
//
// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
@@ -117,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
-//
-// By convention, to approximate the behavior of [Unmarshal] itself,
-// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
UnmarshalJSON([]byte) error
}
@@ -132,7 +129,7 @@ type UnmarshalTypeError struct {
Type reflect.Type // type of Go value it could not be assigned to
Offset int64 // error occurred after reading Offset bytes
Struct string // name of the struct type containing the field
- Field string // the full path from root node to the field
+ Field string // the full path from root node to the field, include embedded struct
}
func (e *UnmarshalTypeError) Error() string {
@@ -281,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error {
switch err := err.(type) {
case *UnmarshalTypeError:
err.Struct = d.errorContext.Struct.Name()
- err.Field = strings.Join(d.errorContext.FieldStack, ".")
+ fieldStack := d.errorContext.FieldStack
+ if err.Field != "" {
+ fieldStack = append(fieldStack, err.Field)
+ }
+ err.Field = strings.Join(fieldStack, ".")
}
}
return err
@@ -492,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm
}
// Prevent infinite loop if v is an interface pointing to its own address:
- // var v interface{}
+ // var v any
// v = &v
- if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
+ if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) {
v = v.Elem()
break
}
@@ -784,7 +785,10 @@ func (d *decodeState) object(v reflect.Value) error {
}
subv = v
destring = f.quoted
- for _, i := range f.index {
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ for i, ind := range f.index {
if subv.Kind() == reflect.Pointer {
if subv.IsNil() {
// If a struct embeds a pointer to an unexported type,
@@ -804,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error {
}
subv = subv.Elem()
}
- subv = subv.Field(i)
- }
- if d.errorContext == nil {
- d.errorContext = new(errorContext)
+ if i < len(f.index)-1 {
+ d.errorContext.FieldStack = append(
+ d.errorContext.FieldStack,
+ subv.Type().Field(ind).Name,
+ )
+ }
+ subv = subv.Field(ind)
}
- d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
d.errorContext.Struct = t
+ d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
d.appendStrictFieldStackKey(f.name)
} else if d.disallowUnknownFields {
d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key)))
@@ -1118,7 +1125,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool
// in an empty interface. They are not strictly necessary,
// but they avoid the weight of reflection in this common case.
-// valueInterface is like value but returns interface{}
+// valueInterface is like value but returns any.
func (d *decodeState) valueInterface() (val any) {
switch d.opcode {
default:
@@ -1135,7 +1142,7 @@ func (d *decodeState) valueInterface() (val any) {
return
}
-// arrayInterface is like array but returns []interface{}.
+// arrayInterface is like array but returns []any.
func (d *decodeState) arrayInterface() []any {
origStrictFieldStackLen := len(d.strictFieldStack)
defer func() {
@@ -1170,7 +1177,7 @@ func (d *decodeState) arrayInterface() []any {
return v
}
-// objectInterface is like object but returns map[string]interface{}.
+// objectInterface is like object but returns map[string]any.
func (d *decodeState) objectInterface() map[string]any {
origStrictFieldStackLen := len(d.strictFieldStack)
defer func() {
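
The decode change above makes UnmarshalTypeError.Field spell out the path through embedded structs rather than just the leaf field name. A hedged sketch via the library's public entry point; the exact rendering (e.g. "Inner.count") follows from the FieldStack handling above:

package main

import (
	"fmt"

	sigsjson "sigs.k8s.io/json"
)

type Inner struct {
	Count int `json:"count"`
}

type Outer struct {
	Inner
}

func main() {
	var o Outer
	// "count" is promoted from the embedded Inner; the string value forces a type error.
	err := sigsjson.UnmarshalCaseSensitivePreserveInts([]byte(`{"count":"oops"}`), &o)
	fmt.Println(err) // the error now names the embedded path, e.g. Outer.Inner.count
}
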
diff --git a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
index eb73bff5..4e3a1a2f 100644
--- a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
+++ b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -71,8 +71,8 @@ import (
//
// The "omitempty" option specifies that the field should be omitted
// from the encoding if the field has an empty value, defined as
-// false, 0, a nil pointer, a nil interface value, and any empty array,
-// slice, map, or string.
+// false, 0, a nil pointer, a nil interface value, and any array,
+// slice, map, or string of length zero.
//
// As a special case, if the field tag is "-", the field is always omitted.
// Note that a field with name "-" can still be generated using the tag "-,".
@@ -98,6 +98,17 @@ import (
// // Field appears in JSON as key "-".
// Field int `json:"-,"`
//
+// The "omitzero" option specifies that the field should be omitted
+// from the encoding if the field has a zero value, according to rules:
+//
+// 1) If the field type has an "IsZero() bool" method, that will be used to
+// determine whether the value is zero.
+//
+// 2) Otherwise, the value is zero if it is the zero value for its type.
+//
+// If both "omitempty" and "omitzero" are specified, the field will be omitted
+// if the value is either empty or zero (or both).
+//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
@@ -690,7 +701,8 @@ FieldLoop:
fv = fv.Field(i)
}
- if f.omitEmpty && isEmptyValue(fv) {
+ if (f.omitEmpty && isEmptyValue(fv)) ||
+ (f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) {
continue
}
e.WriteByte(next)
@@ -808,7 +820,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
// Here we use a struct to memorize the pointer to the first element of the slice
// and its length.
ptr := struct {
- ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe
+ ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe
len int
}{v.UnsafePointer(), v.Len()}
if _, ok := e.ptrSeen[ptr]; ok {
@@ -1039,11 +1051,19 @@ type field struct {
index []int
typ reflect.Type
omitEmpty bool
+ omitZero bool
+ isZero func(reflect.Value) bool
quoted bool
encoder encoderFunc
}
+type isZeroer interface {
+ IsZero() bool
+}
+
+var isZeroerType = reflect.TypeFor[isZeroer]()
+
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
@@ -1135,6 +1155,7 @@ func typeFields(t reflect.Type) structFields {
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
+ omitZero: opts.Contains("omitzero"),
quoted: quoted,
}
field.nameBytes = []byte(field.name)
@@ -1144,6 +1165,40 @@ func typeFields(t reflect.Type) structFields {
field.nameEscHTML = `"` + string(nameEscBuf) + `":`
field.nameNonEsc = `"` + field.name + `":`
+ if field.omitZero {
+ t := sf.Type
+ // Provide a function that uses a type's IsZero method.
+ switch {
+ case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
+ field.isZero = func(v reflect.Value) bool {
+ // Avoid panics calling IsZero on a nil interface or
+ // non-nil interface with nil pointer.
+ return v.IsNil() ||
+ (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) ||
+ v.Interface().(isZeroer).IsZero()
+ }
+ case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
+ field.isZero = func(v reflect.Value) bool {
+ // Avoid panics calling IsZero on nil pointer.
+ return v.IsNil() || v.Interface().(isZeroer).IsZero()
+ }
+ case t.Implements(isZeroerType):
+ field.isZero = func(v reflect.Value) bool {
+ return v.Interface().(isZeroer).IsZero()
+ }
+ case reflect.PointerTo(t).Implements(isZeroerType):
+ field.isZero = func(v reflect.Value) bool {
+ if !v.CanAddr() {
+ // Temporarily box v so we can take the address.
+ v2 := reflect.New(v.Type()).Elem()
+ v2.Set(v)
+ v = v2
+ }
+ return v.Addr().Interface().(isZeroer).IsZero()
+ }
+ }
+ }
+
fields = append(fields, field)
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
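
The encoder now recognizes the omitzero tag option alongside omitempty, preferring an IsZero() bool method when the type has one and otherwise comparing against the type's zero value. A short sketch of the tag semantics with time.Time, the usual case omitempty cannot express; the struct is illustrative:

package example

import "time"

type Event struct {
	Name string `json:"name"`
	// omitempty would still emit a zero time.Time (a struct is never "empty");
	// omitzero drops it because time.Time provides IsZero() bool.
	StartedAt time.Time `json:"startedAt,omitzero"`
}
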
diff --git a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go
index 48fc4d94..cc2108b9 100644
--- a/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go
+++ b/operator/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go
@@ -31,8 +31,8 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
-// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
-// [Number] instead of as a float64.
+// UseNumber causes the Decoder to unmarshal a number into an
+// interface value as a [Number] instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// DisallowUnknownFields causes the Decoder to return an error when the destination